From 998f8bf7869f66aa83e4c90776388a09e2340f6a Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 21 May 2025 14:37:47 -0600
Subject: [PATCH 001/158] Add conversion from mp_obj_t to cv::Size

Needed for some OpenCV functions

Converts the input to an ndarray_obj_t to handle all possible input types
---
 src/convert.cpp | 64 +++++++++++++++++++++++++++++++++++++++++++++----
 src/convert.h   |  5 +++-
 2 files changed, 64 insertions(+), 5 deletions(-)

diff --git a/src/convert.cpp b/src/convert.cpp
index 54af82e..104f776 100644
--- a/src/convert.cpp
+++ b/src/convert.cpp
@@ -16,8 +16,8 @@ uint8_t mat_depth_to_ndarray_type(int depth)
         case CV_16U: return NDARRAY_UINT16;
         case CV_16S: return NDARRAY_INT16;
         case CV_32F: return NDARRAY_FLOAT;
-        // case CV_Bool: return NDARRAY_BOOL;
-        default: mp_raise_ValueError(MP_ERROR_TEXT("Unsupported Mat depth"));
+        case CV_Bool: return NDARRAY_BOOL;
+        default: mp_raise_TypeError(MP_ERROR_TEXT("Unsupported Mat depth"));
     }
 }
 
@@ -29,8 +29,8 @@ int ndarray_type_to_mat_depth(uint8_t type)
         case NDARRAY_UINT16: return CV_16U;
         case NDARRAY_INT16: return CV_16S;
         case NDARRAY_FLOAT: return CV_32F;
-        // case NDARRAY_BOOL: return CV_Bool;
-        default: mp_raise_ValueError(MP_ERROR_TEXT("Unsupported ndarray type"));
+        case NDARRAY_BOOL: return CV_Bool;
+        default: mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type"));
     }
 }
 
@@ -147,3 +147,59 @@ Mat mp_obj_to_mat(mp_obj_t obj)
 
     return mat;
 }
+
+Size mp_obj_to_size(mp_obj_t obj)
+{
+    // Check for None object
+    if(obj == mp_const_none)
+    {
+        // Create an empty Size object
+        return Size();
+    }
+
+    // Assume the object is an ndarray, or can be converted to one. Will raise
+    // an exception if not
+    ndarray_obj_t *ndarray = ndarray_from_mp_obj(obj, 0);
+
+    // Validate the length of the ndarray
+    if(ndarray->len != 2)
+    {
+        mp_raise_TypeError(MP_ERROR_TEXT("Size must be length 2"));
+    }
+
+    // Check the type of the ndarray
+    if(ndarray->dtype == NDARRAY_UINT8)
+    {
+        uint8_t *data = (uint8_t *)ndarray->array;
+        return Size(data[0], data[1]);
+    }
+    else if(ndarray->dtype == NDARRAY_INT8)
+    {
+        int8_t *data = (int8_t *)ndarray->array;
+        return Size(data[0], data[1]);
+    }
+    else if(ndarray->dtype == NDARRAY_UINT16)
+    {
+        uint16_t *data = (uint16_t *)ndarray->array;
+        return Size(data[0], data[1]);
+    }
+    else if(ndarray->dtype == NDARRAY_INT16)
+    {
+        int16_t *data = (int16_t *)ndarray->array;
+        return Size(data[0], data[1]);
+    }
+    else if(ndarray->dtype == NDARRAY_FLOAT)
+    {
+        float *data = (float *)ndarray->array;
+        return Size(data[0], data[1]);
+    }
+    else if(ndarray->dtype == NDARRAY_BOOL)
+    {
+        bool *data = (bool *)ndarray->array;
+        return Size(data[0], data[1]);
+    }
+    else
+    {
+        mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type"));
+    }
+}
diff --git a/src/convert.h b/src/convert.h
index 07512e6..262f1fa 100644
--- a/src/convert.h
+++ b/src/convert.h
@@ -17,7 +17,10 @@ int ndarray_type_to_mat_depth(uint8_t type);
 ndarray_obj_t *mat_to_ndarray(Mat &mat);
 Mat ndarray_to_mat(ndarray_obj_t *ndarray);
 
-// Conversion functions between Mat and mp_obj_t.
Abstracts away intermediate // conversions to ndarray_obj_t mp_obj_t mat_to_mp_obj(Mat &mat); Mat mp_obj_to_mat(mp_obj_t obj); + +// Conversion functions between Size and mp_obj_t +Size mp_obj_to_size(mp_obj_t obj); From c538c3de574e26b19912eb213fb2f50ef4354506 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 21 May 2025 14:44:54 -0600 Subject: [PATCH 002/158] Add morphology functions --- src/imgproc.cpp | 119 +++++++++++++++++++++++++++++++++++++++++++++++ src/imgproc.h | 4 ++ src/opencv_upy.c | 73 ++++++++++++++++++++--------- 3 files changed, 173 insertions(+), 23 deletions(-) diff --git a/src/imgproc.cpp b/src/imgproc.cpp index 47d8f27..85c26d0 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -40,3 +40,122 @@ mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t // Return the result return mat_to_mp_obj(dst); } + +mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_kernel, ARG_dst }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_kernel, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + Mat kernel = mp_obj_to_mat(args[ARG_kernel].u_obj); + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + + // Call the corresponding OpenCV function + try { + dilate(src, dst, kernel); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + +mp_obj_t cv2_imgproc_erode(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_kernel, ARG_dst }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_kernel, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + Mat kernel = mp_obj_to_mat(args[ARG_kernel].u_obj); + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + + // Call the corresponding OpenCV function + try { + erode(src, dst, kernel); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + +mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_shape, ARG_ksize }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_shape, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_ksize, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + int shape = args[ARG_shape].u_int; + Size ksize = 
mp_obj_to_size(args[ARG_ksize].u_obj); + + // Instantiate result + Mat kernel; + + // Call the corresponding OpenCV function + try { + kernel = getStructuringElement(shape, ksize); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(kernel); +} + +mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_op, ARG_kernel, ARG_dst }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_op, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_kernel, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + int op = args[ARG_op].u_int; + Mat kernel = mp_obj_to_mat(args[ARG_kernel].u_obj); + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + + // Call the corresponding OpenCV function + try { + morphologyEx(src, dst, op, kernel); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} diff --git a/src/imgproc.h b/src/imgproc.h index 33b06b0..9d03e47 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -2,3 +2,7 @@ #include "py/runtime.h" extern mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_erode(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); diff --git a/src/opencv_upy.c b/src/opencv_upy.c index 63e24ad..2169a77 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -1,18 +1,25 @@ #include "core.h" #include "imgproc.h" -// Define a Python reference to the function we'll make available. -// See example.cpp for the definition. +//////////////////////////////////////////////////////////////////////////////// +// Python references to OpenCV functions +//////////////////////////////////////////////////////////////////////////////// + +// OpenCV core module static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_max_obj, 2, cv2_core_max); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_min_obj, 2, cv2_core_min); + +// OpenCV imgproc module static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_cvtColor_obj, 2, cv2_imgproc_cvtColor); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_dilate_obj, 2, cv2_imgproc_dilate); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_erode_obj, 2, cv2_imgproc_erode); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_getStructuringElement_obj, 2, cv2_imgproc_getStructuringElement); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx); -// Define all attributes of the module. -// Table entries are key/value pairs of the attribute name (a string) -// and the MicroPython object reference. 
-// All identifiers and strings are written as MP_QSTR_xxx and will be
-// optimized to word-sized integers by the build system (interned strings).
+////////////////////////////////////////////////////////////////////////////////
+// Module attributes
+////////////////////////////////////////////////////////////////////////////////
 static const mp_rom_map_elem_t cv2_module_globals_table[] = {
     ////////////////////////////////////////////////////////////////////////////
     // Module name
@@ -24,22 +31,38 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = {
     // Constants
     ////////////////////////////////////////////////////////////////////////////
 
-    // Color conversion codes. These are defined in <opencv2/imgproc.hpp>,
-    // however we can't include that header here because it's C++ and this is C,
-    // so we have to redefine them here. Only a subset of the most common
-    // conversions are included here.
-    { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_BGR2BGRA), MP_ROM_INT(0) },
-    { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_RGB2RGBA), MP_ROM_INT(0) },
-    { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_BGRA2BGR), MP_ROM_INT(1) },
-    { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_RGBA2RGB), MP_ROM_INT(1) },
-    { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_BGR2RGBA), MP_ROM_INT(2) },
-    { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_RGB2BGRA), MP_ROM_INT(2) },
-    { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_RGBA2BGR), MP_ROM_INT(3) },
-    { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_BGRA2RGB), MP_ROM_INT(3) },
-    { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_BGR2RGB), MP_ROM_INT(4) },
-    { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_RGB2BGR), MP_ROM_INT(4) },
-    { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_BGRA2RGBA), MP_ROM_INT(5) },
-    { MP_ROM_QSTR(MP_QSTR_COLOR_COLOR_RGBA2BGRA), MP_ROM_INT(5) },
+    // These constants are defined in OpenCV's header files, however we can't
+    // include them here because it's C++ and this is C, so we have to redefine
+    // them here. Only a subset of the most common conversions are included.
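+    // (As an illustration of the mapping, MP_QSTR_COLOR_BGR2GRAY below binds
+    // cv2.COLOR_BGR2GRAY to 6, which is assumed to match cv::COLOR_BGR2GRAY
+    // in the pinned OpenCV headers.)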
+
+    // Morphology operation types, from opencv2/imgproc.hpp
+    { MP_ROM_QSTR(MP_QSTR_MORPH_ERODE), MP_ROM_INT(0) },
+    { MP_ROM_QSTR(MP_QSTR_MORPH_DILATE), MP_ROM_INT(1) },
+    { MP_ROM_QSTR(MP_QSTR_MORPH_OPEN), MP_ROM_INT(2) },
+    { MP_ROM_QSTR(MP_QSTR_MORPH_CLOSE), MP_ROM_INT(3) },
+    { MP_ROM_QSTR(MP_QSTR_MORPH_GRADIENT), MP_ROM_INT(4) },
+    { MP_ROM_QSTR(MP_QSTR_MORPH_TOPHAT), MP_ROM_INT(5) },
+    { MP_ROM_QSTR(MP_QSTR_MORPH_BLACKHAT), MP_ROM_INT(6) },
+    { MP_ROM_QSTR(MP_QSTR_MORPH_HITMISS), MP_ROM_INT(7) },
+
+    // Morphology shapes, from opencv2/imgproc.hpp
+    { MP_ROM_QSTR(MP_QSTR_MORPH_RECT), MP_ROM_INT(0) },
+    { MP_ROM_QSTR(MP_QSTR_MORPH_CROSS), MP_ROM_INT(1) },
+    { MP_ROM_QSTR(MP_QSTR_MORPH_ELLIPSE), MP_ROM_INT(2) },
+
+    // Color conversion codes, from opencv2/imgproc.hpp
+    { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2BGRA), MP_ROM_INT(0) },
+    { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2RGBA), MP_ROM_INT(0) },
+    { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2BGR), MP_ROM_INT(1) },
+    { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2RGB), MP_ROM_INT(1) },
+    { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2RGBA), MP_ROM_INT(2) },
+    { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2BGRA), MP_ROM_INT(2) },
+    { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2BGR), MP_ROM_INT(3) },
+    { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2RGB), MP_ROM_INT(3) },
+    { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2RGB), MP_ROM_INT(4) },
+    { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2BGR), MP_ROM_INT(4) },
+    { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2RGBA), MP_ROM_INT(5) },
+    { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2BGRA), MP_ROM_INT(5) },
     { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2GRAY), MP_ROM_INT(6) },
     { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2GRAY), MP_ROM_INT(7) },
     { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2BGR), MP_ROM_INT(8) },
@@ -92,6 +115,10 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = {
     ////////////////////////////////////////////////////////////////////////////
 
     { MP_ROM_QSTR(MP_QSTR_cvtColor), MP_ROM_PTR(&cv2_imgproc_cvtColor_obj) },
+    { MP_ROM_QSTR(MP_QSTR_dilate), MP_ROM_PTR(&cv2_imgproc_dilate_obj) },
+    { MP_ROM_QSTR(MP_QSTR_erode), MP_ROM_PTR(&cv2_imgproc_erode_obj) },
+    { MP_ROM_QSTR(MP_QSTR_getStructuringElement), MP_ROM_PTR(&cv2_imgproc_getStructuringElement_obj) },
+    { MP_ROM_QSTR(MP_QSTR_morphologyEx), MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) },
 };
 static MP_DEFINE_CONST_DICT(cv2_module_globals, cv2_module_globals_table);

From 8560bcff5f9fb839d1a043b81c0d1d2ab91517f9 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 21 May 2025 17:20:47 -0600
Subject: [PATCH 003/158] Add conversion from mp_obj_t to cv::Point and cv::Scalar

Some OpenCV functions need these

Also improve conversion to cv::Size
---
 src/convert.cpp | 151 +++++++++++++++++++++++++++++++++++++++++-------
 src/convert.h   |   6 ++
 2 files changed, 136 insertions(+), 21 deletions(-)

diff --git a/src/convert.cpp b/src/convert.cpp
index 104f776..73db158 100644
--- a/src/convert.cpp
+++ b/src/convert.cpp
@@ -167,39 +167,148 @@ Size mp_obj_to_size(mp_obj_t obj)
         mp_raise_TypeError(MP_ERROR_TEXT("Size must be length 2"));
     }
 
-    // Check the type of the ndarray
-    if(ndarray->dtype == NDARRAY_UINT8)
+    // Compute the size, checking the type of the ndarray
+    Size size;
+    switch(ndarray->dtype)
     {
-        uint8_t *data = (uint8_t *)ndarray->array;
-        return Size(data[0], data[1]);
+        case NDARRAY_UINT8:
+            size.width = ((uint8_t*) ndarray->array)[0];
+            size.height = ((uint8_t*) ndarray->array)[1];
+            break;
+        case NDARRAY_INT8:
+            size.width = ((int8_t*) ndarray->array)[0];
+            size.height = ((int8_t*) ndarray->array)[1];
+            break;
+        case NDARRAY_UINT16:
+            size.width = ((uint16_t*) ndarray->array)[0];
+            size.height = ((uint16_t*)
ndarray->array)[1]; + break; + case NDARRAY_INT16: + size.width = ((int16_t*) ndarray->array)[0]; + size.height = ((int16_t*) ndarray->array)[1]; + break; + case NDARRAY_FLOAT: + size.width = ((float*) ndarray->array)[0]; + size.height = ((float*) ndarray->array)[1]; + break; + case NDARRAY_BOOL: + size.width = ((bool*) ndarray->array)[0]; + size.height = ((bool*) ndarray->array)[1]; + break; + default: + mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type")); + break; } - else if(ndarray->dtype == NDARRAY_INT8) + + return size; +} + +Point mp_obj_to_point(mp_obj_t obj) +{ + // Check for None object + if(obj == mp_const_none) { - int8_t *data = (int8_t *)ndarray->array; - return Size(data[0], data[1]); + // Create an empty Point object + return Point(); } - else if(ndarray->dtype == NDARRAY_UINT16) + + // Assume the object is a ndarray, or can be converted to one. Will raise an + // exception if not + ndarray_obj_t *ndarray = ndarray_from_mp_obj(obj, 0); + + // Validate the length of the ndarray + if(ndarray->len != 2) { - uint16_t *data = (uint16_t *)ndarray->array; - return Size(data[0], data[1]); + mp_raise_TypeError(MP_ERROR_TEXT("Point must be length 2")); } - else if(ndarray->dtype == NDARRAY_INT16) + + // Compute the point, checking the type of the ndarray + Point point; + switch(ndarray->dtype) { - int16_t *data = (int16_t *)ndarray->array; - return Size(data[0], data[1]); + case NDARRAY_UINT8: + point.x = ((uint8_t*) ndarray->array)[0]; + point.y = ((uint8_t*) ndarray->array)[1]; + break; + case NDARRAY_INT8: + point.x = ((int8_t*) ndarray->array)[0]; + point.y = ((int8_t*) ndarray->array)[1]; + break; + case NDARRAY_UINT16: + point.x = ((uint16_t*) ndarray->array)[0]; + point.y = ((uint16_t*) ndarray->array)[1]; + break; + case NDARRAY_INT16: + point.x = ((int16_t*) ndarray->array)[0]; + point.y = ((int16_t*) ndarray->array)[1]; + break; + case NDARRAY_FLOAT: + point.x = ((float*) ndarray->array)[0]; + point.y = ((float*) ndarray->array)[1]; + break; + case NDARRAY_BOOL: + point.x = ((bool*) ndarray->array)[0]; + point.y = ((bool*) ndarray->array)[1]; + break; + default: + mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type")); + break; } - else if(ndarray->dtype == NDARRAY_FLOAT) + + return point; +} + +Scalar mp_obj_to_scalar(mp_obj_t obj) +{ + // Check for None object + if(obj == mp_const_none) { - float *data = (float *)ndarray->array; - return Size(data[0], data[1]); + // Create an empty Scalar object + return Scalar(); } - else if(ndarray->dtype == NDARRAY_BOOL) + + // Assume the object is a ndarray, or can be converted to one. 
Will raise an + // exception if not + ndarray_obj_t *ndarray = ndarray_from_mp_obj(obj, 0); + + // Validate the length of the ndarray + if(ndarray->len > 4) { - bool *data = (bool *)ndarray->array; - return Size(data[0], data[1]); + mp_raise_TypeError(MP_ERROR_TEXT("Scalar must be length 4 or less")); } - else + + // Compute the scalar, checking the type of the ndarray + Scalar scalar; + switch(ndarray->dtype) { - mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type")); + case NDARRAY_UINT8: + for(size_t i = 0; i < ndarray->len; i++) + scalar[i] = ((uint8_t*) ndarray->array)[i]; + break; + case NDARRAY_INT8: + for(size_t i = 0; i < ndarray->len; i++) + scalar[i] = ((int8_t*) ndarray->array)[i]; + break; + case NDARRAY_UINT16: + for(size_t i = 0; i < ndarray->len; i++) + scalar[i] = ((uint16_t*) ndarray->array)[i]; + break; + case NDARRAY_INT16: + for(size_t i = 0; i < ndarray->len; i++) + scalar[i] = ((int16_t*) ndarray->array)[i]; + break; + case NDARRAY_FLOAT: + for(size_t i = 0; i < ndarray->len; i++) + scalar[i] = ((float*) ndarray->array)[i]; + break; + case NDARRAY_BOOL: + for(size_t i = 0; i < ndarray->len; i++) + scalar[i] = ((bool*) ndarray->array)[i]; + break; + default: + mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type")); + break; } + + return scalar; } diff --git a/src/convert.h b/src/convert.h index 262f1fa..4a81c58 100644 --- a/src/convert.h +++ b/src/convert.h @@ -24,3 +24,9 @@ Mat mp_obj_to_mat(mp_obj_t obj); // Conversion functions between Size and mp_obj_t Size mp_obj_to_size(mp_obj_t obj); + +// Conversion functions between Point and mp_obj_t +Point mp_obj_to_point(mp_obj_t obj); + +// Conversion functions between Scalar and mp_obj_t +Scalar mp_obj_to_scalar(mp_obj_t obj); From 164ef13c5fc68d4299ba0ef98f696eb2029941bd Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 21 May 2025 17:25:52 -0600 Subject: [PATCH 004/158] Ensure morphology functions have all optional arguments --- src/imgproc.cpp | 70 +++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 62 insertions(+), 8 deletions(-) diff --git a/src/imgproc.cpp b/src/imgproc.cpp index 85c26d0..101be74 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -43,11 +43,15 @@ mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments - enum { ARG_src, ARG_kernel, ARG_dst }; + enum { ARG_src, ARG_kernel, ARG_dst, ARG_anchor, ARG_iterations, ARG_borderType, ARG_borderValue }; static const mp_arg_t allowed_args[] = { { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, { MP_QSTR_kernel, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_iterations, MP_ARG_INT, { .u_int = 1 } }, + { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_CONSTANT } }, + { MP_QSTR_borderValue, MP_ARG_OBJ, { .u_obj = mp_const_none } }, }; // Parse the arguments @@ -58,10 +62,22 @@ mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *k Mat src = mp_obj_to_mat(args[ARG_src].u_obj); Mat kernel = mp_obj_to_mat(args[ARG_kernel].u_obj); Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + Point anchor; + if(args[ARG_anchor].u_obj == mp_const_none) + anchor = Point(-1, -1); // Default value + else + anchor = mp_obj_to_point(args[ARG_anchor].u_obj); + int iterations = args[ARG_iterations].u_int; + int borderType = 
args[ARG_borderType].u_int; + Scalar borderValue; + if(args[ARG_borderValue].u_obj == mp_const_none) + borderValue = morphologyDefaultBorderValue(); // Default value + else + borderValue = mp_obj_to_scalar(args[ARG_borderValue].u_obj); // Call the corresponding OpenCV function try { - dilate(src, dst, kernel); + dilate(src, dst, kernel, anchor, iterations, borderType, borderValue); } catch(Exception& e) { mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); } @@ -72,11 +88,15 @@ mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *k mp_obj_t cv2_imgproc_erode(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments - enum { ARG_src, ARG_kernel, ARG_dst }; + enum { ARG_src, ARG_kernel, ARG_dst, ARG_anchor, ARG_iterations, ARG_borderType, ARG_borderValue }; static const mp_arg_t allowed_args[] = { { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, { MP_QSTR_kernel, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_iterations, MP_ARG_INT, { .u_int = 1 } }, + { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_CONSTANT } }, + { MP_QSTR_borderValue, MP_ARG_OBJ, { .u_obj = mp_const_none } }, }; // Parse the arguments @@ -87,10 +107,22 @@ mp_obj_t cv2_imgproc_erode(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw Mat src = mp_obj_to_mat(args[ARG_src].u_obj); Mat kernel = mp_obj_to_mat(args[ARG_kernel].u_obj); Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + Point anchor; + if(args[ARG_anchor].u_obj == mp_const_none) + anchor = Point(-1, -1); // Default value + else + anchor = mp_obj_to_point(args[ARG_anchor].u_obj); + int iterations = args[ARG_iterations].u_int; + int borderType = args[ARG_borderType].u_int; + Scalar borderValue; + if(args[ARG_borderValue].u_obj == mp_const_none) + borderValue = morphologyDefaultBorderValue(); // Default value + else + borderValue = mp_obj_to_scalar(args[ARG_borderValue].u_obj); // Call the corresponding OpenCV function try { - erode(src, dst, kernel); + erode(src, dst, kernel, anchor, iterations, borderType, borderValue); } catch(Exception& e) { mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); } @@ -101,10 +133,11 @@ mp_obj_t cv2_imgproc_erode(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments - enum { ARG_shape, ARG_ksize }; + enum { ARG_shape, ARG_ksize, ARG_anchor }; static const mp_arg_t allowed_args[] = { { MP_QSTR_shape, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, { MP_QSTR_ksize, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } }, }; // Parse the arguments @@ -114,13 +147,18 @@ mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_ar // Convert arguments to required types int shape = args[ARG_shape].u_int; Size ksize = mp_obj_to_size(args[ARG_ksize].u_obj); + Point anchor; + if(args[ARG_anchor].u_obj == mp_const_none) + anchor = Point(-1, -1); // Default value + else + anchor = mp_obj_to_point(args[ARG_anchor].u_obj); // Instantiate result Mat kernel; // Call the corresponding OpenCV function try { - kernel = getStructuringElement(shape, ksize); + kernel = getStructuringElement(shape, ksize, anchor); } catch(Exception& e) { mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); } @@ -131,12 
+169,16 @@ mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_ar
 
 mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
     // Define the arguments
-    enum { ARG_src, ARG_op, ARG_kernel, ARG_dst };
+    enum { ARG_src, ARG_op, ARG_kernel, ARG_dst, ARG_anchor, ARG_iterations, ARG_borderType, ARG_borderValue };
     static const mp_arg_t allowed_args[] = {
         { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
         { MP_QSTR_op, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
         { MP_QSTR_kernel, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
         { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_iterations, MP_ARG_INT, { .u_int = 1 } },
+        { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_CONSTANT } },
+        { MP_QSTR_borderValue, MP_ARG_OBJ, { .u_obj = mp_const_none } },
     };
 
     // Parse the arguments
@@ -148,10 +190,22 @@ mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_ma
     int op = args[ARG_op].u_int;
     Mat kernel = mp_obj_to_mat(args[ARG_kernel].u_obj);
     Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+    Point anchor;
+    if(args[ARG_anchor].u_obj == mp_const_none)
+        anchor = Point(-1, -1); // Default value
+    else
+        anchor = mp_obj_to_point(args[ARG_anchor].u_obj);
+    int iterations = args[ARG_iterations].u_int;
+    int borderType = args[ARG_borderType].u_int;
+    Scalar borderValue;
+    if(args[ARG_borderValue].u_obj == mp_const_none)
+        borderValue = morphologyDefaultBorderValue(); // Default value
+    else
+        borderValue = mp_obj_to_scalar(args[ARG_borderValue].u_obj);
 
     // Call the corresponding OpenCV function
     try {
-        morphologyEx(src, dst, op, kernel);
+        morphologyEx(src, dst, op, kernel, anchor, iterations, borderType, borderValue);
     } catch(Exception& e) {
         mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
     }

From 2f9b0f7c58d1fedb5e4480b644bc09f524410dc7 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 21 May 2025 17:26:40 -0600
Subject: [PATCH 005/158] Add simple drawing functions

For some reason, putText() doubles the flash requirement, from roughly 2.4MB
to 4.8MB. The RP2350 linker script hard-codes the flash size at 4MB, so that
has to be modified for the firmware to link properly. Even after that,
putText() produced odd output, so it is commented out for now.
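A quick smoke test of the new drawing bindings from the MicroPython REPL
(a sketch only: it assumes the module is exposed as cv2 and that images are
ulab ndarrays, as in the existing conversion code; shapes and colors are
illustrative):

    import cv2
    from ulab import numpy as np

    # Blank 3-channel canvas; the drawing calls modify it in place and also
    # return it
    img = np.zeros((64, 96, 3), dtype=np.uint8)
    cv2.rectangle(img, (8, 8), (88, 56), (0, 255, 0), thickness=2)
    cv2.line(img, (8, 8), (88, 56), (255, 0, 0))
    cv2.circle(img, (48, 32), 10, (0, 0, 255), thickness=cv2.FILLED)
    cv2.drawMarker(img, (48, 32), (255, 255, 255), markerType=cv2.MARKER_CROSS)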
--- src/imgproc.cpp | 359 +++++++++++++++++++++++++++++++++++++++++++++++ src/imgproc.h | 9 ++ src/opencv_upy.c | 55 ++++++++ 3 files changed, 423 insertions(+) diff --git a/src/imgproc.cpp b/src/imgproc.cpp index 101be74..7ae8334 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -12,6 +12,86 @@ extern "C" { using namespace cv; +mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_img, ARG_pt1, ARG_pt2, ARG_color, ARG_thickness, ARG_line_type, ARG_shift, ARG_tipLength }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_pt1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_pt2, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } }, + { MP_QSTR_line_type, MP_ARG_INT, { .u_int = 8 } }, + { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_tipLength, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat img = mp_obj_to_mat(args[ARG_img].u_obj); + Point pt1 = mp_obj_to_point(args[ARG_pt1].u_obj); + Point pt2 = mp_obj_to_point(args[ARG_pt2].u_obj); + Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); + int thickness = args[ARG_thickness].u_int; + int line_type = args[ARG_line_type].u_int; + int shift = args[ARG_shift].u_int; + mp_float_t tipLength; + if(args[ARG_tipLength].u_obj == mp_const_none) + tipLength = 0.1; // Default value + else + tipLength = mp_obj_get_float(args[ARG_tipLength].u_obj); + + // Call the corresponding OpenCV function + try { + arrowedLine(img, pt1, pt2, color, thickness, line_type, shift, tipLength); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(img); +} + +mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_img, ARG_center, ARG_radius, ARG_color, ARG_thickness, ARG_lineType, ARG_shift }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_center, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_radius, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } }, + { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } }, + { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat img = mp_obj_to_mat(args[ARG_img].u_obj); + Point center = mp_obj_to_point(args[ARG_center].u_obj); + int radius = args[ARG_radius].u_int; + Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); + int thickness = args[ARG_thickness].u_int; + int lineType = args[ARG_lineType].u_int; + int shift = args[ARG_shift].u_int; + + // Call the corresponding OpenCV function + try { + circle(img, center, radius, color, thickness, lineType, shift); + } catch(Exception& e) { + 
mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(img); +} + mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_code, ARG_dst }; @@ -86,6 +166,86 @@ mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *k return mat_to_mp_obj(dst); } +mp_obj_t cv2_imgproc_drawMarker(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_img, ARG_position, ARG_color, ARG_markerType, ARG_markerSize, ARG_thickness, ARG_line_type }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_position, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_markerType, MP_ARG_INT, { .u_int = MARKER_CROSS } }, + { MP_QSTR_markerSize, MP_ARG_INT, { .u_int = 20 } }, + { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } }, + { MP_QSTR_line_type, MP_ARG_INT, { .u_int = 8 } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat img = mp_obj_to_mat(args[ARG_img].u_obj); + Point position = mp_obj_to_point(args[ARG_position].u_obj); + int markerType = args[ARG_markerType].u_int; + Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); + int markerSize = args[ARG_markerSize].u_int; + int thickness = args[ARG_thickness].u_int; + int line_type = args[ARG_line_type].u_int; + + // Call the corresponding OpenCV function + try { + drawMarker(img, position, color, markerType, markerSize, thickness, line_type); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(img); +} + +mp_obj_t cv2_imgproc_ellipse(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_img, ARG_center, ARG_axes, ARG_angle, ARG_startAngle, ARG_endAngle, ARG_color, ARG_thickness, ARG_lineType, ARG_shift }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_center, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_axes, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_angle, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_startAngle, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_endAngle, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } }, + { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } }, + { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat img = mp_obj_to_mat(args[ARG_img].u_obj); + Point center = mp_obj_to_point(args[ARG_center].u_obj); + Size axes = mp_obj_to_size(args[ARG_axes].u_obj); + int angle = args[ARG_angle].u_int; + int startAngle = args[ARG_startAngle].u_int; + int endAngle = args[ARG_endAngle].u_int; + Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); + int thickness = 
args[ARG_thickness].u_int; + int lineType = args[ARG_lineType].u_int; + int shift = args[ARG_shift].u_int; + + // Call the corresponding OpenCV function + try { + ellipse(img, center, axes, angle, startAngle, endAngle, color, thickness, lineType, shift); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(img); +} + mp_obj_t cv2_imgproc_erode(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_kernel, ARG_dst, ARG_anchor, ARG_iterations, ARG_borderType, ARG_borderValue }; @@ -131,6 +291,88 @@ mp_obj_t cv2_imgproc_erode(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw return mat_to_mp_obj(dst); } +mp_obj_t cv2_imgproc_fillConvexPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_img, ARG_points, ARG_color, ARG_lineType, ARG_shift }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } }, + { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat img = mp_obj_to_mat(args[ARG_img].u_obj); + Mat points = mp_obj_to_mat(args[ARG_points].u_obj); + Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); + int lineType = args[ARG_lineType].u_int; + int shift = args[ARG_shift].u_int; + + // points must be of type CV_32S + Mat points_32S; + points.allocator = &GetNumpyAllocator(); + points.convertTo(points_32S, CV_32S); + + // Call the corresponding OpenCV function + try { + fillConvexPoly(img, points_32S, color, lineType, shift); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(img); +} + +mp_obj_t cv2_imgproc_fillPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_img, ARG_pts, ARG_color, ARG_lineType, ARG_shift, ARG_offset }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_pts, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } }, + { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_offset, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat img = mp_obj_to_mat(args[ARG_img].u_obj); + Mat pts = mp_obj_to_mat(args[ARG_pts].u_obj); + Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); + int lineType = args[ARG_lineType].u_int; + int shift = args[ARG_shift].u_int; + Point offset; + if(args[ARG_offset].u_obj == mp_const_none) + offset = Point(); // Default value + else + offset = mp_obj_to_point(args[ARG_offset].u_obj); + + // points must be of type CV_32S + Mat pts_32S; + pts.allocator = &GetNumpyAllocator(); + pts.convertTo(pts_32S, 
CV_32S); + + // Call the corresponding OpenCV function + try { + fillPoly(img, pts_32S, color, lineType, shift, offset); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(img); +} + mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_shape, ARG_ksize, ARG_anchor }; @@ -167,6 +409,43 @@ mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_ar return mat_to_mp_obj(kernel); } +mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_img, ARG_pt1, ARG_pt2, ARG_color, ARG_thickness, ARG_lineType, ARG_shift }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_pt1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_pt2, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } }, + { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } }, + { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat img = mp_obj_to_mat(args[ARG_img].u_obj); + Point pt1 = mp_obj_to_point(args[ARG_pt1].u_obj); + Point pt2 = mp_obj_to_point(args[ARG_pt2].u_obj); + Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); + int thickness = args[ARG_thickness].u_int; + int lineType = args[ARG_lineType].u_int; + int shift = args[ARG_shift].u_int; + + // Call the corresponding OpenCV function + try { + line(img, pt1, pt2, color, thickness, lineType, shift); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(img); +} + mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_op, ARG_kernel, ARG_dst, ARG_anchor, ARG_iterations, ARG_borderType, ARG_borderValue }; @@ -213,3 +492,83 @@ mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_ma // Return the result return mat_to_mp_obj(dst); } + +// mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { +// // Define the arguments +// enum { ARG_img, ARG_text, ARG_org, ARG_fontFace, ARG_fontScale, ARG_color, ARG_thickness, ARG_lineType, ARG_bottomLeftOrigin }; +// static const mp_arg_t allowed_args[] = { +// { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, +// { MP_QSTR_text, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, +// { MP_QSTR_org, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, +// { MP_QSTR_fontFace, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = FONT_HERSHEY_SIMPLEX } }, +// { MP_QSTR_fontScale, MP_ARG_REQUIRED, { .u_obj = mp_const_none } }, +// { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, +// { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } }, +// { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } }, +// { MP_QSTR_bottomLeftOrigin, MP_ARG_BOOL, { .u_bool = 0 } }, +// }; + +// // Parse the arguments +// mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; +// mp_arg_parse_all(n_args, 
pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + +// // Convert arguments to required types +// Mat img = mp_obj_to_mat(args[ARG_img].u_obj); +// size_t len; +// const char *text_str = mp_obj_str_get_data(args[ARG_text].u_obj, &len); +// String text(text_str, len); +// Point org = mp_obj_to_point(args[ARG_org].u_obj); +// int fontFace = args[ARG_fontFace].u_int; +// mp_float_t fontScale = mp_obj_get_float(args[ARG_fontScale].u_obj); +// Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); +// int thickness = args[ARG_thickness].u_int; +// int lineType = args[ARG_lineType].u_int; +// bool bottomLeftOrigin = args[ARG_bottomLeftOrigin].u_bool; + +// // Call the corresponding OpenCV function +// try { +// putText(img, text, org, fontFace, fontScale, color, thickness, lineType, bottomLeftOrigin); +// } catch(Exception& e) { +// mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); +// } + +// // Return the result +// return mat_to_mp_obj(img); +// } + +mp_obj_t cv2_imgproc_rectangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_img, ARG_pt1, ARG_pt2, ARG_color, ARG_thickness, ARG_lineType, ARG_shift }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_pt1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_pt2, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } }, + { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } }, + { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat img = mp_obj_to_mat(args[ARG_img].u_obj); + Point pt1 = mp_obj_to_point(args[ARG_pt1].u_obj); + Point pt2 = mp_obj_to_point(args[ARG_pt2].u_obj); + Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); + int thickness = args[ARG_thickness].u_int; + int lineType = args[ARG_lineType].u_int; + int shift = args[ARG_shift].u_int; + + // Call the corresponding OpenCV function + try { + rectangle(img, pt1, pt2, color, thickness, lineType, shift); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(img); +} diff --git a/src/imgproc.h b/src/imgproc.h index 9d03e47..6aa16c1 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -1,8 +1,17 @@ // C headers #include "py/runtime.h" +extern mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_drawMarker(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_ellipse(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_erode(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_fillConvexPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_fillPoly(size_t n_args, const mp_obj_t 
*pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +// extern mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_rectangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); diff --git a/src/opencv_upy.c b/src/opencv_upy.c index 2169a77..13bae96 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -11,11 +11,20 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_max_obj, 2, cv2_core_max); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_min_obj, 2, cv2_core_min); // OpenCV imgproc module +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_circle_obj, 4, cv2_imgproc_circle); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_cvtColor_obj, 2, cv2_imgproc_cvtColor); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_dilate_obj, 2, cv2_imgproc_dilate); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_drawMarker_obj, 3, cv2_imgproc_drawMarker); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_ellipse_obj, 7, cv2_imgproc_ellipse); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_erode_obj, 2, cv2_imgproc_erode); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillConvexPoly_obj, 3, cv2_imgproc_fillConvexPoly); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillPoly_obj, 3, cv2_imgproc_fillPoly); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_getStructuringElement_obj, 2, cv2_imgproc_getStructuringElement); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_line_obj, 4, cv2_imgproc_line); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx); +// static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_putText_obj, 6, cv2_imgproc_putText); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_rectangle_obj, 4, cv2_imgproc_rectangle); //////////////////////////////////////////////////////////////////////////////// // Module attributes @@ -35,6 +44,17 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { // include them here because it's C++ and this is C, so we have to redefine // them here. Only a subset of the most common conversions are included. 
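+    // (These integer values are assumed to mirror the corresponding OpenCV
+    // enums exactly; e.g. BORDER_DEFAULT == BORDER_REFLECT_101 == 4 below.)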
+ // Border types, from opencv2/core/base.hpp + { MP_ROM_QSTR(MP_QSTR_BORDER_CONSTANT), MP_ROM_INT(0) }, + { MP_ROM_QSTR(MP_QSTR_BORDER_REPLICATE), MP_ROM_INT(1) }, + { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT), MP_ROM_INT(2) }, + { MP_ROM_QSTR(MP_QSTR_BORDER_WRAP), MP_ROM_INT(3) }, + { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT_101), MP_ROM_INT(4) }, + { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT101), MP_ROM_INT(4) }, + { MP_ROM_QSTR(MP_QSTR_BORDER_DEFAULT), MP_ROM_INT(4) }, + { MP_ROM_QSTR(MP_QSTR_BORDER_TRANSPARENT), MP_ROM_INT(5) }, + { MP_ROM_QSTR(MP_QSTR_BORDER_ISOLATED), MP_ROM_INT(16) }, + // Morphology operation types, from opencv2/imgproc.hpp { MP_ROM_QSTR(MP_QSTR_MORPH_ERODE), MP_ROM_INT(0) }, { MP_ROM_QSTR(MP_QSTR_MORPH_DILATE), MP_ROM_INT(1) }, @@ -101,6 +121,32 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGB2GRAY), MP_ROM_INT(87) }, { MP_ROM_QSTR(MP_QSTR_COLOR_BayerRG2GRAY), MP_ROM_INT(88) }, { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGR2GRAY), MP_ROM_INT(89) }, + + // Line types, from opencv2/imgproc.hpp + { MP_ROM_QSTR(MP_QSTR_FILLED), MP_ROM_INT(-1) }, + { MP_ROM_QSTR(MP_QSTR_LINE_4), MP_ROM_INT(4) }, + { MP_ROM_QSTR(MP_QSTR_LINE_8), MP_ROM_INT(8) }, + { MP_ROM_QSTR(MP_QSTR_LINE_AA), MP_ROM_INT(16) }, + + // Fonts, from opencv2/imgproc.hpp + // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SIMPLEX), MP_ROM_INT(0) }, + // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_PLAIN), MP_ROM_INT(1) }, + // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_DUPLEX), MP_ROM_INT(2) }, + // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_COMPLEX), MP_ROM_INT(3) }, + // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_TRIPLEX), MP_ROM_INT(4) }, + // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_COMPLEX_SMALL), MP_ROM_INT(5) }, + // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SCRIPT_SIMPLEX), MP_ROM_INT(6) }, + // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SCRIPT_COMPLEX), MP_ROM_INT(7) }, + // { MP_ROM_QSTR(MP_QSTR_FONT_ITALIC), MP_ROM_INT(16) }, + + // Marker types, from opencv2/imgproc.hpp + { MP_ROM_QSTR(MP_QSTR_MARKER_CROSS), MP_ROM_INT(0) }, + { MP_ROM_QSTR(MP_QSTR_MARKER_TILTED_CROSS), MP_ROM_INT(1) }, + { MP_ROM_QSTR(MP_QSTR_MARKER_STAR), MP_ROM_INT(2) }, + { MP_ROM_QSTR(MP_QSTR_MARKER_DIAMOND), MP_ROM_INT(3) }, + { MP_ROM_QSTR(MP_QSTR_MARKER_SQUARE), MP_ROM_INT(4) }, + { MP_ROM_QSTR(MP_QSTR_MARKER_TRIANGLE_UP), MP_ROM_INT(5) }, + { MP_ROM_QSTR(MP_QSTR_MARKER_TRIANGLE_DOWN), MP_ROM_INT(6) }, //////////////////////////////////////////////////////////////////////////// // OpenCV core functions @@ -114,11 +160,20 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { // OpenCV imgproc functions //////////////////////////////////////////////////////////////////////////// + { MP_ROM_QSTR(MP_QSTR_arrowedLine), MP_ROM_PTR(&cv2_imgproc_arrowedLine_obj) }, + { MP_ROM_QSTR(MP_QSTR_circle), MP_ROM_PTR(&cv2_imgproc_circle_obj) }, { MP_ROM_QSTR(MP_QSTR_cvtColor), MP_ROM_PTR(&cv2_imgproc_cvtColor_obj) }, { MP_ROM_QSTR(MP_QSTR_dilate), MP_ROM_PTR(&cv2_imgproc_dilate_obj) }, + { MP_ROM_QSTR(MP_QSTR_drawMarker), MP_ROM_PTR(&cv2_imgproc_drawMarker_obj) }, + { MP_ROM_QSTR(MP_QSTR_ellipse), MP_ROM_PTR(&cv2_imgproc_ellipse_obj) }, { MP_ROM_QSTR(MP_QSTR_erode), MP_ROM_PTR(&cv2_imgproc_erode_obj) }, + { MP_ROM_QSTR(MP_QSTR_fillConvexPoly), MP_ROM_PTR(&cv2_imgproc_fillConvexPoly_obj) }, + { MP_ROM_QSTR(MP_QSTR_fillPoly), MP_ROM_PTR(&cv2_imgproc_fillPoly_obj) }, { MP_ROM_QSTR(MP_QSTR_getStructuringElement), MP_ROM_PTR(&cv2_imgproc_getStructuringElement_obj) }, + { MP_ROM_QSTR(MP_QSTR_line), MP_ROM_PTR(&cv2_imgproc_line_obj) }, { MP_ROM_QSTR(MP_QSTR_morphologyEx), 
MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) }, + // { MP_ROM_QSTR(MP_QSTR_putText), MP_ROM_PTR(&cv2_imgproc_putText_obj) }, + { MP_ROM_QSTR(MP_QSTR_rectangle), MP_ROM_PTR(&cv2_imgproc_rectangle_obj) }, }; static MP_DEFINE_CONST_DICT(cv2_module_globals, cv2_module_globals_table); From 6590714e76a5fb47dd0a790f5be8b2d340f70683 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 22 May 2025 14:36:34 -0600 Subject: [PATCH 006/158] Downgrade to OpenCV 4.11 Fixes #1 --- src/convert.cpp | 2 -- src/imgproc.cpp | 84 ++++++++++++++++++++++++------------------------ src/imgproc.h | 2 +- src/opencv | 2 +- src/opencv_upy.c | 24 +++++++------- 5 files changed, 56 insertions(+), 58 deletions(-) diff --git a/src/convert.cpp b/src/convert.cpp index 73db158..6e12490 100644 --- a/src/convert.cpp +++ b/src/convert.cpp @@ -16,7 +16,6 @@ uint8_t mat_depth_to_ndarray_type(int depth) case CV_16U: return NDARRAY_UINT16; case CV_16S: return NDARRAY_INT16; case CV_32F: return NDARRAY_FLOAT; - case CV_Bool: return NDARRAY_BOOL; default: mp_raise_TypeError(MP_ERROR_TEXT("Unsupported Mat depth")); } } @@ -29,7 +28,6 @@ int ndarray_type_to_mat_depth(uint8_t type) case NDARRAY_UINT16: return CV_16U; case NDARRAY_INT16: return CV_16S; case NDARRAY_FLOAT: return CV_32F; - case NDARRAY_BOOL: return CV_Bool; default: mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type")); } } diff --git a/src/imgproc.cpp b/src/imgproc.cpp index 7ae8334..0636b6e 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -493,48 +493,48 @@ mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_ma return mat_to_mp_obj(dst); } -// mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { -// // Define the arguments -// enum { ARG_img, ARG_text, ARG_org, ARG_fontFace, ARG_fontScale, ARG_color, ARG_thickness, ARG_lineType, ARG_bottomLeftOrigin }; -// static const mp_arg_t allowed_args[] = { -// { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, -// { MP_QSTR_text, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, -// { MP_QSTR_org, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, -// { MP_QSTR_fontFace, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = FONT_HERSHEY_SIMPLEX } }, -// { MP_QSTR_fontScale, MP_ARG_REQUIRED, { .u_obj = mp_const_none } }, -// { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, -// { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } }, -// { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } }, -// { MP_QSTR_bottomLeftOrigin, MP_ARG_BOOL, { .u_bool = 0 } }, -// }; - -// // Parse the arguments -// mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; -// mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); - -// // Convert arguments to required types -// Mat img = mp_obj_to_mat(args[ARG_img].u_obj); -// size_t len; -// const char *text_str = mp_obj_str_get_data(args[ARG_text].u_obj, &len); -// String text(text_str, len); -// Point org = mp_obj_to_point(args[ARG_org].u_obj); -// int fontFace = args[ARG_fontFace].u_int; -// mp_float_t fontScale = mp_obj_get_float(args[ARG_fontScale].u_obj); -// Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); -// int thickness = args[ARG_thickness].u_int; -// int lineType = args[ARG_lineType].u_int; -// bool bottomLeftOrigin = args[ARG_bottomLeftOrigin].u_bool; - -// // Call the corresponding OpenCV function -// try { -// putText(img, text, org, fontFace, fontScale, color, thickness, lineType, bottomLeftOrigin); -// } catch(Exception& e) { -// 
mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); -// } - -// // Return the result -// return mat_to_mp_obj(img); -// } +mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_img, ARG_text, ARG_org, ARG_fontFace, ARG_fontScale, ARG_color, ARG_thickness, ARG_lineType, ARG_bottomLeftOrigin }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_text, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_org, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_fontFace, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = FONT_HERSHEY_SIMPLEX } }, + { MP_QSTR_fontScale, MP_ARG_REQUIRED, { .u_obj = mp_const_none } }, + { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } }, + { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } }, + { MP_QSTR_bottomLeftOrigin, MP_ARG_BOOL, { .u_bool = 0 } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat img = mp_obj_to_mat(args[ARG_img].u_obj); + size_t len; + const char *text_str = mp_obj_str_get_data(args[ARG_text].u_obj, &len); + String text(text_str, len); + Point org = mp_obj_to_point(args[ARG_org].u_obj); + int fontFace = args[ARG_fontFace].u_int; + mp_float_t fontScale = mp_obj_get_float(args[ARG_fontScale].u_obj); + Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); + int thickness = args[ARG_thickness].u_int; + int lineType = args[ARG_lineType].u_int; + bool bottomLeftOrigin = args[ARG_bottomLeftOrigin].u_bool; + + // Call the corresponding OpenCV function + try { + putText(img, text, org, fontFace, fontScale, color, thickness, lineType, bottomLeftOrigin); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(img); +} mp_obj_t cv2_imgproc_rectangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments diff --git a/src/imgproc.h b/src/imgproc.h index 6aa16c1..479e765 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -13,5 +13,5 @@ extern mp_obj_t cv2_imgproc_fillPoly(size_t n_args, const mp_obj_t *pos_args, mp extern mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); -// extern mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_rectangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); diff --git a/src/opencv b/src/opencv index 6e8a224..8eb737f 160000 --- a/src/opencv +++ b/src/opencv @@ -1 +1 @@ -Subproject commit 6e8a2245ddcf849cef74519a71f00dd9abbfcfeb +Subproject commit 8eb737f902be3e0606b865b3e7ef58dcd213609f diff --git a/src/opencv_upy.c b/src/opencv_upy.c index 13bae96..d8234b4 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -23,7 +23,7 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillPoly_obj, 3, cv2_imgproc_fillP static 
MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_getStructuringElement_obj, 2, cv2_imgproc_getStructuringElement); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_line_obj, 4, cv2_imgproc_line); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx); -// static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_putText_obj, 6, cv2_imgproc_putText); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_putText_obj, 6, cv2_imgproc_putText); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_rectangle_obj, 4, cv2_imgproc_rectangle); //////////////////////////////////////////////////////////////////////////////// @@ -50,9 +50,9 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT), MP_ROM_INT(2) }, { MP_ROM_QSTR(MP_QSTR_BORDER_WRAP), MP_ROM_INT(3) }, { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT_101), MP_ROM_INT(4) }, + { MP_ROM_QSTR(MP_QSTR_BORDER_TRANSPARENT), MP_ROM_INT(5) }, { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT101), MP_ROM_INT(4) }, { MP_ROM_QSTR(MP_QSTR_BORDER_DEFAULT), MP_ROM_INT(4) }, - { MP_ROM_QSTR(MP_QSTR_BORDER_TRANSPARENT), MP_ROM_INT(5) }, { MP_ROM_QSTR(MP_QSTR_BORDER_ISOLATED), MP_ROM_INT(16) }, // Morphology operation types, from opencv2/imgproc.hpp @@ -129,15 +129,15 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_LINE_AA), MP_ROM_INT(16) }, // Fonts, from opencv2/imgproc.hpp - // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SIMPLEX), MP_ROM_INT(0) }, - // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_PLAIN), MP_ROM_INT(1) }, - // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_DUPLEX), MP_ROM_INT(2) }, - // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_COMPLEX), MP_ROM_INT(3) }, - // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_TRIPLEX), MP_ROM_INT(4) }, - // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_COMPLEX_SMALL), MP_ROM_INT(5) }, - // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SCRIPT_SIMPLEX), MP_ROM_INT(6) }, - // { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SCRIPT_COMPLEX), MP_ROM_INT(7) }, - // { MP_ROM_QSTR(MP_QSTR_FONT_ITALIC), MP_ROM_INT(16) }, + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SIMPLEX), MP_ROM_INT(0) }, + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_PLAIN), MP_ROM_INT(1) }, + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_DUPLEX), MP_ROM_INT(2) }, + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_COMPLEX), MP_ROM_INT(3) }, + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_TRIPLEX), MP_ROM_INT(4) }, + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_COMPLEX_SMALL), MP_ROM_INT(5) }, + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SCRIPT_SIMPLEX), MP_ROM_INT(6) }, + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SCRIPT_COMPLEX), MP_ROM_INT(7) }, + { MP_ROM_QSTR(MP_QSTR_FONT_ITALIC), MP_ROM_INT(16) }, // Marker types, from opencv2/imgproc.hpp { MP_ROM_QSTR(MP_QSTR_MARKER_CROSS), MP_ROM_INT(0) }, @@ -172,7 +172,7 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_getStructuringElement), MP_ROM_PTR(&cv2_imgproc_getStructuringElement_obj) }, { MP_ROM_QSTR(MP_QSTR_line), MP_ROM_PTR(&cv2_imgproc_line_obj) }, { MP_ROM_QSTR(MP_QSTR_morphologyEx), MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) }, - // { MP_ROM_QSTR(MP_QSTR_putText), MP_ROM_PTR(&cv2_imgproc_putText_obj) }, + { MP_ROM_QSTR(MP_QSTR_putText), MP_ROM_PTR(&cv2_imgproc_putText_obj) }, { MP_ROM_QSTR(MP_QSTR_rectangle), MP_ROM_PTR(&cv2_imgproc_rectangle_obj) }, }; static MP_DEFINE_CONST_DICT(cv2_module_globals, cv2_module_globals_table); From 4cf886dfe12b8a46afa227b28503b6a370db319c Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 22 May 2025 16:18:40 -0600 Subject: [PATCH 007/158] Remove min() and max() These already exist in ulab numpy as minimum() and maximum(). 
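For reference, a minimal usage sketch of the ulab equivalents (assuming a
firmware built with ulab's numpy module; the arrays here are illustrative,
not from this repo):

    from ulab import numpy as np
    a = np.array([1, 5, 3], dtype=np.uint8)
    b = np.array([4, 2, 6], dtype=np.uint8)
    np.minimum(a, b)  # element-wise minimum, replaces cv2.min(a, b)
    np.maximum(a, b)  # element-wise maximum, replaces cv2.max(a, b)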
Removing helps keep the firmware a bit smaller without losing any functionality. --- src/core.cpp | 58 ------------------------------------------------ src/core.h | 2 -- src/opencv_upy.c | 4 ---- 3 files changed, 64 deletions(-) diff --git a/src/core.cpp b/src/core.cpp index 18d3e7a..5ac0d82 100644 --- a/src/core.cpp +++ b/src/core.cpp @@ -41,61 +41,3 @@ mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_ // Return the result return mat_to_mp_obj(dst); } - -mp_obj_t cv2_core_max(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { - // Define the arguments - enum { ARG_src1, ARG_src2, ARG_dst }; - static const mp_arg_t allowed_args[] = { - { MP_QSTR_src1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_src2, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, - }; - - // Parse the arguments - mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; - mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); - - // Convert arguments to required types - Mat src1 = mp_obj_to_mat(args[ARG_src1].u_obj); - Mat src2 = mp_obj_to_mat(args[ARG_src2].u_obj); - Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); - - // Call the corresponding OpenCV function - try { - max(src1, src2, dst); - } catch(Exception& e) { - mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); - } - - // Return the result - return mat_to_mp_obj(dst); -} - -mp_obj_t cv2_core_min(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { - // Define the arguments - enum { ARG_src1, ARG_src2, ARG_dst }; - static const mp_arg_t allowed_args[] = { - { MP_QSTR_src1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_src2, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, - }; - - // Parse the arguments - mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; - mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); - - // Convert arguments to required types - Mat src1 = mp_obj_to_mat(args[ARG_src1].u_obj); - Mat src2 = mp_obj_to_mat(args[ARG_src2].u_obj); - Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); - - // Call the corresponding OpenCV function - try { - min(src1, src2, dst); - } catch(Exception& e) { - mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); - } - - // Return the result - return mat_to_mp_obj(dst); -} diff --git a/src/core.h b/src/core.h index 66f9538..6f4b062 100644 --- a/src/core.h +++ b/src/core.h @@ -2,5 +2,3 @@ #include "py/runtime.h" extern mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); -extern mp_obj_t cv2_core_max(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); -extern mp_obj_t cv2_core_min(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); diff --git a/src/opencv_upy.c b/src/opencv_upy.c index d8234b4..7e9faeb 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -7,8 +7,6 @@ // OpenCV core module static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_max_obj, 2, cv2_core_max); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_min_obj, 2, cv2_core_min); // OpenCV imgproc module static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine); @@ -153,8 +151,6 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { 
////////////////////////////////////////////////////////////////////////////
 
     { MP_ROM_QSTR(MP_QSTR_inRange), MP_ROM_PTR(&cv2_core_inRange_obj) },
-    { MP_ROM_QSTR(MP_QSTR_max), MP_ROM_PTR(&cv2_core_max_obj) },
-    { MP_ROM_QSTR(MP_QSTR_min), MP_ROM_PTR(&cv2_core_min_obj) },
 
     ////////////////////////////////////////////////////////////////////////////
     // OpenCV imgproc functions

From f4f776d3d8e0e61f554ee0c1fc9bf9732bd2b185 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 28 May 2025 17:15:59 -0600
Subject: [PATCH 008/158] Add Canny and Hough functions

HoughLinesP uses int32 for the output array, which ulab currently does
not support. Commented out for now.
---
 src/imgproc.cpp  | 322 +++++++++++++++++++++++++++++++++++++++++++++++
 src/imgproc.h    |   6 +
 src/opencv_upy.c |  21 +++-
 3 files changed, 348 insertions(+), 1 deletion(-)

diff --git a/src/imgproc.cpp b/src/imgproc.cpp
index 0636b6e..cdc941c 100644
--- a/src/imgproc.cpp
+++ b/src/imgproc.cpp
@@ -55,6 +55,41 @@ mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map
     return mat_to_mp_obj(img);
 }
 
+mp_obj_t cv2_imgproc_Canny(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_image, ARG_threshold1, ARG_threshold2, ARG_edges, ARG_apertureSize, ARG_L2gradient };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_threshold1, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_threshold2, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_edges, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_apertureSize, MP_ARG_INT, { .u_int = 3 } },
+        { MP_QSTR_L2gradient, MP_ARG_BOOL, { .u_bool = false } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+    int threshold1 = args[ARG_threshold1].u_int;
+    int threshold2 = args[ARG_threshold2].u_int;
+    Mat edges = mp_obj_to_mat(args[ARG_edges].u_obj);
+    int apertureSize = args[ARG_apertureSize].u_int;
+    bool L2gradient = args[ARG_L2gradient].u_bool;
+
+    // Call the corresponding OpenCV function
+    try {
+        Canny(image, edges, threshold1, threshold2, apertureSize, L2gradient);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Return the result
+    return mat_to_mp_obj(edges);
+}
+
 mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
     // Define the arguments
     enum { ARG_img, ARG_center, ARG_radius, ARG_color, ARG_thickness, ARG_lineType, ARG_shift };
@@ -409,6 +444,293 @@ mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_ar
     return mat_to_mp_obj(kernel);
 }
 
+mp_obj_t cv2_imgproc_HoughCircles(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_image, ARG_method, ARG_dp, ARG_minDist, ARG_circles, ARG_param1, ARG_param2, ARG_minRadius, ARG_maxRadius };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_method, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_dp, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_minDist, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_circles, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_param1,
MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_param2, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_minRadius, MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_maxRadius, MP_ARG_INT, { .u_int = 0 } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+    int method = args[ARG_method].u_int;
+    mp_float_t dp = mp_obj_get_float(args[ARG_dp].u_obj);
+    mp_float_t minDist = mp_obj_get_float(args[ARG_minDist].u_obj);
+    Mat circles = mp_obj_to_mat(args[ARG_circles].u_obj);
+    float param1;
+    if(args[ARG_param1].u_obj == mp_const_none)
+        param1 = 100; // Default value
+    else
+        param1 = mp_obj_get_float(args[ARG_param1].u_obj);
+    float param2;
+    if(args[ARG_param2].u_obj == mp_const_none)
+        param2 = 100; // Default value
+    else
+        param2 = mp_obj_get_float(args[ARG_param2].u_obj);
+    int minRadius = args[ARG_minRadius].u_int;
+    int maxRadius = args[ARG_maxRadius].u_int;
+
+    // Call the corresponding OpenCV function
+    try {
+        HoughCircles(image, circles, method, dp, minDist, param1, param2, minRadius, maxRadius);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Return the result
+    return mat_to_mp_obj(circles);
+}
+
+mp_obj_t cv2_imgproc_HoughCirclesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_image, ARG_method, ARG_dp, ARG_minDist, ARG_circles, ARG_param1, ARG_param2, ARG_minRadius, ARG_maxRadius };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_method, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_dp, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_minDist, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_circles, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_param1, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_param2, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_minRadius, MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_maxRadius, MP_ARG_INT, { .u_int = 0 } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+    int method = args[ARG_method].u_int;
+    mp_float_t dp = mp_obj_get_float(args[ARG_dp].u_obj);
+    mp_float_t minDist = mp_obj_get_float(args[ARG_minDist].u_obj);
+    Mat circles = mp_obj_to_mat(args[ARG_circles].u_obj);
+    float param1;
+    if(args[ARG_param1].u_obj == mp_const_none)
+        param1 = 100; // Default value
+    else
+        param1 = mp_obj_get_float(args[ARG_param1].u_obj);
+    float param2;
+    if(args[ARG_param2].u_obj == mp_const_none)
+        param2 = 100; // Default value
+    else
+        param2 = mp_obj_get_float(args[ARG_param2].u_obj);
+    int minRadius = args[ARG_minRadius].u_int;
+    int maxRadius = args[ARG_maxRadius].u_int;
+
+    // Vector to hold the circles and votes
+    std::vector<Vec4f> circles_acc;
+
+    // Call the corresponding OpenCV function
+    try {
+        HoughCircles(image, circles_acc, method, dp, minDist, param1, param2, minRadius, maxRadius);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Copy the vector of circles and votes to output
circles object + Mat(circles_acc).copyTo(circles); + + // Return the result + return mat_to_mp_obj(circles); +} + +mp_obj_t cv2_imgproc_HoughLines(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_image, ARG_rho, ARG_theta, ARG_threshold, ARG_lines, ARG_srn, ARG_stn, ARG_min_theta, ARG_max_theta, ARG_use_edgeval }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_rho, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_theta, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_threshold, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 100 } }, + { MP_QSTR_lines, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_srn, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_stn, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_min_theta, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_max_theta, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_use_edgeval, MP_ARG_BOOL, { .u_bool = false } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat image = mp_obj_to_mat(args[ARG_image].u_obj); + mp_float_t rho; + if(args[ARG_rho].u_obj == mp_const_none) + rho = 1; // Default value + else + rho = mp_obj_get_float(args[ARG_rho].u_obj); + mp_float_t theta; + if(args[ARG_theta].u_obj == mp_const_none) + theta = CV_PI / 180; // Default value + else + theta = mp_obj_get_float(args[ARG_theta].u_obj); + int threshold = args[ARG_threshold].u_int; + Mat lines = mp_obj_to_mat(args[ARG_lines].u_obj); + mp_float_t srn; + if(args[ARG_srn].u_obj == mp_const_none) + srn = 0; // Default value + else + srn = mp_obj_get_float(args[ARG_srn].u_obj); + mp_float_t stn; + if(args[ARG_stn].u_obj == mp_const_none) + stn = 0; // Default value + else + stn = mp_obj_get_float(args[ARG_stn].u_obj); + mp_float_t min_theta; + if(args[ARG_min_theta].u_obj == mp_const_none) + min_theta = 0; // Default value + else + min_theta = mp_obj_get_float(args[ARG_min_theta].u_obj); + mp_float_t max_theta; + if(args[ARG_max_theta].u_obj == mp_const_none) + max_theta = CV_PI; // Default value + else + max_theta = mp_obj_get_float(args[ARG_max_theta].u_obj); + bool use_edgeval = args[ARG_use_edgeval].u_bool; + + // Call the corresponding OpenCV function + try { + HoughLines(image, lines, rho, theta, threshold, srn, stn, min_theta, max_theta, use_edgeval); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(lines); +} + +// mp_obj_t cv2_imgproc_HoughLinesP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { +// // Define the arguments +// enum { ARG_image, ARG_rho, ARG_theta, ARG_threshold, ARG_lines, ARG_minLineLength, ARG_maxLineGap }; +// static const mp_arg_t allowed_args[] = { +// { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, +// { MP_QSTR_rho, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, +// { MP_QSTR_theta, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, +// { MP_QSTR_threshold, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 100 } }, +// { MP_QSTR_lines, MP_ARG_OBJ, { .u_obj = mp_const_none } }, +// { MP_QSTR_minLineLength, MP_ARG_OBJ, { .u_obj = mp_const_none } }, +// { MP_QSTR_maxLineGap, MP_ARG_OBJ, { .u_obj = mp_const_none } }, +// }; + 
+// // Parse the arguments
+// mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+// mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+// // Convert arguments to required types
+// Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+// mp_float_t rho = mp_obj_get_float(args[ARG_rho].u_obj);
+// mp_float_t theta = mp_obj_get_float(args[ARG_theta].u_obj);
+// int threshold = args[ARG_threshold].u_int;
+// Mat lines = mp_obj_to_mat(args[ARG_lines].u_obj);
+// mp_float_t minLineLength;
+// if(args[ARG_minLineLength].u_obj == mp_const_none)
+// minLineLength = 0; // Default value
+// else
+// minLineLength = mp_obj_get_float(args[ARG_minLineLength].u_obj);
+// mp_float_t maxLineGap;
+// if(args[ARG_maxLineGap].u_obj == mp_const_none)
+// maxLineGap = 0; // Default value
+// else
+// maxLineGap = mp_obj_get_float(args[ARG_maxLineGap].u_obj);
+
+// // Call the corresponding OpenCV function
+// try {
+// HoughLinesP(image, lines, rho, theta, threshold, minLineLength, maxLineGap);
+// } catch(Exception& e) {
+// mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+// }
+
+// // Return the result
+// return mat_to_mp_obj(lines);
+// }
+
+mp_obj_t cv2_imgproc_HoughLinesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_image, ARG_rho, ARG_theta, ARG_threshold, ARG_lines, ARG_srn, ARG_stn, ARG_min_theta, ARG_max_theta };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_rho, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_theta, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_threshold, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 100 } },
+        { MP_QSTR_lines, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_srn, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_stn, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_min_theta, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_max_theta, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+    mp_float_t rho;
+    if(args[ARG_rho].u_obj == mp_const_none)
+        rho = 1; // Default value
+    else
+        rho = mp_obj_get_float(args[ARG_rho].u_obj);
+    mp_float_t theta;
+    if(args[ARG_theta].u_obj == mp_const_none)
+        theta = CV_PI / 180; // Default value
+    else
+        theta = mp_obj_get_float(args[ARG_theta].u_obj);
+    int threshold = args[ARG_threshold].u_int;
+    Mat lines = mp_obj_to_mat(args[ARG_lines].u_obj);
+    mp_float_t srn;
+    if(args[ARG_srn].u_obj == mp_const_none)
+        srn = 0; // Default value
+    else
+        srn = mp_obj_get_float(args[ARG_srn].u_obj);
+    mp_float_t stn;
+    if(args[ARG_stn].u_obj == mp_const_none)
+        stn = 0; // Default value
+    else
+        stn = mp_obj_get_float(args[ARG_stn].u_obj);
+    mp_float_t min_theta;
+    if(args[ARG_min_theta].u_obj == mp_const_none)
+        min_theta = 0; // Default value
+    else
+        min_theta = mp_obj_get_float(args[ARG_min_theta].u_obj);
+    mp_float_t max_theta;
+    if(args[ARG_max_theta].u_obj == mp_const_none)
+        max_theta = CV_PI; // Default value
+    else
+        max_theta = mp_obj_get_float(args[ARG_max_theta].u_obj);
+
+    // Vector to hold the lines and votes
+    std::vector<Vec3f> lines_acc;
+
+    // Call the corresponding OpenCV function
+    try {
+        
HoughLines(image, lines_acc, rho, theta, threshold, srn, stn, min_theta, max_theta); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Copy the vector of lines and votes to output lines object + Mat(lines_acc).copyTo(lines); + + // Return the result + return mat_to_mp_obj(lines); +} + mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_img, ARG_pt1, ARG_pt2, ARG_color, ARG_thickness, ARG_lineType, ARG_shift }; diff --git a/src/imgproc.h b/src/imgproc.h index 479e765..9637348 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -2,6 +2,7 @@ #include "py/runtime.h" extern mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_Canny(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -11,6 +12,11 @@ extern mp_obj_t cv2_imgproc_erode(size_t n_args, const mp_obj_t *pos_args, mp_ma extern mp_obj_t cv2_imgproc_fillConvexPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_fillPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_HoughCircles(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_HoughCirclesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_HoughLines(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +// extern mp_obj_t cv2_imgproc_HoughLinesP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_HoughLinesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); diff --git a/src/opencv_upy.c b/src/opencv_upy.c index 7e9faeb..294d223 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -10,6 +10,7 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange); // OpenCV imgproc module static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Canny_obj, 3, cv2_imgproc_Canny); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_circle_obj, 4, cv2_imgproc_circle); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_cvtColor_obj, 2, cv2_imgproc_cvtColor); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_dilate_obj, 2, cv2_imgproc_dilate); @@ -19,6 +20,11 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_erode_obj, 2, cv2_imgproc_erode); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillConvexPoly_obj, 3, cv2_imgproc_fillConvexPoly); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillPoly_obj, 3, cv2_imgproc_fillPoly); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_getStructuringElement_obj, 2, cv2_imgproc_getStructuringElement); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCircles_obj, 4, 
cv2_imgproc_HoughCircles); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCirclesWithAccumulator_obj, 4, cv2_imgproc_HoughCirclesWithAccumulator); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLines_obj, 4, cv2_imgproc_HoughLines); +// static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesP_obj, 4, cv2_imgproc_HoughLinesP); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesWithAccumulator_obj, 4, cv2_imgproc_HoughLinesWithAccumulator); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_line_obj, 4, cv2_imgproc_line); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_putText_obj, 6, cv2_imgproc_putText); @@ -68,6 +74,13 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_MORPH_CROSS), MP_ROM_INT(1) }, { MP_ROM_QSTR(MP_QSTR_MORPH_ELLIPSE), MP_ROM_INT(2) }, + // Hough modes, from opencv2/imgproc.hpp + { MP_ROM_QSTR(MP_QSTR_HOUGH_STANDARD), MP_ROM_INT(0) }, + { MP_ROM_QSTR(MP_QSTR_HOUGH_PROBABILISTIC), MP_ROM_INT(1) }, + { MP_ROM_QSTR(MP_QSTR_HOUGH_MULTI_SCALE), MP_ROM_INT(2) }, + { MP_ROM_QSTR(MP_QSTR_HOUGH_GRADIENT), MP_ROM_INT(3) }, + { MP_ROM_QSTR(MP_QSTR_HOUGH_GRADIENT_ALT), MP_ROM_INT(4) }, + // Color conversion codes, from opencv2/imgproc.hpp { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2BGRA), MP_ROM_INT(0) }, { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2RGBA), MP_ROM_INT(0) }, @@ -126,7 +139,7 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_LINE_8), MP_ROM_INT(8) }, { MP_ROM_QSTR(MP_QSTR_LINE_AA), MP_ROM_INT(16) }, - // Fonts, from opencv2/imgproc.hpp + // Hershey fonts, from opencv2/imgproc.hpp { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SIMPLEX), MP_ROM_INT(0) }, { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_PLAIN), MP_ROM_INT(1) }, { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_DUPLEX), MP_ROM_INT(2) }, @@ -157,6 +170,7 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { //////////////////////////////////////////////////////////////////////////// { MP_ROM_QSTR(MP_QSTR_arrowedLine), MP_ROM_PTR(&cv2_imgproc_arrowedLine_obj) }, + { MP_ROM_QSTR(MP_QSTR_Canny), MP_ROM_PTR(&cv2_imgproc_Canny_obj) }, { MP_ROM_QSTR(MP_QSTR_circle), MP_ROM_PTR(&cv2_imgproc_circle_obj) }, { MP_ROM_QSTR(MP_QSTR_cvtColor), MP_ROM_PTR(&cv2_imgproc_cvtColor_obj) }, { MP_ROM_QSTR(MP_QSTR_dilate), MP_ROM_PTR(&cv2_imgproc_dilate_obj) }, @@ -166,6 +180,11 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_fillConvexPoly), MP_ROM_PTR(&cv2_imgproc_fillConvexPoly_obj) }, { MP_ROM_QSTR(MP_QSTR_fillPoly), MP_ROM_PTR(&cv2_imgproc_fillPoly_obj) }, { MP_ROM_QSTR(MP_QSTR_getStructuringElement), MP_ROM_PTR(&cv2_imgproc_getStructuringElement_obj) }, + { MP_ROM_QSTR(MP_QSTR_HoughCircles), MP_ROM_PTR(&cv2_imgproc_HoughCircles_obj) }, + { MP_ROM_QSTR(MP_QSTR_HoughCirclesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughCirclesWithAccumulator_obj) }, + { MP_ROM_QSTR(MP_QSTR_HoughLines), MP_ROM_PTR(&cv2_imgproc_HoughLines_obj) }, + // { MP_ROM_QSTR(MP_QSTR_HoughLinesP), MP_ROM_PTR(&cv2_imgproc_HoughLinesP_obj) }, + { MP_ROM_QSTR(MP_QSTR_HoughLinesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughLinesWithAccumulator_obj) }, { MP_ROM_QSTR(MP_QSTR_line), MP_ROM_PTR(&cv2_imgproc_line_obj) }, { MP_ROM_QSTR(MP_QSTR_morphologyEx), MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) }, { MP_ROM_QSTR(MP_QSTR_putText), MP_ROM_PTR(&cv2_imgproc_putText_obj) }, From c918dd5fae94be77043a58db321ff70b1aee13c8 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 30 May 2025 11:02:17 -0600 Subject: 
[PATCH 009/158] Add blurring/filtering functions --- src/imgproc.cpp | 212 +++++++++++++++++++++++++++++++++++++++++++++++ src/imgproc.h | 6 ++ src/opencv_upy.c | 12 +++ 3 files changed, 230 insertions(+) diff --git a/src/imgproc.cpp b/src/imgproc.cpp index cdc941c..7838240 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -55,6 +55,111 @@ mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map return mat_to_mp_obj(img); } +mp_obj_t cv2_imgproc_bilateralFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_d, ARG_sigmaColor, ARG_sigmaSpace, ARG_dst, ARG_borderType }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_d, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_sigmaColor, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_sigmaSpace, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + int d = args[ARG_d].u_int; + mp_float_t sigmaColor = mp_obj_get_float(args[ARG_sigmaColor].u_obj); + mp_float_t sigmaSpace = mp_obj_get_float(args[ARG_sigmaSpace].u_obj); + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + int borderType = args[ARG_borderType].u_int; + + // Call the corresponding OpenCV function + try { + bilateralFilter(src, dst, d, sigmaColor, sigmaSpace, borderType); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + +mp_obj_t cv2_imgproc_blur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_ksize, ARG_dst, ARG_anchor, ARG_borderType }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_ksize, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + Size ksize = mp_obj_to_size(args[ARG_ksize].u_obj); + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + Point anchor = args[ARG_anchor].u_obj == mp_const_none ? 
Point(-1,-1) : mp_obj_to_point(args[ARG_anchor].u_obj); + int borderType = args[ARG_borderType].u_int; + + // Call the corresponding OpenCV function + try { + blur(src, dst, ksize, anchor, borderType); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + +mp_obj_t cv2_imgproc_boxFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_ddepth, ARG_ksize, ARG_dst, ARG_anchor, ARG_normalize, ARG_borderType }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_ddepth, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = -1 } }, + { MP_QSTR_ksize, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_normalize, MP_ARG_BOOL, { .u_bool = true } }, + { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + int ddepth = args[ARG_ddepth].u_int; + Size ksize = mp_obj_to_size(args[ARG_ksize].u_obj); + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + Point anchor = args[ARG_anchor].u_obj == mp_const_none ? Point(-1,-1) : mp_obj_to_point(args[ARG_anchor].u_obj); + bool normalize = args[ARG_normalize].u_bool; + int borderType = args[ARG_borderType].u_int; + + // Call the corresponding OpenCV function + try { + boxFilter(src, dst, ddepth, ksize, anchor, normalize, borderType); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + mp_obj_t cv2_imgproc_Canny(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_image, ARG_threshold1, ARG_threshold2, ARG_edges, ARG_apertureSize, ARG_L2gradient }; @@ -408,6 +513,84 @@ mp_obj_t cv2_imgproc_fillPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t return mat_to_mp_obj(img); } +mp_obj_t cv2_imgproc_filter2D(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_ddepth, ARG_kernel, ARG_dst, ARG_anchor, ARG_delta, ARG_borderType }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_ddepth, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = -1 } }, + { MP_QSTR_kernel, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_delta, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + int ddepth = args[ARG_ddepth].u_int; + Mat kernel = mp_obj_to_mat(args[ARG_kernel].u_obj); + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + Point anchor; + if(args[ARG_anchor].u_obj == mp_const_none) + anchor = Point(-1,-1); // Default value + else 
+ anchor = mp_obj_to_point(args[ARG_anchor].u_obj); + mp_float_t delta = args[ARG_delta].u_obj == mp_const_none ? 0.0 : mp_obj_get_float(args[ARG_delta].u_obj); + int borderType = args[ARG_borderType].u_int; + + // Call the corresponding OpenCV function + try { + filter2D(src, dst, ddepth, kernel, anchor, delta, borderType); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + +mp_obj_t cv2_imgproc_GaussianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_ksize, ARG_sigmaX, ARG_dst, ARG_sigmaY, ARG_borderType, ARG_hint }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_ksize, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_sigmaX, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_sigmaY, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } }, + { MP_QSTR_hint, MP_ARG_INT, { .u_int = ALGO_HINT_DEFAULT } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + Size ksize = mp_obj_to_size(args[ARG_ksize].u_obj); + mp_float_t sigmaX = mp_obj_get_float(args[ARG_sigmaX].u_obj); + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + mp_float_t sigmaY = args[ARG_sigmaY].u_obj == mp_const_none ? sigmaX : mp_obj_get_float(args[ARG_sigmaY].u_obj); + int borderType = args[ARG_borderType].u_int; + AlgorithmHint hint = (AlgorithmHint) args[ARG_hint].u_int; + + // Call the corresponding OpenCV function + try { + GaussianBlur(src, dst, ksize, sigmaX, sigmaY, borderType, hint); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_shape, ARG_ksize, ARG_anchor }; @@ -768,6 +951,35 @@ mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_ return mat_to_mp_obj(img); } +mp_obj_t cv2_imgproc_medianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_ksize, ARG_dst }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_ksize, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + int ksize = args[ARG_ksize].u_int; + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + + // Call the corresponding OpenCV function + try { + medianBlur(src, dst, ksize); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the 
arguments enum { ARG_src, ARG_op, ARG_kernel, ARG_dst, ARG_anchor, ARG_iterations, ARG_borderType, ARG_borderValue }; diff --git a/src/imgproc.h b/src/imgproc.h index 9637348..cd78299 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -2,6 +2,9 @@ #include "py/runtime.h" extern mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_bilateralFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_blur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_boxFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_Canny(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -11,6 +14,8 @@ extern mp_obj_t cv2_imgproc_ellipse(size_t n_args, const mp_obj_t *pos_args, mp_ extern mp_obj_t cv2_imgproc_erode(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_fillConvexPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_fillPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_filter2D(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_GaussianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_HoughCircles(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_HoughCirclesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -18,6 +23,7 @@ extern mp_obj_t cv2_imgproc_HoughLines(size_t n_args, const mp_obj_t *pos_args, // extern mp_obj_t cv2_imgproc_HoughLinesP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_HoughLinesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_medianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_rectangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); diff --git a/src/opencv_upy.c b/src/opencv_upy.c index 294d223..8c62793 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -10,6 +10,9 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange); // OpenCV imgproc module static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_bilateralFilter_obj, 4, cv2_imgproc_bilateralFilter); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_blur_obj, 2, cv2_imgproc_blur); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boxFilter_obj, 3, cv2_imgproc_boxFilter); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Canny_obj, 3, cv2_imgproc_Canny); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_circle_obj, 4, cv2_imgproc_circle); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_cvtColor_obj, 2, cv2_imgproc_cvtColor); 
@@ -19,6 +22,8 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_ellipse_obj, 7, cv2_imgproc_ellips static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_erode_obj, 2, cv2_imgproc_erode); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillConvexPoly_obj, 3, cv2_imgproc_fillConvexPoly); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillPoly_obj, 3, cv2_imgproc_fillPoly); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_filter2D_obj, 3, cv2_imgproc_filter2D); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_GaussianBlur_obj, 3, cv2_imgproc_GaussianBlur); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_getStructuringElement_obj, 2, cv2_imgproc_getStructuringElement); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCircles_obj, 4, cv2_imgproc_HoughCircles); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCirclesWithAccumulator_obj, 4, cv2_imgproc_HoughCirclesWithAccumulator); @@ -26,6 +31,7 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLines_obj, 4, cv2_imgproc_Hou // static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesP_obj, 4, cv2_imgproc_HoughLinesP); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesWithAccumulator_obj, 4, cv2_imgproc_HoughLinesWithAccumulator); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_line_obj, 4, cv2_imgproc_line); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_medianBlur_obj, 2, cv2_imgproc_medianBlur); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_putText_obj, 6, cv2_imgproc_putText); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_rectangle_obj, 4, cv2_imgproc_rectangle); @@ -170,6 +176,9 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { //////////////////////////////////////////////////////////////////////////// { MP_ROM_QSTR(MP_QSTR_arrowedLine), MP_ROM_PTR(&cv2_imgproc_arrowedLine_obj) }, + { MP_ROM_QSTR(MP_QSTR_bilateralFilter), MP_ROM_PTR(&cv2_imgproc_bilateralFilter_obj) }, + { MP_ROM_QSTR(MP_QSTR_blur), MP_ROM_PTR(&cv2_imgproc_blur_obj) }, + { MP_ROM_QSTR(MP_QSTR_boxFilter), MP_ROM_PTR(&cv2_imgproc_boxFilter_obj) }, { MP_ROM_QSTR(MP_QSTR_Canny), MP_ROM_PTR(&cv2_imgproc_Canny_obj) }, { MP_ROM_QSTR(MP_QSTR_circle), MP_ROM_PTR(&cv2_imgproc_circle_obj) }, { MP_ROM_QSTR(MP_QSTR_cvtColor), MP_ROM_PTR(&cv2_imgproc_cvtColor_obj) }, @@ -179,6 +188,8 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_erode), MP_ROM_PTR(&cv2_imgproc_erode_obj) }, { MP_ROM_QSTR(MP_QSTR_fillConvexPoly), MP_ROM_PTR(&cv2_imgproc_fillConvexPoly_obj) }, { MP_ROM_QSTR(MP_QSTR_fillPoly), MP_ROM_PTR(&cv2_imgproc_fillPoly_obj) }, + { MP_ROM_QSTR(MP_QSTR_filter2D), MP_ROM_PTR(&cv2_imgproc_filter2D_obj) }, + { MP_ROM_QSTR(MP_QSTR_GaussianBlur), MP_ROM_PTR(&cv2_imgproc_GaussianBlur_obj) }, { MP_ROM_QSTR(MP_QSTR_getStructuringElement), MP_ROM_PTR(&cv2_imgproc_getStructuringElement_obj) }, { MP_ROM_QSTR(MP_QSTR_HoughCircles), MP_ROM_PTR(&cv2_imgproc_HoughCircles_obj) }, { MP_ROM_QSTR(MP_QSTR_HoughCirclesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughCirclesWithAccumulator_obj) }, @@ -186,6 +197,7 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { // { MP_ROM_QSTR(MP_QSTR_HoughLinesP), MP_ROM_PTR(&cv2_imgproc_HoughLinesP_obj) }, { MP_ROM_QSTR(MP_QSTR_HoughLinesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughLinesWithAccumulator_obj) }, { MP_ROM_QSTR(MP_QSTR_line), MP_ROM_PTR(&cv2_imgproc_line_obj) }, + { MP_ROM_QSTR(MP_QSTR_medianBlur), MP_ROM_PTR(&cv2_imgproc_medianBlur_obj) }, { MP_ROM_QSTR(MP_QSTR_morphologyEx), 
MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) }, { MP_ROM_QSTR(MP_QSTR_putText), MP_ROM_PTR(&cv2_imgproc_putText_obj) }, { MP_ROM_QSTR(MP_QSTR_rectangle), MP_ROM_PTR(&cv2_imgproc_rectangle_obj) }, From 1948925e81f0faacfc5ea1d7a31835560569b4d3 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 30 May 2025 11:22:35 -0600 Subject: [PATCH 010/158] Add gradient filter functions --- src/imgproc.cpp | 153 +++++++++++++++++++++++++++++++++++++++++++++++ src/imgproc.h | 4 ++ src/opencv_upy.c | 8 +++ 3 files changed, 165 insertions(+) diff --git a/src/imgproc.cpp b/src/imgproc.cpp index 7838240..f228120 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -914,6 +914,43 @@ mp_obj_t cv2_imgproc_HoughLinesWithAccumulator(size_t n_args, const mp_obj_t *po return mat_to_mp_obj(lines); } +mp_obj_t cv2_imgproc_Laplacian(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_ddepth, ARG_dst, ARG_ksize, ARG_scale, ARG_delta, ARG_borderType }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_ddepth, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = -1 } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_ksize, MP_ARG_INT, { .u_int = 1 } }, + { MP_QSTR_scale, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_delta, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + int ddepth = args[ARG_ddepth].u_int; + int ksize = args[ARG_ksize].u_int; + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + mp_float_t scale = args[ARG_scale].u_obj == mp_const_none ? 1.0 : mp_obj_get_float(args[ARG_scale].u_obj); + mp_float_t delta = args[ARG_delta].u_obj == mp_const_none ? 
0.0 : mp_obj_get_float(args[ARG_delta].u_obj); + int borderType = args[ARG_borderType].u_int; + + // Call the corresponding OpenCV function + try { + Laplacian(src, dst, ddepth, ksize, scale, delta, borderType); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_img, ARG_pt1, ARG_pt2, ARG_color, ARG_thickness, ARG_lineType, ARG_shift }; @@ -1106,3 +1143,119 @@ mp_obj_t cv2_imgproc_rectangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t // Return the result return mat_to_mp_obj(img); } + +mp_obj_t cv2_imgproc_Scharr(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_ddepth, ARG_dx, ARG_dy, ARG_dst, ARG_scale, ARG_delta, ARG_borderType }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_ddepth, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_dx, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_dy, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_scale, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_delta, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + int ddepth = args[ARG_ddepth].u_int; + int dx = args[ARG_dx].u_int; + int dy = args[ARG_dy].u_int; + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + mp_float_t scale = args[ARG_scale].u_obj == mp_const_none ? 1.0 : mp_obj_get_float(args[ARG_scale].u_obj); + mp_float_t delta = args[ARG_delta].u_obj == mp_const_none ? 
0.0 : mp_obj_get_float(args[ARG_delta].u_obj); + int borderType = args[ARG_borderType].u_int; + + // Call the corresponding OpenCV function + try { + Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + +mp_obj_t cv2_imgproc_Sobel(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_ddepth, ARG_dx, ARG_dy, ARG_dst, ARG_ksize, ARG_scale, ARG_delta, ARG_borderType }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_ddepth, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_dx, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_dy, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_ksize, MP_ARG_INT, { .u_int = 3 } }, + { MP_QSTR_scale, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_delta, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + int ddepth = args[ARG_ddepth].u_int; + int dx = args[ARG_dx].u_int; + int dy = args[ARG_dy].u_int; + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + int ksize = args[ARG_ksize].u_int; + mp_float_t scale = args[ARG_scale].u_obj == mp_const_none ? 1.0 : mp_obj_get_float(args[ARG_scale].u_obj); + mp_float_t delta = args[ARG_delta].u_obj == mp_const_none ? 
0.0 : mp_obj_get_float(args[ARG_delta].u_obj); + int borderType = args[ARG_borderType].u_int; + + // Call the corresponding OpenCV function + try { + Sobel(src, dst, ddepth, dx, dy, ksize, scale, delta, borderType); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + +mp_obj_t cv2_imgproc_spatialGradient(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_dx, ARG_dy, ARG_ksize, ARG_borderType }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_dx, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_dy, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_ksize, MP_ARG_INT, { .u_int = 3 } }, + { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_DEFAULT } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + Mat dx = mp_obj_to_mat(args[ARG_dx].u_obj); + Mat dy = mp_obj_to_mat(args[ARG_dy].u_obj); + int ksize = args[ARG_ksize].u_int; + int borderType = args[ARG_borderType].u_int; + + // Call the corresponding OpenCV function + try { + spatialGradient(src, dx, dy, ksize, borderType); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + mp_obj_t result[2]; + result[0] = mat_to_mp_obj(dx); + result[1] = mat_to_mp_obj(dy); + return mp_obj_new_tuple(2, result); +} diff --git a/src/imgproc.h b/src/imgproc.h index cd78299..134dc42 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -22,8 +22,12 @@ extern mp_obj_t cv2_imgproc_HoughCirclesWithAccumulator(size_t n_args, const mp_ extern mp_obj_t cv2_imgproc_HoughLines(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); // extern mp_obj_t cv2_imgproc_HoughLinesP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_HoughLinesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_Laplacian(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_medianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_rectangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_Scharr(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_Sobel(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_spatialGradient(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); diff --git a/src/opencv_upy.c b/src/opencv_upy.c index 8c62793..2b9d443 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -30,11 +30,15 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCirclesWithAccumulator_obj, 4 static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLines_obj, 4, cv2_imgproc_HoughLines); // static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesP_obj, 4, cv2_imgproc_HoughLinesP); static 
MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesWithAccumulator_obj, 4, cv2_imgproc_HoughLinesWithAccumulator); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Laplacian_obj, 2, cv2_imgproc_Laplacian); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_line_obj, 4, cv2_imgproc_line); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_medianBlur_obj, 2, cv2_imgproc_medianBlur); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_putText_obj, 6, cv2_imgproc_putText); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_rectangle_obj, 4, cv2_imgproc_rectangle); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Scharr_obj, 4, cv2_imgproc_Scharr); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Sobel_obj, 4, cv2_imgproc_Sobel); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_spatialGradient_obj, 1, cv2_imgproc_spatialGradient); //////////////////////////////////////////////////////////////////////////////// // Module attributes @@ -196,11 +200,15 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_HoughLines), MP_ROM_PTR(&cv2_imgproc_HoughLines_obj) }, // { MP_ROM_QSTR(MP_QSTR_HoughLinesP), MP_ROM_PTR(&cv2_imgproc_HoughLinesP_obj) }, { MP_ROM_QSTR(MP_QSTR_HoughLinesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughLinesWithAccumulator_obj) }, + { MP_ROM_QSTR(MP_QSTR_Laplacian), MP_ROM_PTR(&cv2_imgproc_Laplacian_obj) }, { MP_ROM_QSTR(MP_QSTR_line), MP_ROM_PTR(&cv2_imgproc_line_obj) }, { MP_ROM_QSTR(MP_QSTR_medianBlur), MP_ROM_PTR(&cv2_imgproc_medianBlur_obj) }, { MP_ROM_QSTR(MP_QSTR_morphologyEx), MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) }, { MP_ROM_QSTR(MP_QSTR_putText), MP_ROM_PTR(&cv2_imgproc_putText_obj) }, { MP_ROM_QSTR(MP_QSTR_rectangle), MP_ROM_PTR(&cv2_imgproc_rectangle_obj) }, + { MP_ROM_QSTR(MP_QSTR_Scharr), MP_ROM_PTR(&cv2_imgproc_Scharr_obj) }, + { MP_ROM_QSTR(MP_QSTR_Sobel), MP_ROM_PTR(&cv2_imgproc_Sobel_obj) }, + { MP_ROM_QSTR(MP_QSTR_spatialGradient), MP_ROM_PTR(&cv2_imgproc_spatialGradient_obj) }, }; static MP_DEFINE_CONST_DICT(cv2_module_globals, cv2_module_globals_table); From da3452c0b2da6d2d67c099a246c0164d1c81232f Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 30 May 2025 11:46:22 -0600 Subject: [PATCH 011/158] Add thresholding functions --- src/imgproc.cpp | 70 ++++++++++++++++++++++++++++++++++++++++++++++++ src/imgproc.h | 2 ++ src/opencv_upy.c | 18 +++++++++++++ 3 files changed, 90 insertions(+) diff --git a/src/imgproc.cpp b/src/imgproc.cpp index f228120..c655fdf 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -12,6 +12,43 @@ extern "C" { using namespace cv; +mp_obj_t cv2_imgproc_adaptiveThreshold(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_maxValue, ARG_adaptiveMethod, ARG_thresholdType, ARG_blockSize, ARG_C, ARG_dst }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_maxValue, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_adaptiveMethod, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_thresholdType, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_blockSize, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_C, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, 
MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + mp_float_t maxValue = mp_obj_get_float(args[ARG_maxValue].u_obj); + int adaptiveMethod = args[ARG_adaptiveMethod].u_int; + int thresholdType = args[ARG_thresholdType].u_int; + int blockSize = args[ARG_blockSize].u_int; + mp_float_t C = mp_obj_get_float(args[ARG_C].u_obj); + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + + // Call the corresponding OpenCV function + try { + adaptiveThreshold(src, dst, maxValue, adaptiveMethod, thresholdType, blockSize, C); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_img, ARG_pt1, ARG_pt2, ARG_color, ARG_thickness, ARG_line_type, ARG_shift, ARG_tipLength }; @@ -1259,3 +1296,36 @@ mp_obj_t cv2_imgproc_spatialGradient(size_t n_args, const mp_obj_t *pos_args, mp result[1] = mat_to_mp_obj(dy); return mp_obj_new_tuple(2, result); } + +mp_obj_t cv2_imgproc_threshold(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_thresh, ARG_maxval, ARG_type, ARG_dst }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_thresh, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_maxval, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_type, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = THRESH_BINARY } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + mp_float_t thresh = mp_obj_get_float(args[ARG_thresh].u_obj); + mp_float_t maxval = mp_obj_get_float(args[ARG_maxval].u_obj); + int type = args[ARG_type].u_int; + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + + // Call the corresponding OpenCV function + try { + threshold(src, dst, thresh, maxval, type); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} diff --git a/src/imgproc.h b/src/imgproc.h index 134dc42..a2ad3e9 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -1,6 +1,7 @@ // C headers #include "py/runtime.h" +extern mp_obj_t cv2_imgproc_adaptiveThreshold(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_bilateralFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_blur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -31,3 +32,4 @@ extern mp_obj_t cv2_imgproc_rectangle(size_t n_args, const mp_obj_t *pos_args, m extern mp_obj_t cv2_imgproc_Scharr(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_Sobel(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_spatialGradient(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_threshold(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); diff --git 
a/src/opencv_upy.c b/src/opencv_upy.c index 2b9d443..2aabfd1 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -9,6 +9,7 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange); // OpenCV imgproc module +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_adaptiveThreshold_obj, 6, cv2_imgproc_adaptiveThreshold); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_bilateralFilter_obj, 4, cv2_imgproc_bilateralFilter); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_blur_obj, 2, cv2_imgproc_blur); @@ -39,6 +40,7 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_rectangle_obj, 4, cv2_imgproc_rect static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Scharr_obj, 4, cv2_imgproc_Scharr); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Sobel_obj, 4, cv2_imgproc_Sobel); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_spatialGradient_obj, 1, cv2_imgproc_spatialGradient); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_threshold); //////////////////////////////////////////////////////////////////////////////// // Module attributes @@ -84,6 +86,20 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_MORPH_CROSS), MP_ROM_INT(1) }, { MP_ROM_QSTR(MP_QSTR_MORPH_ELLIPSE), MP_ROM_INT(2) }, + // Threshold types, from opencv2/imgproc.hpp + { MP_ROM_QSTR(MP_QSTR_THRESH_BINARY), MP_ROM_INT(0) }, + { MP_ROM_QSTR(MP_QSTR_THRESH_BINARY_INV), MP_ROM_INT(1) }, + { MP_ROM_QSTR(MP_QSTR_THRESH_TRUNC), MP_ROM_INT(2) }, + { MP_ROM_QSTR(MP_QSTR_THRESH_TOZERO), MP_ROM_INT(3) }, + { MP_ROM_QSTR(MP_QSTR_THRESH_TOZERO_INV), MP_ROM_INT(4) }, + { MP_ROM_QSTR(MP_QSTR_THRESH_MASK), MP_ROM_INT(7) }, + { MP_ROM_QSTR(MP_QSTR_THRESH_OTSU), MP_ROM_INT(8) }, + { MP_ROM_QSTR(MP_QSTR_THRESH_TRIANGLE), MP_ROM_INT(16) }, + + // Adaptive threshold methods, from opencv2/imgproc.hpp + { MP_ROM_QSTR(MP_QSTR_ADAPTIVE_THRESH_MEAN_C), MP_ROM_INT(0) }, + { MP_ROM_QSTR(MP_QSTR_ADAPTIVE_THRESH_GAUSSIAN_C), MP_ROM_INT(1) }, + // Hough modes, from opencv2/imgproc.hpp { MP_ROM_QSTR(MP_QSTR_HOUGH_STANDARD), MP_ROM_INT(0) }, { MP_ROM_QSTR(MP_QSTR_HOUGH_PROBABILISTIC), MP_ROM_INT(1) }, @@ -179,6 +195,7 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { // OpenCV imgproc functions //////////////////////////////////////////////////////////////////////////// + { MP_ROM_QSTR(MP_QSTR_adaptiveThreshold), MP_ROM_PTR(&cv2_imgproc_adaptiveThreshold_obj) }, { MP_ROM_QSTR(MP_QSTR_arrowedLine), MP_ROM_PTR(&cv2_imgproc_arrowedLine_obj) }, { MP_ROM_QSTR(MP_QSTR_bilateralFilter), MP_ROM_PTR(&cv2_imgproc_bilateralFilter_obj) }, { MP_ROM_QSTR(MP_QSTR_blur), MP_ROM_PTR(&cv2_imgproc_blur_obj) }, @@ -209,6 +226,7 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_Scharr), MP_ROM_PTR(&cv2_imgproc_Scharr_obj) }, { MP_ROM_QSTR(MP_QSTR_Sobel), MP_ROM_PTR(&cv2_imgproc_Sobel_obj) }, { MP_ROM_QSTR(MP_QSTR_spatialGradient), MP_ROM_PTR(&cv2_imgproc_spatialGradient_obj) }, + { MP_ROM_QSTR(MP_QSTR_threshold), MP_ROM_PTR(&cv2_imgproc_threshold_obj) }, }; static MP_DEFINE_CONST_DICT(cv2_module_globals, cv2_module_globals_table); From 60cd3681ef3d0fc696005afb1b94ef8a44244d22 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 30 May 2025 16:15:33 -0600 Subject: [PATCH 012/158] Add template matching --- src/imgproc.cpp | 33 +++++++++++++++++++++++++++++++++ src/imgproc.h | 1 + src/opencv_upy.c | 10 ++++++++++ 3 files changed, 44 insertions(+) diff --git a/src/imgproc.cpp 
b/src/imgproc.cpp index c655fdf..5605bc7 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -1025,6 +1025,39 @@ mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_ return mat_to_mp_obj(img); } +mp_obj_t cv2_imgproc_matchTemplate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_img, ARG_templ, ARG_method, ARG_result, ARG_mask }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_templ, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_method, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = TM_CCOEFF_NORMED } }, + { MP_QSTR_result, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_mask, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat img = mp_obj_to_mat(args[ARG_img].u_obj); + Mat templ = mp_obj_to_mat(args[ARG_templ].u_obj); + int method = args[ARG_method].u_int; + Mat result = mp_obj_to_mat(args[ARG_result].u_obj); + Mat mask = mp_obj_to_mat(args[ARG_mask].u_obj); + + // Call the corresponding OpenCV function + try { + matchTemplate(img, templ, result, method, mask); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(result); +} + mp_obj_t cv2_imgproc_medianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_ksize, ARG_dst }; diff --git a/src/imgproc.h b/src/imgproc.h index a2ad3e9..0fb68fa 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -25,6 +25,7 @@ extern mp_obj_t cv2_imgproc_HoughLines(size_t n_args, const mp_obj_t *pos_args, extern mp_obj_t cv2_imgproc_HoughLinesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_Laplacian(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_matchTemplate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_medianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); diff --git a/src/opencv_upy.c b/src/opencv_upy.c index 2aabfd1..10fc2ed 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -33,6 +33,7 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLines_obj, 4, cv2_imgproc_Hou static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesWithAccumulator_obj, 4, cv2_imgproc_HoughLinesWithAccumulator); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Laplacian_obj, 2, cv2_imgproc_Laplacian); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_line_obj, 4, cv2_imgproc_line); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_matchTemplate_obj, 3, cv2_imgproc_matchTemplate); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_medianBlur_obj, 2, cv2_imgproc_medianBlur); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_putText_obj, 6, cv2_imgproc_putText); @@ -184,6 +185,14 @@ static const mp_rom_map_elem_t 
cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_MARKER_SQUARE), MP_ROM_INT(4) }, { MP_ROM_QSTR(MP_QSTR_MARKER_TRIANGLE_UP), MP_ROM_INT(5) }, { MP_ROM_QSTR(MP_QSTR_MARKER_TRIANGLE_DOWN), MP_ROM_INT(6) }, + + // Template matching modes, from opencv2/imgproc.hpp + { MP_ROM_QSTR(MP_QSTR_TM_SQDIFF), MP_ROM_INT(0) }, + { MP_ROM_QSTR(MP_QSTR_TM_SQDIFF_NORMED), MP_ROM_INT(1) }, + { MP_ROM_QSTR(MP_QSTR_TM_CCORR), MP_ROM_INT(2) }, + { MP_ROM_QSTR(MP_QSTR_TM_CCORR_NORMED), MP_ROM_INT(3) }, + { MP_ROM_QSTR(MP_QSTR_TM_CCOEFF), MP_ROM_INT(4) }, + { MP_ROM_QSTR(MP_QSTR_TM_CCOEFF_NORMED), MP_ROM_INT(5) }, //////////////////////////////////////////////////////////////////////////// // OpenCV core functions @@ -219,6 +228,7 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_HoughLinesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughLinesWithAccumulator_obj) }, { MP_ROM_QSTR(MP_QSTR_Laplacian), MP_ROM_PTR(&cv2_imgproc_Laplacian_obj) }, { MP_ROM_QSTR(MP_QSTR_line), MP_ROM_PTR(&cv2_imgproc_line_obj) }, + { MP_ROM_QSTR(MP_QSTR_matchTemplate), MP_ROM_PTR(&cv2_imgproc_matchTemplate_obj) }, { MP_ROM_QSTR(MP_QSTR_medianBlur), MP_ROM_PTR(&cv2_imgproc_medianBlur_obj) }, { MP_ROM_QSTR(MP_QSTR_morphologyEx), MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) }, { MP_ROM_QSTR(MP_QSTR_putText), MP_ROM_PTR(&cv2_imgproc_putText_obj) }, From ec055742c64ce44623cdce287256a4d67f572dea Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 30 May 2025 17:01:59 -0600 Subject: [PATCH 013/158] Add connectedComponents --- src/imgproc.cpp | 80 ++++++++++++++++++++++++++++++++++++++++++++++++ src/imgproc.h | 2 ++ src/opencv_upy.c | 12 ++++++++ 3 files changed, 94 insertions(+) diff --git a/src/imgproc.cpp b/src/imgproc.cpp index 5605bc7..6ec5772 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -232,6 +232,86 @@ mp_obj_t cv2_imgproc_Canny(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw return mat_to_mp_obj(edges); } +mp_obj_t cv2_imgproc_connectedComponents(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_image, ARG_labels, ARG_connectivity, ARG_ltype }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_labels, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_connectivity, MP_ARG_INT, { .u_int = 8 } }, + { MP_QSTR_ltype, MP_ARG_INT, { .u_int = CV_16U } }, // Normally CV_32S, but ulab doesn't support 32-bit integers + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat image = mp_obj_to_mat(args[ARG_image].u_obj); + Mat labels = mp_obj_to_mat(args[ARG_labels].u_obj); + int connectivity = args[ARG_connectivity].u_int; + int ltype = args[ARG_ltype].u_int; + + // Return value + int retval = 0; + + // Call the corresponding OpenCV function + try { + retval = connectedComponents(image, labels, connectivity, ltype); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + mp_obj_t result[2]; + result[0] = mp_obj_new_int(retval); + result[1] = mat_to_mp_obj(labels); + return mp_obj_new_tuple(2, result); +} + +// mp_obj_t cv2_imgproc_connectedComponentsWithStats(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { +// // Define the arguments +// enum { ARG_image, ARG_labels, ARG_stats, ARG_centroids, ARG_connectivity, ARG_ltype }; 
+// static const mp_arg_t allowed_args[] = { +// { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, +// { MP_QSTR_labels, MP_ARG_OBJ, { .u_obj = mp_const_none } }, +// { MP_QSTR_stats, MP_ARG_OBJ, { .u_obj = mp_const_none } }, +// { MP_QSTR_centroids, MP_ARG_OBJ, { .u_obj = mp_const_none } }, +// { MP_QSTR_connectivity, MP_ARG_INT, { .u_int = 8 } }, +// { MP_QSTR_ltype, MP_ARG_INT, { .u_int = CV_16U } }, // Normally CV_32S, but ulab doesn't support 32-bit integers +// }; + +// // Parse the arguments +// mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; +// mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + +// // Convert arguments to required types +// Mat image = mp_obj_to_mat(args[ARG_image].u_obj); +// Mat labels = mp_obj_to_mat(args[ARG_labels].u_obj); +// Mat stats = mp_obj_to_mat(args[ARG_stats].u_obj); +// Mat centroids = mp_obj_to_mat(args[ARG_centroids].u_obj); +// int connectivity = args[ARG_connectivity].u_int; +// int ltype = args[ARG_ltype].u_int; + +// // Return value +// int retval = 0; + +// // Call the corresponding OpenCV function +// try { +// retval = connectedComponentsWithStats(image, labels, stats, centroids, connectivity, ltype); +// } catch(Exception& e) { +// mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); +// } + +// // Return the result +// mp_obj_t result[4]; +// result[0] = mp_obj_new_int(retval); +// result[1] = mat_to_mp_obj(labels); +// result[2] = mat_to_mp_obj(stats); +// result[3] = mat_to_mp_obj(centroids); +// return mp_obj_new_tuple(4, result); +// } + mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_img, ARG_center, ARG_radius, ARG_color, ARG_thickness, ARG_lineType, ARG_shift }; diff --git a/src/imgproc.h b/src/imgproc.h index 0fb68fa..75480ed 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -7,6 +7,8 @@ extern mp_obj_t cv2_imgproc_bilateralFilter(size_t n_args, const mp_obj_t *pos_a extern mp_obj_t cv2_imgproc_blur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_boxFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_Canny(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_connectedComponents(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +// extern mp_obj_t cv2_imgproc_connectedComponentsWithStats(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); diff --git a/src/opencv_upy.c b/src/opencv_upy.c index 10fc2ed..b63a18d 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -15,6 +15,8 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_bilateralFilter_obj, 4, cv2_imgpro static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_blur_obj, 2, cv2_imgproc_blur); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boxFilter_obj, 3, cv2_imgproc_boxFilter); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Canny_obj, 3, cv2_imgproc_Canny); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponents_obj, 1, cv2_imgproc_connectedComponents); +// static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponentsWithStats_obj, 1, cv2_imgproc_connectedComponentsWithStats); static 
MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_circle_obj, 4, cv2_imgproc_circle); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_cvtColor_obj, 2, cv2_imgproc_cvtColor); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_dilate_obj, 2, cv2_imgproc_dilate); @@ -61,6 +63,14 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { // include them here because it's C++ and this is C, so we have to redefine // them here. Only a subset of the most common conversions are included. + // OpenCV data types, from opencv2/core/hal/interface.h + { MP_ROM_QSTR(MP_QSTR_CV_8U), MP_ROM_INT(0) }, + { MP_ROM_QSTR(MP_QSTR_CV_8S), MP_ROM_INT(1) }, + { MP_ROM_QSTR(MP_QSTR_CV_16U), MP_ROM_INT(2) }, + { MP_ROM_QSTR(MP_QSTR_CV_16S), MP_ROM_INT(3) }, + { MP_ROM_QSTR(MP_QSTR_CV_32F), MP_ROM_INT(4) }, + // Other types are currently not supported by ulab + // Border types, from opencv2/core/base.hpp { MP_ROM_QSTR(MP_QSTR_BORDER_CONSTANT), MP_ROM_INT(0) }, { MP_ROM_QSTR(MP_QSTR_BORDER_REPLICATE), MP_ROM_INT(1) }, @@ -210,6 +220,8 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_blur), MP_ROM_PTR(&cv2_imgproc_blur_obj) }, { MP_ROM_QSTR(MP_QSTR_boxFilter), MP_ROM_PTR(&cv2_imgproc_boxFilter_obj) }, { MP_ROM_QSTR(MP_QSTR_Canny), MP_ROM_PTR(&cv2_imgproc_Canny_obj) }, + { MP_ROM_QSTR(MP_QSTR_connectedComponents), MP_ROM_PTR(&cv2_imgproc_connectedComponents_obj) }, + // { MP_ROM_QSTR(MP_QSTR_connectedComponentsWithStats), MP_ROM_PTR(&cv2_imgproc_connectedComponentsWithStats_obj) }, { MP_ROM_QSTR(MP_QSTR_circle), MP_ROM_PTR(&cv2_imgproc_circle_obj) }, { MP_ROM_QSTR(MP_QSTR_cvtColor), MP_ROM_PTR(&cv2_imgproc_cvtColor_obj) }, { MP_ROM_QSTR(MP_QSTR_dilate), MP_ROM_PTR(&cv2_imgproc_dilate_obj) }, From 48fcb8f8f599af2903a6c6a1521f692efe3ed755 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 2 Jun 2025 17:45:04 -0600 Subject: [PATCH 014/158] Initial imshow() implementation --- drivers/display/st7789_spi.py | 352 ++++++++++++++++++++++++++++++++++ examples/hello_opencv.py | 51 +++++ src/highgui.cpp | 45 +++++ src/highgui.h | 4 + src/opencv_upy.c | 10 + src/opencv_upy.cmake | 1 + 6 files changed, 463 insertions(+) create mode 100644 drivers/display/st7789_spi.py create mode 100644 examples/hello_opencv.py create mode 100644 src/highgui.cpp create mode 100644 src/highgui.h diff --git a/drivers/display/st7789_spi.py b/drivers/display/st7789_spi.py new file mode 100644 index 0000000..9ea7c53 --- /dev/null +++ b/drivers/display/st7789_spi.py @@ -0,0 +1,352 @@ +# Modified from: +# https://github.com/easytarget/st7789-framebuffer/blob/main/st7789_purefb.py + +import struct +from time import sleep_ms +from ulab import numpy as np +import cv2 + +# ST7789 commands +_ST7789_SWRESET = b"\x01" +_ST7789_SLPIN = b"\x10" +_ST7789_SLPOUT = b"\x11" +_ST7789_NORON = b"\x13" +_ST7789_INVOFF = b"\x20" +_ST7789_INVON = b"\x21" +_ST7789_DISPOFF = b"\x28" +_ST7789_DISPON = b"\x29" +_ST7789_CASET = b"\x2a" +_ST7789_RASET = b"\x2b" +_ST7789_RAMWR = b"\x2c" +_ST7789_VSCRDEF = b"\x33" +_ST7789_COLMOD = b"\x3a" +_ST7789_MADCTL = b"\x36" +_ST7789_VSCSAD = b"\x37" +_ST7789_RAMCTL = b"\xb0" + +# MADCTL bits +_ST7789_MADCTL_MY = const(0x80) +_ST7789_MADCTL_MX = const(0x40) +_ST7789_MADCTL_MV = const(0x20) +_ST7789_MADCTL_ML = const(0x10) +_ST7789_MADCTL_BGR = const(0x08) +_ST7789_MADCTL_MH = const(0x04) +_ST7789_MADCTL_RGB = const(0x00) + +RGB = 0x00 +BGR = 0x08 + +# 8 basic color definitions +BLACK = const(0x0000) +BLUE = const(0x001F) +RED = const(0xF800) +GREEN = const(0x07E0) +CYAN = const(0x07FF) +MAGENTA = const(0xF81F) 
+YELLOW = const(0xFFE0) +WHITE = const(0xFFFF) + +_ENCODE_POS = const(">HH") + +_BIT7 = const(0x80) +_BIT6 = const(0x40) +_BIT5 = const(0x20) +_BIT4 = const(0x10) +_BIT3 = const(0x08) +_BIT2 = const(0x04) +_BIT1 = const(0x02) +_BIT0 = const(0x01) + +# Rotation tables +# (madctl, width, height, xstart, ystart)[rotation % 4] + +_DISPLAY_240x320 = ( + (0x00, 240, 320, 0, 0), + (0x60, 320, 240, 0, 0), + (0xc0, 240, 320, 0, 0), + (0xa0, 320, 240, 0, 0)) + +_DISPLAY_170x320 = ( + (0x00, 170, 320, 35, 0), + (0x60, 320, 170, 0, 35), + (0xc0, 170, 320, 35, 0), + (0xa0, 320, 170, 0, 35)) + +_DISPLAY_240x240 = ( + (0x00, 240, 240, 0, 0), + (0x60, 240, 240, 0, 0), + (0xc0, 240, 240, 0, 80), + (0xa0, 240, 240, 80, 0)) + +_DISPLAY_135x240 = ( + (0x00, 135, 240, 52, 40), + (0x60, 240, 135, 40, 53), + (0xc0, 135, 240, 53, 40), + (0xa0, 240, 135, 40, 52)) + +_DISPLAY_128x128 = ( + (0x00, 128, 128, 2, 1), + (0x60, 128, 128, 1, 2), + (0xc0, 128, 128, 2, 1), + (0xa0, 128, 128, 1, 2)) + +# Supported displays (physical width, physical height, rotation table) +_SUPPORTED_DISPLAYS = ( + (240, 320, _DISPLAY_240x320), + (170, 320, _DISPLAY_170x320), + (240, 240, _DISPLAY_240x240), + (135, 240, _DISPLAY_135x240), + (128, 128, _DISPLAY_128x128)) + +# init tuple format (b'command', b'data', delay_ms) +_ST7789_INIT_CMDS = ( + ( b'\x11', b'\x00', 120), # Exit sleep mode + ( b'\x13', b'\x00', 0), # Turn on the display + ( b'\xb6', b'\x0a\x82', 0), # Set display function control + ( b'\x3a', b'\x55', 10), # Set pixel format to 16 bits per pixel (RGB565) + ( b'\xb2', b'\x0c\x0c\x00\x33\x33', 0), # Set porch control + ( b'\xb7', b'\x35', 0), # Set gate control + ( b'\xbb', b'\x28', 0), # Set VCOMS setting + ( b'\xc0', b'\x0c', 0), # Set power control 1 + ( b'\xc2', b'\x01\xff', 0), # Set power control 2 + ( b'\xc3', b'\x10', 0), # Set power control 3 + ( b'\xc4', b'\x20', 0), # Set power control 4 + ( b'\xc6', b'\x0f', 0), # Set VCOM control 1 + ( b'\xd0', b'\xa4\xa1', 0), # Set power control A + # Set gamma curve positive polarity + ( b'\xe0', b'\xd0\x00\x02\x07\x0a\x28\x32\x44\x42\x06\x0e\x12\x14\x17', 0), + # Set gamma curve negative polarity + ( b'\xe1', b'\xd0\x00\x02\x07\x0a\x28\x31\x54\x47\x0e\x1c\x17\x1b\x1e', 0), + ( b'\x21', b'\x00', 0), # Enable display inversion + ( b'\x29', b'\x00', 120) # Turn on the display +) + +class ST7789(): + """ + ST7789 driver class base + """ + def __init__(self, width, height, backlight, bright, rotation, color_order, reverse_bytes_in_word): + """ + Initialize display and backlight. + """ + # Initial dimensions and offsets; will be overridden when rotation applied + self.width = width + self.height = height + self.xstart = 0 + self.ystart = 0 + # backlight pin + self.backlight = backlight + self._pwm_bl = True + # Check display is known and get rotation table + self.rotations = self._find_rotations(width, height) + if not self.rotations: + supported_displays = ", ".join( + [f"{display[0]}x{display[1]}" for display in _SUPPORTED_DISPLAYS]) + raise ValueError( + f"Unsupported {width}x{height} display. 
Supported displays: {supported_displays}") + # Colors + self.color_order = color_order + self.needs_swap = reverse_bytes_in_word + # init the st7789 + self.init_cmds = _ST7789_INIT_CMDS + self.soft_reset() + # Yes, send init twice, once is not always enough + self.send_init(self.init_cmds) + self.send_init(self.init_cmds) + # Initial rotation + self._rotation = rotation % 4 + # Apply rotation + self.rotation(self._rotation) + # Create the framebuffer for the correct rotation + self.buffer = np.zeros((self.rotations[self._rotation][2], self.rotations[self._rotation][1], 2), dtype=np.uint8) + + def send_init(self, commands): + """ + Send initialisation commands to display. + """ + for command, data, delay in commands: + self._write(command, data) + sleep_ms(delay) + + def soft_reset(self): + """ + Soft reset display. + """ + self._write(_ST7789_SWRESET) + sleep_ms(150) + + def _find_rotations(self, width, height): + """ Find the correct rotation for our display or return None """ + for display in _SUPPORTED_DISPLAYS: + if display[0] == width and display[1] == height: + return display[2] + return None + + def rotation(self, rotation): + """ + Set display rotation. + + Args: + rotation (int): + - 0-Portrait + - 1-Landscape + - 2-Inverted Portrait + - 3-Inverted Landscape + """ + if ((rotation % 2) != (self._rotation % 2)) and (self.width != self.height): + # non-square displays can currently only be rotated by 180 degrees + # TODO: can framebuffer of super class be destroyed and re-created + # to match the new dimensions? or it's width/height changed? + return + + # find rotation parameters and send command + rotation %= len(self.rotations) + ( madctl, + self.width, + self.height, + self.xstart, + self.ystart, ) = self.rotations[rotation] + if self.color_order == BGR: + madctl |= _ST7789_MADCTL_BGR + else: + madctl &= ~_ST7789_MADCTL_BGR + self._write(_ST7789_MADCTL, bytes([madctl])) + # Set window for writing into + self._write(_ST7789_CASET, + struct.pack(_ENCODE_POS, self.xstart, self.width + self.xstart - 1)) + self._write(_ST7789_RASET, + struct.pack(_ENCODE_POS, self.ystart, self.height + self.ystart - 1)) + self._write(_ST7789_RAMWR) + # TODO: Can we swap (modify) framebuffer width/height in the super() class? + self._rotation = rotation + + def imshow(self, image): + """ + Display an image on the screen. + + Args: + image (Image): Image to display + """ + # Check if image is a numpy ndarray + if type(image) is not np.ndarray: + raise TypeError("Image must be a numpy ndarray") + + # Determine image shape + row = 0 + col = 0 + ch = 0 + if len(image.shape) == 3: + row, col, ch = image.shape + elif len(image.shape) == 2: + row, col = image.shape + ch = 1 + else: + row = image.shape[0] + col = 1 + ch = 1 + + # Crop input image to match display size + row_max = min(row, self.height) + col_max = min(col, self.width) + img_cropped = image[:row_max, :col_max] + + # Crop the buffer if image is smaller than the display + row_max = min(row_max, self.buffer.shape[0]) + col_max = min(col_max, self.buffer.shape[1]) + buffer_cropped = self.buffer[:row_max, :col_max] + + # Convert image to BGR565 format + if ch == 3: # BGR + buffer_cropped = cv2.cvtColor(img_cropped, cv2.COLOR_BGR2BGR565, buffer_cropped) + elif ch == 1: # Grayscale + buffer_cropped = cv2.cvtColor(img_cropped, cv2.COLOR_GRAY2BGR565, buffer_cropped) + else: # Already in BGR565 format + buffer_cropped[:] = img_cropped + + # Create bytearray to send to display. 
Swap bytes if needed + bytes_to_write = None + if self.needs_swap: + bytes_to_write = buffer_cropped[:, :, ::-1].tobytes() + else: + bytes_to_write = buffer_cropped.tobytes() + + # Write to the display + self._write(None, bytes_to_write) + +class ST7789_SPI(ST7789): + """ + ST7789 driver class for SPI bus devices + + Args: + spi (bus): bus object **Required** + width (int): display width **Required** + height (int): display height **Required** + reset (pin): reset pin + cs (pin): cs pin + dc (pin): dc pin + backlight (pin) or (pwm): backlight pin + - can be type Pin (digital), PWM or None + bright (value): Initial brightness level; default 'on' + - a (float) between 0 and 1 if backlight is pwm + - otherwise (bool) or (int) for pin value() + rotation (int): Orientation of display + - 0-Portrait, default + - 1-Landscape + - 2-Inverted Portrait + - 3-Inverted Landscape + color_order (int): + - RGB: Red, Green Blue, default + - BGR: Blue, Green, Red + reverse_bytes_in_word (bool): + - Enable if the display uses LSB byte order for color words + """ + def __init__( + self, + spi, + width, + height, + reset=None, + cs=None, + dc=None, + backlight=None, + bright=1, + rotation=0, + color_order=BGR, + reverse_bytes_in_word=True, + ): + self.spi = spi + self.reset = reset + self.cs = cs + self.dc = dc + super().__init__(width, height, backlight, bright, rotation, color_order, reverse_bytes_in_word) + + def _write(self, command=None, data=None): + """SPI write to the device: commands and data.""" + if self.cs: + self.cs.off() + if command is not None: + self.dc.off() + self.spi.write(command) + if data is not None: + self.dc.on() + self.spi.write(data) + if self.cs: + self.cs.on() + + def hard_reset(self): + """ + Hard reset display. + """ + if self.cs: + self.cs.off() + if self.reset: + self.reset.on() + sleep_ms(10) + if self.reset: + self.reset.off() + sleep_ms(10) + if self.reset: + self.reset.on() + sleep_ms(120) + if self.cs: + self.cs.on() \ No newline at end of file diff --git a/examples/hello_opencv.py b/examples/hello_opencv.py new file mode 100644 index 0000000..445c08f --- /dev/null +++ b/examples/hello_opencv.py @@ -0,0 +1,51 @@ +# Import OpenCV, just as you would in any other Python environment! +import cv2 + +# Import NumPy. Note that we use ulab's NumPy, which is a lightweight version of +# standard NumPy +from ulab import numpy as np + +# Import a display driver. Any display driver can be used, as long as it +# implements an `imshow()` function that takes an NumPy array as input +import st7789_spi as st7789 + +# The display driver requires some hardware-specific imports +from machine import Pin, SPI + +# Create SPI object +spi = SPI(0, baudrate=24000000) + +# Create display object +display = st7789.ST7789_SPI(spi, + 240, 320, + reset=None, + cs=machine.Pin(17, Pin.OUT, value=1), + dc=machine.Pin(16, Pin.OUT, value=1), + backlight=None, + bright=1, + rotation=1, + color_order=st7789.BGR, + reverse_bytes_in_word=True) + +# Initialize an image (NumPy array) to be displayed +img = np.zeros((240,320, 3), dtype=np.uint8) + +# Images can be modified directly if desired. Here we set the top 50 rows of the +# image to blue (255, 0, 0) in BGR format +img[0:50, :] = (255, 0, 0) + +# OpenCV's drawing functions can be used to modify the image as well. For +# example, we can draw a green ellipse on the image. Note that many OpenCV +# functions return the output image, meaning the entire array will be printed +# if it's not assigned to a variable. 
In this case, we assign the output to the +# same variable `img`, which has almost no overhead +img = cv2.ellipse(img, (160, 120), (100, 50), 0, 0, 360, (0, 255, 0), -1) + +# And the obligatory text, this time in red +img = cv2.putText(img, "Hello OpenCV!", (50, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) + +# Once we have an image ready to show, just call `imshow()` as you would in +# any other Python environment! However it's a bit different here, as we +# don't have a window to show the image in. Instead, we pass the display object +# to the `imshow()` function, which will show the image on the screen +cv2.imshow(display, img) diff --git a/src/highgui.cpp b/src/highgui.cpp new file mode 100644 index 0000000..4bc49f6 --- /dev/null +++ b/src/highgui.cpp @@ -0,0 +1,45 @@ +// C++ headers +#include "opencv2/core.hpp" +#include "convert.h" +#include "numpy.h" + +// C headers +extern "C" { +#include "highgui.h" +#include "ulab/code/ndarray.h" +} // extern "C" + +extern const mp_obj_type_t cv2_display_type; + +using namespace cv; + +mp_obj_t cv2_highgui_imshow(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_display, ARG_img }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_display, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Assume the display object has an `imshow` method and load it. The method + // array should be loaded with method[0] as the method object and method[1] + // as the self object. + mp_obj_t method[3]; + mp_load_method_maybe(args[ARG_display].u_obj, MP_QSTR_imshow, method); + + // Check if the method was found + if(method[0] == MP_OBJ_NULL) { + // Method not found, raise an AttributeError + mp_raise_msg(&mp_type_AttributeError, MP_ERROR_TEXT("`cv2.imshow()` requires a display object with its own 'imshow()' method, not a window name string.")); + } + + // Add the image object to the method arguments + method[2] = args[ARG_img].u_obj; + + // Call the method with one positional argument (the image we just added) + return mp_call_method_n_kw(1, 0, method); +} diff --git a/src/highgui.h b/src/highgui.h new file mode 100644 index 0000000..3c94d52 --- /dev/null +++ b/src/highgui.h @@ -0,0 +1,4 @@ +// C headers +#include "py/runtime.h" + +extern mp_obj_t cv2_highgui_imshow(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); diff --git a/src/opencv_upy.c b/src/opencv_upy.c index b63a18d..c88a564 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -1,4 +1,5 @@ #include "core.h" +#include "highgui.h" #include "imgproc.h" //////////////////////////////////////////////////////////////////////////////// @@ -8,6 +9,9 @@ // OpenCV core module static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange); +// OpenCV highgui module +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_imshow_obj, 2, cv2_highgui_imshow); + // OpenCV imgproc module static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_adaptiveThreshold_obj, 6, cv2_imgproc_adaptiveThreshold); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine); @@ -210,6 +214,12 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_inRange), MP_ROM_PTR(&cv2_core_inRange_obj) }, + 
//////////////////////////////////////////////////////////////////////////// + // OpenCV highgui functions + //////////////////////////////////////////////////////////////////////////// + + { MP_ROM_QSTR(MP_QSTR_imshow), MP_ROM_PTR(&cv2_highgui_imshow_obj) }, + //////////////////////////////////////////////////////////////////////////// // OpenCV imgproc functions //////////////////////////////////////////////////////////////////////////// diff --git a/src/opencv_upy.cmake b/src/opencv_upy.cmake index f2ea973..66367e6 100644 --- a/src/opencv_upy.cmake +++ b/src/opencv_upy.cmake @@ -6,6 +6,7 @@ target_sources(usermod_cv2 INTERFACE ${CMAKE_CURRENT_LIST_DIR}/alloc.c ${CMAKE_CURRENT_LIST_DIR}/convert.cpp ${CMAKE_CURRENT_LIST_DIR}/core.cpp + ${CMAKE_CURRENT_LIST_DIR}/highgui.cpp ${CMAKE_CURRENT_LIST_DIR}/imgproc.cpp ${CMAKE_CURRENT_LIST_DIR}/numpy.cpp ${CMAKE_CURRENT_LIST_DIR}/opencv_upy.c From e1800f927e62a148e422c80871282bb4636a58c4 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 3 Jun 2025 14:04:03 -0600 Subject: [PATCH 015/158] Add waitKey() Mostly just for compatibility with existing OpenCV code --- src/highgui.cpp | 26 ++++++++++++++++++++++++++ src/highgui.h | 1 + src/opencv_upy.c | 2 ++ 3 files changed, 29 insertions(+) diff --git a/src/highgui.cpp b/src/highgui.cpp index 4bc49f6..7dc7b3e 100644 --- a/src/highgui.cpp +++ b/src/highgui.cpp @@ -7,6 +7,7 @@ extern "C" { #include "highgui.h" #include "ulab/code/ndarray.h" +#include "py/mphal.h" } // extern "C" extern const mp_obj_type_t cv2_display_type; @@ -43,3 +44,28 @@ mp_obj_t cv2_highgui_imshow(size_t n_args, const mp_obj_t *pos_args, mp_map_t *k // Call the method with one positional argument (the image we just added) return mp_call_method_n_kw(1, 0, method); } + +mp_obj_t cv2_highgui_waitKey(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { Arg_delay }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_delay, MP_ARG_INT, {.u_int = 0} }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + int delay = args[Arg_delay].u_int; + + // Because we have no way to get user input in this environment, we'll just + // delay for the specified time and return a dummy value. Normally, passing + // a delay of 0 would wait infinitely until a keyPress, but since that will + // never happen here, we will just return immediately after the delay. 
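+    //
+    // For example, a conventional OpenCV display loop still runs unchanged on
+    // top of this stub (illustrative usage; no key press can ever be observed
+    // here, so waitKey() always yields -1 and the loop never exits):
+    //
+    //     while True:
+    //         cv2.imshow(display, img)
+    //         if cv2.waitKey(33) == ord('q'):
+    //             break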
+ if(delay > 0) + mp_hal_delay_ms(delay); + + // Return a dummy value to indicate no key was pressed + return MP_OBJ_NEW_SMALL_INT(-1); +} diff --git a/src/highgui.h b/src/highgui.h index 3c94d52..92f5ed5 100644 --- a/src/highgui.h +++ b/src/highgui.h @@ -2,3 +2,4 @@ #include "py/runtime.h" extern mp_obj_t cv2_highgui_imshow(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_highgui_waitKey(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); diff --git a/src/opencv_upy.c b/src/opencv_upy.c index c88a564..0900e02 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -11,6 +11,7 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange); // OpenCV highgui module static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_imshow_obj, 2, cv2_highgui_imshow); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_waitKey_obj, 0, cv2_highgui_waitKey); // OpenCV imgproc module static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_adaptiveThreshold_obj, 6, cv2_imgproc_adaptiveThreshold); @@ -219,6 +220,7 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { //////////////////////////////////////////////////////////////////////////// { MP_ROM_QSTR(MP_QSTR_imshow), MP_ROM_PTR(&cv2_highgui_imshow_obj) }, + { MP_ROM_QSTR(MP_QSTR_waitKey), MP_ROM_PTR(&cv2_highgui_waitKey_obj) }, //////////////////////////////////////////////////////////////////////////// // OpenCV imgproc functions From 4d0627e2f0315e4d59b1a36c779c065bf75a040a Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 3 Jun 2025 19:01:10 -0600 Subject: [PATCH 016/158] Improve st7789_spi.py imshow() Based on feedback in #7 --- drivers/display/st7789_spi.py | 57 ++++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 25 deletions(-) diff --git a/drivers/display/st7789_spi.py b/drivers/display/st7789_spi.py index 9ea7c53..0e77171 100644 --- a/drivers/display/st7789_spi.py +++ b/drivers/display/st7789_spi.py @@ -227,24 +227,20 @@ def imshow(self, image): Args: image (Image): Image to display """ - # Check if image is a numpy ndarray + # Check if image is a NumPy ndarray if type(image) is not np.ndarray: - raise TypeError("Image must be a numpy ndarray") - - # Determine image shape - row = 0 - col = 0 - ch = 0 - if len(image.shape) == 3: - row, col, ch = image.shape - elif len(image.shape) == 2: - row, col = image.shape - ch = 1 - else: - row = image.shape[0] - col = 1 - ch = 1 - + raise TypeError("Image must be a NumPy ndarray") + + # Ensure image is 3D (row, col, ch) by reshaping if necessary + ndim = len(image.shape) + if ndim == 1: + image = image.reshape((image.shape[0], 1, 1)) + elif ndim == 2: + image = image.reshape((image.shape[0], image.shape[1], 1)) + + # Determine number of rows, columns, and channels + row, col, ch = image.shape + # Crop input image to match display size row_max = min(row, self.height) col_max = min(col, self.width) @@ -255,6 +251,21 @@ def imshow(self, image): col_max = min(col_max, self.buffer.shape[1]) buffer_cropped = self.buffer[:row_max, :col_max] + # Check dtype and convert to uint8 if necessary + if img_cropped.dtype is not np.uint8: + # Have to create a new buffer for non-uint8 images + if img_cropped.dtype == np.int8: + temp = cv2.convertScaleAbs(img_cropped, alpha=1, beta=127) + elif img_cropped.dtype == np.int16: + temp = cv2.convertScaleAbs(img_cropped, alpha=1/255, beta=127) + elif img_cropped.dtype == np.uint16: + temp = cv2.convertScaleAbs(img_cropped, alpha=1/255) + elif img_cropped.dtype == np.float: + # Standard OpenCV will clamp values to 
0-1 using convertTo(), + # but this implementation wraps instead + temp = np.asarray(img_cropped * 255, dtype=np.uint8) + img_cropped = temp + # Convert image to BGR565 format if ch == 3: # BGR buffer_cropped = cv2.cvtColor(img_cropped, cv2.COLOR_BGR2BGR565, buffer_cropped) @@ -262,16 +273,12 @@ def imshow(self, image): buffer_cropped = cv2.cvtColor(img_cropped, cv2.COLOR_GRAY2BGR565, buffer_cropped) else: # Already in BGR565 format buffer_cropped[:] = img_cropped - - # Create bytearray to send to display. Swap bytes if needed - bytes_to_write = None + + # Write to display. Swap bytes if needed if self.needs_swap: - bytes_to_write = buffer_cropped[:, :, ::-1].tobytes() + self._write(None, self.buffer[:, :, ::-1]) else: - bytes_to_write = buffer_cropped.tobytes() - - # Write to the display - self._write(None, bytes_to_write) + self._write(None, self.buffer) class ST7789_SPI(ST7789): """ From 361fbbefb359fd8e10284162ac67d3eb4e134aa5 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 3 Jun 2025 19:24:16 -0600 Subject: [PATCH 017/158] Add convertScaleAbs() Needed for new ST7789 implementation to convert other dtypes --- src/core.cpp | 31 +++++++++++++++++++++++++++++++ src/core.h | 1 + src/opencv_upy.c | 2 ++ 3 files changed, 34 insertions(+) diff --git a/src/core.cpp b/src/core.cpp index 5ac0d82..2a55263 100644 --- a/src/core.cpp +++ b/src/core.cpp @@ -11,6 +11,37 @@ extern "C" { using namespace cv; +mp_obj_t cv2_core_convertScaleAbs(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_dst, ARG_alpha, ARG_beta }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_alpha, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_beta, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + mp_float_t alpha = args[ARG_alpha].u_obj == mp_const_none ? 1.0 : mp_obj_get_float(args[ARG_alpha].u_obj); + mp_float_t beta = args[ARG_beta].u_obj == mp_const_none ? 
0.0 : mp_obj_get_float(args[ARG_beta].u_obj); + + // Call the corresponding OpenCV function + try { + convertScaleAbs(src, dst, alpha, beta); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_lower, ARG_upper, ARG_dst }; diff --git a/src/core.h b/src/core.h index 6f4b062..71d922f 100644 --- a/src/core.h +++ b/src/core.h @@ -1,4 +1,5 @@ // C headers #include "py/runtime.h" +extern mp_obj_t cv2_core_convertScaleAbs(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); diff --git a/src/opencv_upy.c b/src/opencv_upy.c index 0900e02..06ffc8c 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -7,6 +7,7 @@ //////////////////////////////////////////////////////////////////////////////// // OpenCV core module +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_convertScaleAbs_obj, 1, cv2_core_convertScaleAbs); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange); // OpenCV highgui module @@ -213,6 +214,7 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { // OpenCV core functions //////////////////////////////////////////////////////////////////////////// + { MP_ROM_QSTR(MP_QSTR_convertScaleAbs), MP_ROM_PTR(&cv2_core_convertScaleAbs_obj) }, { MP_ROM_QSTR(MP_QSTR_inRange), MP_ROM_PTR(&cv2_core_inRange_obj) }, //////////////////////////////////////////////////////////////////////////// From 7e304261f6d577333cbfcbd61e28d65bf9da273a Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 4 Jun 2025 11:39:59 -0600 Subject: [PATCH 018/158] Improve/simplify/modularize ST7789 SPI driver Create helper functions for imshow() (could move these to a generic interface class for other display drivers to take advantage of) Move machine dependency from example to display driver Add extra checks for edge cases Remove unused functions and arguments Add clear() method Fix imshow() with float images to clip instead of wrap the range 0-1 Other minor optimizations --- drivers/display/st7789_spi.py | 284 ++++++++++++++++++---------------- examples/hello_opencv.py | 26 +--- 2 files changed, 157 insertions(+), 153 deletions(-) diff --git a/drivers/display/st7789_spi.py b/drivers/display/st7789_spi.py index 0e77171..2927422 100644 --- a/drivers/display/st7789_spi.py +++ b/drivers/display/st7789_spi.py @@ -3,6 +3,7 @@ import struct from time import sleep_ms +from machine import Pin, SPI from ulab import numpy as np import cv2 @@ -48,15 +49,6 @@ _ENCODE_POS = const(">HH") -_BIT7 = const(0x80) -_BIT6 = const(0x40) -_BIT5 = const(0x20) -_BIT4 = const(0x10) -_BIT3 = const(0x08) -_BIT2 = const(0x04) -_BIT1 = const(0x02) -_BIT0 = const(0x01) - # Rotation tables # (madctl, width, height, xstart, ystart)[rotation % 4] @@ -121,22 +113,58 @@ ( b'\x29', b'\x00', 120) # Turn on the display ) -class ST7789(): +class ST7789_SPI(): """ - ST7789 driver class base + OpenCV SPI driver for ST7789 displays + + Args: + width (int): display width **Required** + height (int): display height **Required** + spi_id (int): SPI bus ID + spi_baudrate (int): SPI baudrate, default 24MHz + pin_sck (pin): SCK pin number + pin_mosi (pin): MOSI pin number + pin_miso (pin): MISO pin number + pin_cs (pin): Chip Select pin number + pin_dc (pin): Data/Command pin number + rotation (int): Orientation of 
display + - 0-Portrait, default + - 1-Landscape + - 2-Inverted Portrait + - 3-Inverted Landscape + color_order (int): + - RGB: Red, Green, Blue, default + - BGR: Blue, Green, Red + reverse_bytes_in_word (bool): + - Enable if the display uses LSB byte order for color words """ - def __init__(self, width, height, backlight, bright, rotation, color_order, reverse_bytes_in_word): - """ - Initialize display and backlight. - """ + def __init__( + self, + width, + height, + spi_id, + spi_baudrate=24000000, + pin_sck=None, + pin_mosi=None, + pin_miso=None, + pin_cs=None, + pin_dc=None, + rotation=0, + color_order=BGR, + reverse_bytes_in_word=True, + ): + # Store SPI arguments + self.spi = SPI(spi_id, baudrate=spi_baudrate, + sck=Pin(pin_sck, Pin.OUT) if pin_sck else None, + mosi=Pin(pin_mosi, Pin.OUT) if pin_mosi else None, + miso=Pin(pin_miso, Pin.IN) if pin_miso else None) + self.cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs else None + self.dc = Pin(pin_dc, Pin.OUT, value=1) if pin_dc else None # Initial dimensions and offsets; will be overridden when rotation applied self.width = width self.height = height self.xstart = 0 self.ystart = 0 - # backlight pin - self.backlight = backlight - self._pwm_bl = True # Check display is known and get rotation table self.rotations = self._find_rotations(width, height) if not self.rotations: @@ -147,18 +175,17 @@ def __init__(self, width, height, backlight, bright, rotation, color_order, reve # Colors self.color_order = color_order self.needs_swap = reverse_bytes_in_word - # init the st7789 - self.init_cmds = _ST7789_INIT_CMDS + # Reset the display self.soft_reset() # Yes, send init twice, once is not always enough - self.send_init(self.init_cmds) - self.send_init(self.init_cmds) + self.send_init(_ST7789_INIT_CMDS) + self.send_init(_ST7789_INIT_CMDS) # Initial rotation self._rotation = rotation % 4 # Apply rotation self.rotation(self._rotation) # Create the framebuffer for the correct rotation - self.buffer = np.zeros((self.rotations[self._rotation][2], self.rotations[self._rotation][1], 2), dtype=np.uint8) + self.buffer = np.zeros((self.height, self.width, 2), dtype=np.uint8) def send_init(self, commands): """ @@ -220,112 +247,117 @@ def rotation(self, rotation): # TODO: Can we swap (modify) framebuffer width/height in the super() class? self._rotation = rotation - def imshow(self, image): + def _get_common_roi_with_buffer(self, image): """ - Display an image on the screen. + Get the common region of interest (ROI) between the image and the + display's internal buffer. Args: - image (Image): Image to display + image (ndarray): Image to display + + Returns: + tuple: (image_roi, buffer_roi) """ - # Check if image is a NumPy ndarray + # Ensure image is a NumPy ndarray if type(image) is not np.ndarray: raise TypeError("Image must be a NumPy ndarray") + + # Determine the number of rows and columns in the image + image_rows = image.shape[0] + if len(image.shape) < 2: + image_cols = 1 + else: + image_cols = image.shape[1] + + # Get the common ROI between the image and the buffer + row_max = min(image_rows, self.height) + col_max = min(image_cols, self.width) + img_roi = image[:row_max, :col_max] + buffer_roi = self.buffer[:row_max, :col_max] + return img_roi, buffer_roi + + def _convert_image_to_uint8(self, image): + """ + Convert the image to uint8 format if necessary. 
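+
+        Integer images are shifted and scaled so that the range of their
+        dtype maps into 0-255; float images are clipped to 0.0-1.0 and then
+        scaled to 0-255.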
+ + Args: + image (ndarray): Image to convert + + Returns: + Image: Converted image + """ + # Check if the image is already in uint8 format + if image.dtype is np.uint8: + return image + + # Convert to uint8 format. This unfortunately requires creating a new + # buffer for the converted image, which takes more memory + if image.dtype == np.int8: + return cv2.convertScaleAbs(image, alpha=1, beta=127) + elif image.dtype == np.int16: + return cv2.convertScaleAbs(image, alpha=1/255, beta=127) + elif image.dtype == np.uint16: + return cv2.convertScaleAbs(image, alpha=1/255) + elif image.dtype == np.float: + # This implementation creates an additional buffer from np.clip() + # TODO: Find another solution that avoids an additional buffer + return cv2.convertScaleAbs(np.clip(image, 0, 1), alpha=255) + else: + raise ValueError(f"Unsupported image dtype: {image.dtype}") + + def _write_image_to_buffer_bgr565(self, image_roi, buffer_roi): + """ + Convert the image ROI to BGR565 format and write it to the buffer ROI. + + Args: + image_roi (ndarray): Image region of interest + buffer_roi (ndarray): Buffer region of interest + """ + # Determine the number of channels in the image + if len(image_roi.shape) < 3: + ch = 1 + else: + ch = image_roi.shape[2] + + if ch == 1: # Grayscale + buffer_roi = cv2.cvtColor(image_roi, cv2.COLOR_GRAY2BGR565, buffer_roi) + elif ch == 2: # Already in BGR565 format + buffer_roi[:] = image_roi + elif ch == 3: # BGR + buffer_roi = cv2.cvtColor(image_roi, cv2.COLOR_BGR2BGR565, buffer_roi) + else: + raise ValueError("Image must be 1, 2 or 3 channels (grayscale, BGR565, or BGR)") - # Ensure image is 3D (row, col, ch) by reshaping if necessary - ndim = len(image.shape) - if ndim == 1: - image = image.reshape((image.shape[0], 1, 1)) - elif ndim == 2: - image = image.reshape((image.shape[0], image.shape[1], 1)) - - # Determine number of rows, columns, and channels - row, col, ch = image.shape - - # Crop input image to match display size - row_max = min(row, self.height) - col_max = min(col, self.width) - img_cropped = image[:row_max, :col_max] - - # Crop the buffer if image is smaller than the display - row_max = min(row_max, self.buffer.shape[0]) - col_max = min(col_max, self.buffer.shape[1]) - buffer_cropped = self.buffer[:row_max, :col_max] - - # Check dtype and convert to uint8 if necessary - if img_cropped.dtype is not np.uint8: - # Have to create a new buffer for non-uint8 images - if img_cropped.dtype == np.int8: - temp = cv2.convertScaleAbs(img_cropped, alpha=1, beta=127) - elif img_cropped.dtype == np.int16: - temp = cv2.convertScaleAbs(img_cropped, alpha=1/255, beta=127) - elif img_cropped.dtype == np.uint16: - temp = cv2.convertScaleAbs(img_cropped, alpha=1/255) - elif img_cropped.dtype == np.float: - # Standard OpenCV will clamp values to 0-1 using convertTo(), - # but this implementation wraps instead - temp = np.asarray(img_cropped * 255, dtype=np.uint8) - img_cropped = temp - - # Convert image to BGR565 format - if ch == 3: # BGR - buffer_cropped = cv2.cvtColor(img_cropped, cv2.COLOR_BGR2BGR565, buffer_cropped) - elif ch == 1: # Grayscale - buffer_cropped = cv2.cvtColor(img_cropped, cv2.COLOR_GRAY2BGR565, buffer_cropped) - else: # Already in BGR565 format - buffer_cropped[:] = img_cropped - - # Write to display. Swap bytes if needed + def imshow(self, image): + """ + Display a NumPy image on the screen. 
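+
+        The image may be grayscale (1 channel), BGR565 (2 channels), or BGR
+        (3 channels); non-uint8 images are converted to uint8 first.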
+ + Args: + image (ndarray): Image to display + """ + # Get the common ROI between the image and internal display buffer + image_roi, buffer_roi = self._get_common_roi_with_buffer(image) + + # Ensure the image is in uint8 format + image_roi = self._convert_image_to_uint8(image_roi) + + # Convert the image to BGR565 format and write it to the buffer + self._write_image_to_buffer_bgr565(image_roi, buffer_roi) + + # Write buffer to display. Swap bytes if needed if self.needs_swap: self._write(None, self.buffer[:, :, ::-1]) else: self._write(None, self.buffer) -class ST7789_SPI(ST7789): - """ - ST7789 driver class for SPI bus devices - - Args: - spi (bus): bus object **Required** - width (int): display width **Required** - height (int): display height **Required** - reset (pin): reset pin - cs (pin): cs pin - dc (pin): dc pin - backlight (pin) or (pwm): backlight pin - - can be type Pin (digital), PWM or None - bright (value): Initial brightness level; default 'on' - - a (float) between 0 and 1 if backlight is pwm - - otherwise (bool) or (int) for pin value() - rotation (int): Orientation of display - - 0-Portrait, default - - 1-Landscape - - 2-Inverted Portrait - - 3-Inverted Landscape - color_order (int): - - RGB: Red, Green Blue, default - - BGR: Blue, Green, Red - reverse_bytes_in_word (bool): - - Enable if the display uses LSB byte order for color words - """ - def __init__( - self, - spi, - width, - height, - reset=None, - cs=None, - dc=None, - backlight=None, - bright=1, - rotation=0, - color_order=BGR, - reverse_bytes_in_word=True, - ): - self.spi = spi - self.reset = reset - self.cs = cs - self.dc = dc - super().__init__(width, height, backlight, bright, rotation, color_order, reverse_bytes_in_word) + def clear(self): + """ + Clear the display by filling it with black color. + """ + # Clear the buffer by filling it with zeros (black) + self.buffer[:] = 0 + # Write the buffer to the display + self._write(None, self.buffer) def _write(self, command=None, data=None): """SPI write to the device: commands and data.""" @@ -339,21 +371,3 @@ def _write(self, command=None, data=None): self.spi.write(data) if self.cs: self.cs.on() - - def hard_reset(self): - """ - Hard reset display. - """ - if self.cs: - self.cs.off() - if self.reset: - self.reset.on() - sleep_ms(10) - if self.reset: - self.reset.off() - sleep_ms(10) - if self.reset: - self.reset.on() - sleep_ms(120) - if self.cs: - self.cs.on() \ No newline at end of file diff --git a/examples/hello_opencv.py b/examples/hello_opencv.py index 445c08f..c548077 100644 --- a/examples/hello_opencv.py +++ b/examples/hello_opencv.py @@ -9,26 +9,16 @@ # implements an `imshow()` function that takes an NumPy array as input import st7789_spi as st7789 -# The display driver requires some hardware-specific imports -from machine import Pin, SPI - -# Create SPI object -spi = SPI(0, baudrate=24000000) - # Create display object -display = st7789.ST7789_SPI(spi, - 240, 320, - reset=None, - cs=machine.Pin(17, Pin.OUT, value=1), - dc=machine.Pin(16, Pin.OUT, value=1), - backlight=None, - bright=1, - rotation=1, - color_order=st7789.BGR, - reverse_bytes_in_word=True) +display = st7789.ST7789_SPI(width=240, + height=320, + spi_id=0, + pin_cs=17, + pin_dc=16, + rotation=1,) # Initialize an image (NumPy array) to be displayed -img = np.zeros((240,320, 3), dtype=np.uint8) +img = np.zeros((240, 320, 3), dtype=np.uint8) # Images can be modified directly if desired. 
Here we set the top 50 rows of the # image to blue (255, 0, 0) in BGR format @@ -41,7 +31,7 @@ # same variable `img`, which has almost no overhead img = cv2.ellipse(img, (160, 120), (100, 50), 0, 0, 360, (0, 255, 0), -1) -# And the obligatory text, this time in red +# And the obligatory "Hello OpenCV" text, this time in red img = cv2.putText(img, "Hello OpenCV!", (50, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) # Once we have an image ready to show, just call `imshow()` as you would in From 56903a46be75e1eec115c1b4d8e43857d04dd896 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 4 Jun 2025 14:41:06 -0600 Subject: [PATCH 019/158] Make waitKey() get input from REPL Derived from https://github.com/orgs/micropython/discussions/11448 --- src/highgui.cpp | 59 ++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 51 insertions(+), 8 deletions(-) diff --git a/src/highgui.cpp b/src/highgui.cpp index 7dc7b3e..eb7de5e 100644 --- a/src/highgui.cpp +++ b/src/highgui.cpp @@ -59,13 +59,56 @@ mp_obj_t cv2_highgui_waitKey(size_t n_args, const mp_obj_t *pos_args, mp_map_t * // Convert arguments to required types int delay = args[Arg_delay].u_int; - // Because we have no way to get user input in this environment, we'll just - // delay for the specified time and return a dummy value. Normally, passing - // a delay of 0 would wait infinitely until a keyPress, but since that will - // never happen here, we will just return immediately after the delay. - if(delay > 0) - mp_hal_delay_ms(delay); + // Derived from: + // https://github.com/orgs/micropython/discussions/11448 - // Return a dummy value to indicate no key was pressed - return MP_OBJ_NEW_SMALL_INT(-1); + // Import `sys` and `select` modules + mp_obj_t sys_module = mp_import_name(MP_QSTR_sys, mp_const_none, MP_OBJ_NEW_SMALL_INT(0)); + mp_obj_t select_module = mp_import_name(MP_QSTR_select, mp_const_none, MP_OBJ_NEW_SMALL_INT(0)); + + // Get the `sys.stdin` object + mp_obj_t stdin_obj = mp_load_attr(sys_module, MP_QSTR_stdin); + + // Get the `select.POLLIN` constant + mp_obj_t pollin_obj = mp_load_attr(select_module, MP_QSTR_POLLIN); + + // Call `select.poll()` function to create a poll object + mp_obj_t select_poll_method[2]; + mp_load_method(select_module, MP_QSTR_poll, select_poll_method); + mp_obj_t poll_obj = mp_call_method_n_kw(0, 0, select_poll_method); + + // Call `poll.register(sys.stdin, select.POLLIN)` + mp_obj_t poll_register_method[4]; + mp_load_method(poll_obj, MP_QSTR_register, poll_register_method); + poll_register_method[2] = stdin_obj; + poll_register_method[3] = pollin_obj; + mp_call_method_n_kw(2, 0, poll_register_method); + + // Create timeout integer object for next method call. OpenCV uses a delay + // of 0 to wait indefinitely, whereas `select.poll` uses -1 + mp_obj_t timeout = MP_OBJ_NEW_SMALL_INT(delay <= 0 ? -1 : delay); + + // Call `poll.poll(timeout)` + mp_obj_t poll_poll_method[3]; + mp_load_method(poll_obj, MP_QSTR_poll, poll_poll_method); + poll_poll_method[2] = timeout; + mp_obj_t result = mp_call_method_n_kw(1, 0, poll_poll_method); + + // Extract the items from the result list + mp_obj_t *items; + size_t len; + mp_obj_list_get(result, &len, &items); + + // Check if any items were returned + if(len == 0) { + // If no items were returned, return -1 to indicate no key was pressed + return MP_OBJ_NEW_SMALL_INT(-1); + } + + // Since something was returned, a key was pressed. 
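+    // (For reference, the call sequence above is roughly this MicroPython,
+    // as a sketch rather than code from this module:
+    //
+    //     import sys, select
+    //     poller = select.poll()
+    //     poller.register(sys.stdin, select.POLLIN)
+    //     if poller.poll(timeout):
+    //         key = sys.stdin.read(1)
+    // )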
+    // We need to extract it
+    // with `sys.stdin.read(1)`
+    mp_obj_t read_method[3];
+    mp_load_method(stdin_obj, MP_QSTR_read, read_method);
+    read_method[2] = MP_OBJ_NEW_SMALL_INT(1);
+    return mp_call_method_n_kw(1, 0, read_method);
 }

From be186c20958bb554f0d2774df09727190b91d441 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 4 Jun 2025 15:36:29 -0600
Subject: [PATCH 020/158] Update waitKey() to return integer instead of string
 character

---
 src/highgui.cpp | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/highgui.cpp b/src/highgui.cpp
index eb7de5e..dbeb957 100644
--- a/src/highgui.cpp
+++ b/src/highgui.cpp
@@ -88,6 +88,10 @@ mp_obj_t cv2_highgui_waitKey(size_t n_args, const mp_obj_t *pos_args, mp_map_t *
     // of 0 to wait indefinitely, whereas `select.poll` uses -1
     mp_obj_t timeout = MP_OBJ_NEW_SMALL_INT(delay <= 0 ? -1 : delay);
 
+    // TODO: Some key presses return multiple characters (e.g. the up arrow
+    // key returns 3 characters: "\x1b[A"). Need to handle this case properly.
+    // Should also look into implementing waitKeyEx() for these extra cases
+
     // Call `poll.poll(timeout)`
     mp_obj_t poll_poll_method[3];
     mp_load_method(poll_obj, MP_QSTR_poll, poll_poll_method);
@@ -110,5 +114,9 @@ mp_obj_t cv2_highgui_waitKey(size_t n_args, const mp_obj_t *pos_args, mp_map_t *
     mp_obj_t read_method[3];
     mp_load_method(stdin_obj, MP_QSTR_read, read_method);
     read_method[2] = MP_OBJ_NEW_SMALL_INT(1);
-    return mp_call_method_n_kw(1, 0, read_method);
+    mp_obj_t key_str = mp_call_method_n_kw(1, 0, read_method);
+
+    // Convert the key character to an integer and return it
+    const char *key_chars = mp_obj_str_get_str(key_str);
+    return MP_OBJ_NEW_SMALL_INT(key_chars[0]);
 }

From 74c1832a590a24f7c7a3db027e4bf29cef32082b Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 4 Jun 2025 16:00:52 -0600
Subject: [PATCH 021/158] Clean up Hello OpenCV example

---
 examples/hello_opencv.py | 71 ++++++++++++++++++++++++++--------------
 1 file changed, 47 insertions(+), 24 deletions(-)

diff --git a/examples/hello_opencv.py b/examples/hello_opencv.py
index c548077..288fb32 100644
--- a/examples/hello_opencv.py
+++ b/examples/hello_opencv.py
@@ -1,41 +1,64 @@
 # Import OpenCV, just as you would in any other Python environment!
 import cv2
 
-# Import NumPy. Note that we use ulab's NumPy, which is a lightweight version of
-# standard NumPy
+# Import NumPy, almost like any other Python environment! The only difference is
+# the addition of `from ulab` since MicroPython does not have a full NumPy
+# implementation; ulab NumPy is a lightweight version of standard NumPy
 from ulab import numpy as np
 
-# Import a display driver. Any display driver can be used, as long as it
-# implements an `imshow()` function that takes an NumPy array as input
-import st7789_spi as st7789
+# Standard OpenCV leverages the host operating system to display images, but we
+# don't have that luxury in MicroPython. Instead, we need to import a display
+# driver. Any display driver can be used, as long as it implements an `imshow()`
+# method that takes a NumPy array as input
+from st7789_spi import ST7789_SPI
 
-# Create display object
-display = st7789.ST7789_SPI(width=240,
-                            height=320,
-                            spi_id=0,
-                            pin_cs=17,
-                            pin_dc=16,
-                            rotation=1,)
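+# (Any display driver exposing an `imshow(image)` method will work with
+# `cv2.imshow()` below. For bench testing without hardware, a minimal
+# stand-in could look like the following; purely a sketch, not part of
+# this repo:
+#
+#   class DummyDisplay:
+#       def imshow(self, image):
+#           print("frame shape:", image.shape)
+# )
+
+# Create a display object.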
This will depend on the display driver you are using, +# and you may need to adjust the parameters based on your specific display and +# board configuration +display = ST7789_SPI(width=240, + height=320, + spi_id=0, + pin_cs=17, + pin_dc=16, + rotation=1) -# Initialize an image (NumPy array) to be displayed +# Initialize an image (NumPy array) to be displayed, just like in any other +# Python environment! Here we create a 240x320 pixel image with 3 color channels +# (BGR order, like standard OpenCV) and a data type of `uint8` (you should +# always specify the data type, because NumPy defaults to `float`) img = np.zeros((240, 320, 3), dtype=np.uint8) -# Images can be modified directly if desired. Here we set the top 50 rows of the -# image to blue (255, 0, 0) in BGR format +# Images can be accessed and modified directly if desired with array slicing. +# Here we set the top 50 rows of the image to blue (remember, BGR order!) img[0:50, :] = (255, 0, 0) # OpenCV's drawing functions can be used to modify the image as well. For -# example, we can draw a green ellipse on the image. Note that many OpenCV -# functions return the output image, meaning the entire array will be printed -# if it's not assigned to a variable. In this case, we assign the output to the -# same variable `img`, which has almost no overhead +# example, we can draw a green ellipse at the center of the image img = cv2.ellipse(img, (160, 120), (100, 50), 0, 0, 360, (0, 255, 0), -1) -# And the obligatory "Hello OpenCV" text, this time in red +# Note - Most OpenCV functions return the resulting image. It's redundant for +# the drawing functions and often ignored, but if you call those functions from +# the REPL without assigning it to a variable, the entire array will be printed. +# To avoid this, you can simply re-assign the image, which has no effect other +# than preventing the output from being printed + +# And the obligatory "Hello OpenCV" text! This time in red img = cv2.putText(img, "Hello OpenCV!", (50, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) -# Once we have an image ready to show, just call `imshow()` as you would in -# any other Python environment! However it's a bit different here, as we -# don't have a window to show the image in. Instead, we pass the display object -# to the `imshow()` function, which will show the image on the screen +# Once we have an image ready to show, just call `cv2.imshow()`, almost like any +# other Python environment! The only difference is that we need to pass the +# display object we created earlier as the first argument, instead of a window +# name string. Alternatively, you can call `display.imshow(img)` directly cv2.imshow(display, img) + +# Standard OpenCV requires a call to `cv2.waitKey()` to process events and +# actually display the image. However the display driver shows the image +# immediately, so it's not necessary to call `cv2.waitKey()` in MicroPython. +# But it is available, and behaves almost like any other Python environment! The +# only difference is that it requires a key to be pressed in the REPL instead of +# a window. 
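+# For example, a typical wait-for-quit pattern would be the following (the
+# 'q' key is an arbitrary choice for this sketch):
+#
+#   while cv2.waitKey(0) != ord('q'):
+#       pass
+#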
It will wait for up to the specified number of milliseconds (0 for +# indefinite), and return the ASCII code of the key pressed (-1 if no key press) +# +# Note - Some MicroPython IDEs (like Thonny) don't actually send any key presses +# until you hit Enter on your keyboard +key = cv2.waitKey(1) # Not necessary to display image, can remove if desired From 4202cb9bce83f4bc801cecbc5981fb2c8e3e0299 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 4 Jun 2025 16:01:42 -0600 Subject: [PATCH 022/158] Rename Hello OpenCV example with `ex01_` prefix --- examples/{hello_opencv.py => ex01_hello_opencv.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename examples/{hello_opencv.py => ex01_hello_opencv.py} (100%) diff --git a/examples/hello_opencv.py b/examples/ex01_hello_opencv.py similarity index 100% rename from examples/hello_opencv.py rename to examples/ex01_hello_opencv.py From a2bf9f2564524ee4cb4dd5ccda39ce5f82b87f11 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 5 Jun 2025 12:00:30 -0600 Subject: [PATCH 023/158] Fix for #13 --- src/core.cpp | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/core.cpp b/src/core.cpp index 2a55263..23238c8 100644 --- a/src/core.cpp +++ b/src/core.cpp @@ -11,6 +11,23 @@ extern "C" { using namespace cv; +// Fix for https://github.com/sparkfun/micropython-opencv/issues/13 +// +// TLDR; The CoreTLSData object gets allocated once, whenever the first OpenCV +// function that needs it happens to be called. That will only happen from the +// user's code, after the GC has been initialized, meaning it gets allocated on +// the GC heap (see `__wrap_malloc()`). If a soft reset occurs, the GC gets +// reset and overwrites the memory location, but the same memory location is +// still referenced for the CoreTLSData object, resulting in bogus values and +// subsequent `CV_Assert()` calls fail +// +// The solution here is to create a global variable that subsequently calls +// `getCoreTlsData()` to allocate the CoreTLSData object before the GC has +// been initialized, so it gets allocated on the C heap and persists through +// soft resets. 
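+// (Illustrative timeline of the above, as a sketch:
+//     1. C runtime startup: this global's initializer (below) calls
+//        theRNG(), allocating CoreTLSData on the C heap, since the GC
+//        heap does not exist yet
+//     2. gc_init(): the GC heap is created; later allocations land there
+//     3. soft reset: the GC heap is wiped, but the C-heap CoreTLSData
+//        survives)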
`getCoreTlsData()` is not publicly exposed, but `theRNG()` is +// exposed, which just runs `return getCoreTlsData().rng` +volatile RNG rng = theRNG(); + mp_obj_t cv2_core_convertScaleAbs(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_dst, ARG_alpha, ARG_beta }; From 1d27888645aec5d28dbf5767e4425dd1af0b36bd Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 6 Jun 2025 15:37:22 -0600 Subject: [PATCH 024/158] Fix RP2350 atomics not working with PSRAM Workaround for https://github.com/raspberrypi/pico-sdk/issues/2505 Fixes #12 --- src/opencv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opencv b/src/opencv index 8eb737f..df1e769 160000 --- a/src/opencv +++ b/src/opencv @@ -1 +1 @@ -Subproject commit 8eb737f902be3e0606b865b3e7ef58dcd213609f +Subproject commit df1e769007b2304d8e92d64b9755786bfd291265 From 98edf13e20e5697ba52edc4a485cafceef69c7cb Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 6 Jun 2025 16:58:49 -0600 Subject: [PATCH 025/158] Update OpenCV submodule with generic embedded platform cmake files --- src/opencv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opencv b/src/opencv index df1e769..3eaaee2 160000 --- a/src/opencv +++ b/src/opencv @@ -1 +1 @@ -Subproject commit df1e769007b2304d8e92d64b9755786bfd291265 +Subproject commit 3eaaee27a26a217191a1da19d3d20d7a3d04f8a9 From 4354a73d75445b773a2220aa243c05ae1f481e56 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 6 Jun 2025 17:00:43 -0600 Subject: [PATCH 026/158] Update ulab Fixes a couple issues, see https://github.com/v923z/micropython-ulab/pull/724 and https://github.com/v923z/micropython-ulab/pull/725 --- src/ulab | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ulab b/src/ulab index 825ec2b..8eb8eaf 160000 --- a/src/ulab +++ b/src/ulab @@ -1 +1 @@ -Subproject commit 825ec2b143ebd8d3d3707bac2af0fe1ae6cb401a +Subproject commit 8eb8eaf5a19f5ed3a2e2193ba6e727d7518458a9 From e2f0fc6dd56ffa3930e852f1bd4ca7a58b134abc Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 6 Jun 2025 17:06:15 -0600 Subject: [PATCH 027/158] Update ST7789 SPI driver to use ndarray.ndim See https://github.com/v923z/micropython-ulab/pull/725 --- drivers/display/st7789_spi.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/display/st7789_spi.py b/drivers/display/st7789_spi.py index 2927422..6134350 100644 --- a/drivers/display/st7789_spi.py +++ b/drivers/display/st7789_spi.py @@ -264,7 +264,7 @@ def _get_common_roi_with_buffer(self, image): # Determing number of rows and columns in the image image_rows = image.shape[0] - if len(image.shape) < 2: + if image.ndim < 2: image_cols = 1 else: image_cols = image.shape[1] @@ -314,7 +314,7 @@ def _write_image_to_buffer_bgr565(self, image_roi, buffer_roi): buffer_roi (ndarray): Buffer region of interest """ # Determine the number of channels in the image - if len(image_roi.shape) < 3: + if image_roi.ndim < 3: ch = 1 else: ch = image_roi.shape[2] From e98482aecbf8e658a150c3f7473328bcd4006138 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 9 Jun 2025 13:33:50 -0600 Subject: [PATCH 028/158] Add imread() --- src/imgcodecs.cpp | 76 ++++++++++++++++++++++++++++++++++++++++++++ src/imgcodecs.h | 4 +++ src/opencv_upy.c | 27 ++++++++++++++++ src/opencv_upy.cmake | 1 + 4 files changed, 108 insertions(+) create mode 100644 src/imgcodecs.cpp create mode 100644 src/imgcodecs.h diff --git a/src/imgcodecs.cpp b/src/imgcodecs.cpp new file mode 100644 
index 0000000..7e2cb53
--- /dev/null
+++ b/src/imgcodecs.cpp
@@ -0,0 +1,76 @@
+// C++ headers
+#include "opencv2/core.hpp"
+#include "opencv2/imgcodecs.hpp"
+#include "convert.h"
+#include "numpy.h"
+
+// C headers
+extern "C" {
+#include "imgcodecs.h"
+#include "ulab/code/ndarray.h"
+#include "py/builtin.h"
+} // extern "C"
+
+using namespace cv;
+
+// Helper macro to create an empty mp_map_t, derived from MP_DEFINE_CONST_MAP.
+// Primarily used for function calls with no keyword arguments, since we can't
+// just pass `NULL` or mp_const_none (crash occurs otherwise)
+#define MP_EMPTY_MAP() { \
+    .all_keys_are_qstrs = 0, \
+    .is_fixed = 1, \
+    .is_ordered = 0, \
+    .used = 0, \
+    .alloc = 0, \
+    .table = (mp_map_elem_t *)(mp_rom_map_elem_t *)mp_const_none, \
+    }
+
+mp_obj_t cv2_imgcodecs_imread(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_filename, ARG_flags };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_filename, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_flags, MP_ARG_INT, { .u_int = IMREAD_COLOR_BGR } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    mp_obj_t filename = args[ARG_filename].u_obj;
+    int flags = args[ARG_flags].u_int;
+
+    // Call MicroPython's `open()` function to read the image file
+    mp_obj_t open_args[2];
+    open_args[0] = filename;
+    open_args[1] = mp_obj_new_str("rb", 2); // Open in binary read mode
+    mp_map_t open_kw_args = MP_EMPTY_MAP(); // No keyword arguments
+    mp_obj_t file_obj = mp_builtin_open(2, open_args, &open_kw_args);
+
+    // Call the `read()` method on the file object to get the image data
+    mp_obj_t read_method[2];
+    mp_load_method(file_obj, MP_QSTR_read, read_method);
+    mp_obj_t bytes_obj = mp_call_method_n_kw(0, 0, read_method);
+
+    // Close the file object
+    mp_obj_t close_method[2];
+    mp_load_method(file_obj, MP_QSTR_close, close_method);
+    mp_call_method_n_kw(0, 0, close_method);
+
+    // Convert bytes_obj to vector of uint8_t for decoding
+    size_t len;
+    const char *buf_data = mp_obj_str_get_data(bytes_obj, &len);
+    std::vector<uint8_t> buf(buf_data, buf_data + len);
+
+    // Decode the image from the buffer
+    Mat img;
+    try {
+        img = imdecode(buf, flags);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Return the image
+    return mat_to_mp_obj(img);
+}
diff --git a/src/imgcodecs.h b/src/imgcodecs.h
new file mode 100644
index 0000000..f336f3a
--- /dev/null
+++ b/src/imgcodecs.h
@@ -0,0 +1,4 @@
+// C headers
+#include "py/runtime.h"
+
+extern mp_obj_t cv2_imgcodecs_imread(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
diff --git a/src/opencv_upy.c b/src/opencv_upy.c
index 06ffc8c..fa38f56 100644
--- a/src/opencv_upy.c
+++ b/src/opencv_upy.c
@@ -1,6 +1,7 @@
 #include "core.h"
 #include "highgui.h"
 #include "imgproc.h"
+#include "imgcodecs.h"
 
 ////////////////////////////////////////////////////////////////////////////////
 // Python references to OpenCV functions
@@ -14,6 +15,9 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_imshow_obj, 2, cv2_highgui_imshow);
 static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_waitKey_obj, 0, cv2_highgui_waitKey);
 
+// OpenCV imgcodecs module
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgcodecs_imread_obj, 1, cv2_imgcodecs_imread);
+
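+// (Usage from MicroPython, as a sketch; the filename is illustrative:
+//     img = cv2.imread("image.png", cv2.IMREAD_COLOR)
+// imread() reads the file through MicroPython's open()/read() and then
+// decodes it with imdecode(), as implemented in imgcodecs.cpp above.)
+
 //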
OpenCV imgproc module static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_adaptiveThreshold_obj, 6, cv2_imgproc_adaptiveThreshold); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine); @@ -88,6 +92,23 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_BORDER_DEFAULT), MP_ROM_INT(4) }, { MP_ROM_QSTR(MP_QSTR_BORDER_ISOLATED), MP_ROM_INT(16) }, + // Image read mode flags, from opencv2/imgcodecs.hpp + { MP_ROM_QSTR(MP_QSTR_IMREAD_UNCHANGED), MP_ROM_INT(-1) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_GRAYSCALE), MP_ROM_INT(0) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR_BGR), MP_ROM_INT(1) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR), MP_ROM_INT(1) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_ANYDEPTH), MP_ROM_INT(2) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_ANYCOLOR), MP_ROM_INT(4) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_LOAD_GDAL), MP_ROM_INT(8) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_GRAYSCALE_2), MP_ROM_INT(16) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_COLOR_2), MP_ROM_INT(17) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_GRAYSCALE_4), MP_ROM_INT(32) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_COLOR_4), MP_ROM_INT(33) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_GRAYSCALE_8), MP_ROM_INT(64) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_COLOR_8), MP_ROM_INT(65) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_IGNORE_ORIENTATION), MP_ROM_INT(128) }, + { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR_RGB), MP_ROM_INT(256) }, + // Morphology operation types, from opencv2/imgproc.hpp { MP_ROM_QSTR(MP_QSTR_MORPH_ERODE), MP_ROM_INT(0) }, { MP_ROM_QSTR(MP_QSTR_MORPH_DILATE), MP_ROM_INT(1) }, @@ -223,6 +244,12 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_imshow), MP_ROM_PTR(&cv2_highgui_imshow_obj) }, { MP_ROM_QSTR(MP_QSTR_waitKey), MP_ROM_PTR(&cv2_highgui_waitKey_obj) }, + + //////////////////////////////////////////////////////////////////////////// + // OpenCV imgcodecs functions + //////////////////////////////////////////////////////////////////////////// + + { MP_ROM_QSTR(MP_QSTR_imread), MP_ROM_PTR(&cv2_imgcodecs_imread_obj) }, //////////////////////////////////////////////////////////////////////////// // OpenCV imgproc functions diff --git a/src/opencv_upy.cmake b/src/opencv_upy.cmake index 66367e6..60b5e67 100644 --- a/src/opencv_upy.cmake +++ b/src/opencv_upy.cmake @@ -7,6 +7,7 @@ target_sources(usermod_cv2 INTERFACE ${CMAKE_CURRENT_LIST_DIR}/convert.cpp ${CMAKE_CURRENT_LIST_DIR}/core.cpp ${CMAKE_CURRENT_LIST_DIR}/highgui.cpp + ${CMAKE_CURRENT_LIST_DIR}/imgcodecs.cpp ${CMAKE_CURRENT_LIST_DIR}/imgproc.cpp ${CMAKE_CURRENT_LIST_DIR}/numpy.cpp ${CMAKE_CURRENT_LIST_DIR}/opencv_upy.c From da558f4136a1d69688259ea1401effc4c97fb980 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 9 Jun 2025 13:37:59 -0600 Subject: [PATCH 029/158] Disable OPENJPEG in OpenCV --- src/opencv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opencv b/src/opencv index 3eaaee2..a7bc9ad 160000 --- a/src/opencv +++ b/src/opencv @@ -1 +1 @@ -Subproject commit 3eaaee27a26a217191a1da19d3d20d7a3d04f8a9 +Subproject commit a7bc9ad59cb0981c7647a7598569baca40e7d593 From 2d9605c9320e504bc11c1977821bdf8b2d354812 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 9 Jun 2025 16:16:00 -0600 Subject: [PATCH 030/158] Add imwrite() --- src/imgcodecs.cpp | 80 ++++++++++++++++++++++++++++++++++++++++++++++- src/imgcodecs.h | 1 + src/opencv_upy.c | 43 ++++++++++++++++++++++++- 3 files changed, 122 insertions(+), 2 deletions(-) diff --git a/src/imgcodecs.cpp b/src/imgcodecs.cpp index 
7e2cb53..d41f305 100644
--- a/src/imgcodecs.cpp
+++ b/src/imgcodecs.cpp
@@ -71,6 +71,84 @@ mp_obj_t cv2_imgcodecs_imread(size_t n_args, const mp_obj_t *pos_args, mp_map_t
         mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
     }
 
-    // Return the image
+    // Return the result
     return mat_to_mp_obj(img);
 }
+
+mp_obj_t cv2_imgcodecs_imwrite(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_filename, ARG_img, ARG_params };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_filename, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_params, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    mp_obj_t filename = args[ARG_filename].u_obj;
+    Mat img = mp_obj_to_mat(args[ARG_img].u_obj);
+    ndarray_obj_t *params;
+    if (args[ARG_params].u_obj == mp_const_none) {
+        // If no parameters are provided, use an empty ndarray
+        params = ndarray_new_linear_array(0, NDARRAY_INT16);
+    } else {
+        params = ndarray_from_mp_obj(args[ARG_params].u_obj, 0);
+    }
+
+    // Convert the filename to a std::string
+    size_t filename_len;
+    const char *filename_chr = mp_obj_str_get_data(filename, &filename_len);
+    std::string filename_str(filename_chr, filename_len);
+
+    // Create vector of uint8_t for encoding
+    std::vector<uint8_t> buf;
+
+    // Convert the parameters to a vector of int
+    std::vector<int> params_vec;
+    if (params->len > 0) {
+        params_vec.reserve(params->len);
+        for (size_t i = 0; i < params->len; ++i) {
+            mp_obj_t val = (mp_obj_t*) mp_binary_get_val_array(params->dtype, params->array, i);
+            // ndarrays default to float, and mp_obj_get_int() does not support
+            // float values, so we need to do the type conversion ourselves
+            mp_float_t val_float = mp_obj_get_float(val);
+            params_vec.push_back((int) val_float);
+        }
+    }
+
+    // Encode the image from the buffer
+    bool retval;
+    try {
+        retval = imencode(filename_str, img, buf, params_vec);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Convert the vector of uint8_t to a bytes object
+    mp_obj_t buf_obj = mp_obj_new_bytes((const byte *)buf.data(), buf.size());
+
+    // Call MicroPython's `open()` function to write the image file
+    mp_obj_t open_args[2];
+    open_args[0] = filename;
+    open_args[1] = mp_obj_new_str("wb", 2); // Open in binary write mode
+    mp_map_t open_kw_args = MP_EMPTY_MAP(); // No keyword arguments
+    mp_obj_t file_obj = mp_builtin_open(2, open_args, &open_kw_args);
+
+    // Call the `write()` method on the file object to write the image data
+    mp_obj_t write_method[3];
+    mp_load_method(file_obj, MP_QSTR_write, write_method);
+    write_method[2] = buf_obj; // Set the data to write
+    mp_call_method_n_kw(1, 0, write_method);
+
+    // Close the file object
+    mp_obj_t close_method[2];
+    mp_load_method(file_obj, MP_QSTR_close, close_method);
+    mp_call_method_n_kw(0, 0, close_method);
+
+    // Return the result
+    return mp_obj_new_bool(retval);
+}
diff --git a/src/imgcodecs.h b/src/imgcodecs.h
index f336f3a..fe9095c 100644
--- a/src/imgcodecs.h
+++ b/src/imgcodecs.h
@@ -2,3 +2,4 @@
 #include "py/runtime.h"
 
 extern mp_obj_t cv2_imgcodecs_imread(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_imgcodecs_imwrite(size_t n_args, const mp_obj_t
*pos_args, mp_map_t *kw_args); diff --git a/src/opencv_upy.c b/src/opencv_upy.c index fa38f56..9039639 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -17,6 +17,7 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_waitKey_obj, 0, cv2_highgui_waitKe // OpenCV imgcodecs module static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgcodecs_imread_obj, 1, cv2_imgcodecs_imread); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgcodecs_imwrite_obj, 2, cv2_imgcodecs_imwrite); // OpenCV imgproc module static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_adaptiveThreshold_obj, 6, cv2_imgproc_adaptiveThreshold); @@ -92,7 +93,7 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_BORDER_DEFAULT), MP_ROM_INT(4) }, { MP_ROM_QSTR(MP_QSTR_BORDER_ISOLATED), MP_ROM_INT(16) }, - // Image read mode flags, from opencv2/imgcodecs.hpp + // Image read flags, from opencv2/imgcodecs.hpp { MP_ROM_QSTR(MP_QSTR_IMREAD_UNCHANGED), MP_ROM_INT(-1) }, { MP_ROM_QSTR(MP_QSTR_IMREAD_GRAYSCALE), MP_ROM_INT(0) }, { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR_BGR), MP_ROM_INT(1) }, @@ -109,6 +110,45 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_IMREAD_IGNORE_ORIENTATION), MP_ROM_INT(128) }, { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR_RGB), MP_ROM_INT(256) }, + // Image write flags, from opencv2/imgcodecs.hpp + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_QUALITY), MP_ROM_INT(1) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_PROGRESSIVE), MP_ROM_INT(2) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_OPTIMIZE), MP_ROM_INT(3) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_RST_INTERVAL), MP_ROM_INT(4) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_LUMA_QUALITY), MP_ROM_INT(5) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_CHROMA_QUALITY), MP_ROM_INT(6) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_SAMPLING_FACTOR), MP_ROM_INT(7) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_PNG_COMPRESSION), MP_ROM_INT(16) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_PNG_STRATEGY), MP_ROM_INT(17) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_PNG_BILEVEL), MP_ROM_INT(18) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_PXM_BINARY), MP_ROM_INT(32) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_EXR_TYPE), MP_ROM_INT((3 << 4) + 0) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_EXR_COMPRESSION), MP_ROM_INT((3 << 4) + 1) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_EXR_DWA_COMPRESSION_LEVEL), MP_ROM_INT((3 << 4) + 2) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_WEBP_QUALITY), MP_ROM_INT(64) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_HDR_COMPRESSION), MP_ROM_INT((5 << 4) + 0) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_PAM_TUPLETYPE), MP_ROM_INT(128) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_RESUNIT), MP_ROM_INT(256) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_XDPI), MP_ROM_INT(257) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_YDPI), MP_ROM_INT(258) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_COMPRESSION), MP_ROM_INT(259) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_ROWSPERSTRIP), MP_ROM_INT(278) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_PREDICTOR), MP_ROM_INT(317) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG2000_COMPRESSION_X1000), MP_ROM_INT(272) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_AVIF_QUALITY), MP_ROM_INT(512) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_AVIF_DEPTH), MP_ROM_INT(513) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_AVIF_SPEED), MP_ROM_INT(514) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_QUALITY), MP_ROM_INT(640) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_EFFORT), MP_ROM_INT(641) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_DISTANCE), MP_ROM_INT(642) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_DECODING_SPEED), MP_ROM_INT(643) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_LOOP), MP_ROM_INT(1024) }, + { 
MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_SPEED), MP_ROM_INT(1025) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_QUALITY), MP_ROM_INT(1026) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_DITHER), MP_ROM_INT(1027) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_TRANSPARENCY), MP_ROM_INT(1028) }, + { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_COLORTABLE), MP_ROM_INT(1029) }, + // Morphology operation types, from opencv2/imgproc.hpp { MP_ROM_QSTR(MP_QSTR_MORPH_ERODE), MP_ROM_INT(0) }, { MP_ROM_QSTR(MP_QSTR_MORPH_DILATE), MP_ROM_INT(1) }, @@ -250,6 +290,7 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = { //////////////////////////////////////////////////////////////////////////// { MP_ROM_QSTR(MP_QSTR_imread), MP_ROM_PTR(&cv2_imgcodecs_imread_obj) }, + { MP_ROM_QSTR(MP_QSTR_imwrite), MP_ROM_PTR(&cv2_imgcodecs_imwrite_obj) }, //////////////////////////////////////////////////////////////////////////// // OpenCV imgproc functions From e2defe4e1eb94d8e0636f8852966159f1c834168 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 9 Jun 2025 16:51:07 -0600 Subject: [PATCH 031/158] Move module globals into module headers A bit tidier organization, easier to manage --- src/core.h | 30 +++++ src/highgui.h | 11 ++ src/imgcodecs.h | 68 ++++++++++ src/imgproc.h | 200 ++++++++++++++++++++++++++++ src/opencv_upy.c | 334 ++--------------------------------------------- 5 files changed, 317 insertions(+), 326 deletions(-) diff --git a/src/core.h b/src/core.h index 71d922f..dc289c6 100644 --- a/src/core.h +++ b/src/core.h @@ -1,5 +1,35 @@ // C headers #include "py/runtime.h" +// Function declarations extern mp_obj_t cv2_core_convertScaleAbs(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); + +// Python references to the functions +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_convertScaleAbs_obj, 1, cv2_core_convertScaleAbs); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange); + +// Global definitions for functions and constants +#define OPENCV_CORE_GLOBALS \ + /* Functions */ \ + { MP_ROM_QSTR(MP_QSTR_convertScaleAbs), MP_ROM_PTR(&cv2_core_convertScaleAbs_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_inRange), MP_ROM_PTR(&cv2_core_inRange_obj) }, \ + \ + /* OpenCV data types, from opencv2/core/hal/interface.h */ \ + /* Other types are currently not supported by ulab */ \ + { MP_ROM_QSTR(MP_QSTR_CV_8U), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_CV_8S), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_CV_16U), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_CV_16S), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_CV_32F), MP_ROM_INT(4) }, \ + \ + /* Border types, from opencv2/core/base.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_BORDER_CONSTANT), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_BORDER_REPLICATE), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_BORDER_WRAP), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT_101), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_BORDER_TRANSPARENT), MP_ROM_INT(5) }, \ + { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT101), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_BORDER_DEFAULT), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_BORDER_ISOLATED), MP_ROM_INT(16) } diff --git a/src/highgui.h b/src/highgui.h index 92f5ed5..1c4438d 100644 --- a/src/highgui.h +++ b/src/highgui.h @@ -1,5 +1,16 @@ // C headers #include "py/runtime.h" +// Function declarations extern mp_obj_t cv2_highgui_imshow(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t 
cv2_highgui_waitKey(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); + +// Python references to the functions +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_imshow_obj, 2, cv2_highgui_imshow); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_waitKey_obj, 0, cv2_highgui_waitKey); + +// Global definitions for functions and constants +#define OPENCV_HIGHGUI_GLOBALS \ + /* Functions */ \ + { MP_ROM_QSTR(MP_QSTR_imshow), MP_ROM_PTR(&cv2_highgui_imshow_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_waitKey), MP_ROM_PTR(&cv2_highgui_waitKey_obj) } diff --git a/src/imgcodecs.h b/src/imgcodecs.h index fe9095c..f7e64f3 100644 --- a/src/imgcodecs.h +++ b/src/imgcodecs.h @@ -1,5 +1,73 @@ // C headers #include "py/runtime.h" +// Function declarations extern mp_obj_t cv2_imgcodecs_imread(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgcodecs_imwrite(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); + +// Python references to the functions +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgcodecs_imread_obj, 1, cv2_imgcodecs_imread); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgcodecs_imwrite_obj, 2, cv2_imgcodecs_imwrite); + +// Global definitions for functions and constants +#define OPENCV_IMGCODECS_GLOBALS \ + /* Functions */ \ + { MP_ROM_QSTR(MP_QSTR_imread), MP_ROM_PTR(&cv2_imgcodecs_imread_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_imwrite), MP_ROM_PTR(&cv2_imgcodecs_imwrite_obj) }, \ + \ + /* Image read flags, from opencv2/imgcodecs.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_UNCHANGED), MP_ROM_INT(-1) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_GRAYSCALE), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR_BGR), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_ANYDEPTH), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_ANYCOLOR), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_LOAD_GDAL), MP_ROM_INT(8) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_GRAYSCALE_2), MP_ROM_INT(16) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_COLOR_2), MP_ROM_INT(17) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_GRAYSCALE_4), MP_ROM_INT(32) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_COLOR_4), MP_ROM_INT(33) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_GRAYSCALE_8), MP_ROM_INT(64) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_COLOR_8), MP_ROM_INT(65) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_IGNORE_ORIENTATION), MP_ROM_INT(128) }, \ + { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR_RGB), MP_ROM_INT(256) }, \ + \ + /* Image write flags, from opencv2/imgcodecs.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_QUALITY), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_PROGRESSIVE), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_OPTIMIZE), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_RST_INTERVAL), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_LUMA_QUALITY), MP_ROM_INT(5) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_CHROMA_QUALITY), MP_ROM_INT(6) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_SAMPLING_FACTOR), MP_ROM_INT(7) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_PNG_COMPRESSION), MP_ROM_INT(16) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_PNG_STRATEGY), MP_ROM_INT(17) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_PNG_BILEVEL), MP_ROM_INT(18) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_PXM_BINARY), MP_ROM_INT(32) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_EXR_TYPE), MP_ROM_INT((3 << 4) + 0) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_EXR_COMPRESSION), MP_ROM_INT((3 << 4) + 1) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_EXR_DWA_COMPRESSION_LEVEL), MP_ROM_INT((3 << 4) + 2) }, \ + { 
MP_ROM_QSTR(MP_QSTR_IMWRITE_WEBP_QUALITY), MP_ROM_INT(64) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_HDR_COMPRESSION), MP_ROM_INT((5 << 4) + 0) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_PAM_TUPLETYPE), MP_ROM_INT(128) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_RESUNIT), MP_ROM_INT(256) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_XDPI), MP_ROM_INT(257) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_YDPI), MP_ROM_INT(258) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_COMPRESSION), MP_ROM_INT(259) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_ROWSPERSTRIP), MP_ROM_INT(278) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_PREDICTOR), MP_ROM_INT(317) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG2000_COMPRESSION_X1000), MP_ROM_INT(272) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_AVIF_QUALITY), MP_ROM_INT(512) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_AVIF_DEPTH), MP_ROM_INT(513) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_AVIF_SPEED), MP_ROM_INT(514) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_QUALITY), MP_ROM_INT(640) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_EFFORT), MP_ROM_INT(641) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_DISTANCE), MP_ROM_INT(642) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_DECODING_SPEED), MP_ROM_INT(643) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_LOOP), MP_ROM_INT(1024) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_SPEED), MP_ROM_INT(1025) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_QUALITY), MP_ROM_INT(1026) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_DITHER), MP_ROM_INT(1027) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_TRANSPARENCY), MP_ROM_INT(1028) }, \ + { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_COLORTABLE), MP_ROM_INT(1029) } + \ No newline at end of file diff --git a/src/imgproc.h b/src/imgproc.h index 75480ed..4a28175 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -1,6 +1,7 @@ // C headers #include "py/runtime.h" +// Function declarations extern mp_obj_t cv2_imgproc_adaptiveThreshold(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_bilateralFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -36,3 +37,202 @@ extern mp_obj_t cv2_imgproc_Scharr(size_t n_args, const mp_obj_t *pos_args, mp_m extern mp_obj_t cv2_imgproc_Sobel(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_spatialGradient(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_threshold(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); + +// Python references to the functions +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_adaptiveThreshold_obj, 6, cv2_imgproc_adaptiveThreshold); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_bilateralFilter_obj, 4, cv2_imgproc_bilateralFilter); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_blur_obj, 2, cv2_imgproc_blur); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boxFilter_obj, 3, cv2_imgproc_boxFilter); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Canny_obj, 3, cv2_imgproc_Canny); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponents_obj, 1, cv2_imgproc_connectedComponents); +// static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponentsWithStats_obj, 1, cv2_imgproc_connectedComponentsWithStats); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_circle_obj, 4, cv2_imgproc_circle); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_cvtColor_obj, 2, cv2_imgproc_cvtColor); +static 
MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_dilate_obj, 2, cv2_imgproc_dilate); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_drawMarker_obj, 3, cv2_imgproc_drawMarker); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_ellipse_obj, 7, cv2_imgproc_ellipse); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_erode_obj, 2, cv2_imgproc_erode); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillConvexPoly_obj, 3, cv2_imgproc_fillConvexPoly); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillPoly_obj, 3, cv2_imgproc_fillPoly); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_filter2D_obj, 3, cv2_imgproc_filter2D); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_GaussianBlur_obj, 3, cv2_imgproc_GaussianBlur); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_getStructuringElement_obj, 2, cv2_imgproc_getStructuringElement); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCircles_obj, 4, cv2_imgproc_HoughCircles); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCirclesWithAccumulator_obj, 4, cv2_imgproc_HoughCirclesWithAccumulator); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLines_obj, 4, cv2_imgproc_HoughLines); +// static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesP_obj, 4, cv2_imgproc_HoughLinesP); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesWithAccumulator_obj, 4, cv2_imgproc_HoughLinesWithAccumulator); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Laplacian_obj, 2, cv2_imgproc_Laplacian); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_line_obj, 4, cv2_imgproc_line); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_matchTemplate_obj, 3, cv2_imgproc_matchTemplate); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_medianBlur_obj, 2, cv2_imgproc_medianBlur); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_putText_obj, 6, cv2_imgproc_putText); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_rectangle_obj, 4, cv2_imgproc_rectangle); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Scharr_obj, 4, cv2_imgproc_Scharr); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Sobel_obj, 4, cv2_imgproc_Sobel); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_spatialGradient_obj, 1, cv2_imgproc_spatialGradient); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_threshold); + +// Global definitions for functions and constants +#define OPENCV_IMGPROC_GLOBALS \ + /* Functions */ \ + { MP_ROM_QSTR(MP_QSTR_adaptiveThreshold), MP_ROM_PTR(&cv2_imgproc_adaptiveThreshold_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_arrowedLine), MP_ROM_PTR(&cv2_imgproc_arrowedLine_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_bilateralFilter), MP_ROM_PTR(&cv2_imgproc_bilateralFilter_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_blur), MP_ROM_PTR(&cv2_imgproc_blur_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_boxFilter), MP_ROM_PTR(&cv2_imgproc_boxFilter_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_Canny), MP_ROM_PTR(&cv2_imgproc_Canny_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_connectedComponents), MP_ROM_PTR(&cv2_imgproc_connectedComponents_obj) }, \ + /* { MP_ROM_QSTR(MP_QSTR_connectedComponentsWithStats), MP_ROM_PTR(&cv2_imgproc_connectedComponentsWithStats_obj) }, */ \ + { MP_ROM_QSTR(MP_QSTR_circle), MP_ROM_PTR(&cv2_imgproc_circle_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_cvtColor), MP_ROM_PTR(&cv2_imgproc_cvtColor_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_dilate), MP_ROM_PTR(&cv2_imgproc_dilate_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_drawMarker), MP_ROM_PTR(&cv2_imgproc_drawMarker_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_ellipse), MP_ROM_PTR(&cv2_imgproc_ellipse_obj) }, \ + { 
MP_ROM_QSTR(MP_QSTR_erode), MP_ROM_PTR(&cv2_imgproc_erode_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_fillConvexPoly), MP_ROM_PTR(&cv2_imgproc_fillConvexPoly_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_fillPoly), MP_ROM_PTR(&cv2_imgproc_fillPoly_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_filter2D), MP_ROM_PTR(&cv2_imgproc_filter2D_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_GaussianBlur), MP_ROM_PTR(&cv2_imgproc_GaussianBlur_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_getStructuringElement), MP_ROM_PTR(&cv2_imgproc_getStructuringElement_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_HoughCircles), MP_ROM_PTR(&cv2_imgproc_HoughCircles_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_HoughCirclesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughCirclesWithAccumulator_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_HoughLines), MP_ROM_PTR(&cv2_imgproc_HoughLines_obj) }, \ + /* { MP_ROM_QSTR(MP_QSTR_HoughLinesP), MP_ROM_PTR(&cv2_imgproc_HoughLinesP_obj) }, */ \ + { MP_ROM_QSTR(MP_QSTR_HoughLinesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughLinesWithAccumulator_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_Laplacian), MP_ROM_PTR(&cv2_imgproc_Laplacian_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_line), MP_ROM_PTR(&cv2_imgproc_line_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_matchTemplate), MP_ROM_PTR(&cv2_imgproc_matchTemplate_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_medianBlur), MP_ROM_PTR(&cv2_imgproc_medianBlur_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_morphologyEx), MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_putText), MP_ROM_PTR(&cv2_imgproc_putText_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_rectangle), MP_ROM_PTR(&cv2_imgproc_rectangle_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_Scharr), MP_ROM_PTR(&cv2_imgproc_Scharr_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_Sobel), MP_ROM_PTR(&cv2_imgproc_Sobel_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_spatialGradient), MP_ROM_PTR(&cv2_imgproc_spatialGradient_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_threshold), MP_ROM_PTR(&cv2_imgproc_threshold_obj) }, \ + \ + /* Morphology operation types, from opencv2/imgproc.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_MORPH_ERODE), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_MORPH_DILATE), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_MORPH_OPEN), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_MORPH_CLOSE), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_MORPH_GRADIENT), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_MORPH_TOPHAT), MP_ROM_INT(5) }, \ + { MP_ROM_QSTR(MP_QSTR_MORPH_BLACKHAT), MP_ROM_INT(6) }, \ + { MP_ROM_QSTR(MP_QSTR_MORPH_HITMISS), MP_ROM_INT(7) }, \ + \ + /* Morphology shapes, from opencv2/imgproc.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_MORPH_RECT), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_MORPH_CROSS), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_MORPH_ELLIPSE), MP_ROM_INT(2) }, \ + \ + /* Threshold types, from opencv2/imgproc.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_THRESH_BINARY), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_THRESH_BINARY_INV), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_THRESH_TRUNC), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_THRESH_TOZERO), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_THRESH_TOZERO_INV), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_THRESH_MASK), MP_ROM_INT(7) }, \ + { MP_ROM_QSTR(MP_QSTR_THRESH_OTSU), MP_ROM_INT(8) }, \ + { MP_ROM_QSTR(MP_QSTR_THRESH_TRIANGLE), MP_ROM_INT(16) }, \ + \ + /* Adaptive threshold methods, from opencv2/imgproc.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_ADAPTIVE_THRESH_MEAN_C), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_ADAPTIVE_THRESH_GAUSSIAN_C), MP_ROM_INT(1) }, \ + \ + /* Hough modes, from opencv2/imgproc.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_HOUGH_STANDARD), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_HOUGH_PROBABILISTIC), MP_ROM_INT(1) }, \ + { 
MP_ROM_QSTR(MP_QSTR_HOUGH_MULTI_SCALE), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_HOUGH_GRADIENT), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_HOUGH_GRADIENT_ALT), MP_ROM_INT(4) }, \ + \ + /* Color conversion codes, from opencv2/imgproc.hpp */ \ + /* Not all are included, to reduce bloat */ \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2BGRA), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2RGBA), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2BGR), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2RGB), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2RGBA), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2BGRA), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2BGR), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2RGB), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2RGB), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2BGR), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2RGBA), MP_ROM_INT(5) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2BGRA), MP_ROM_INT(5) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2GRAY), MP_ROM_INT(6) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2GRAY), MP_ROM_INT(7) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2BGR), MP_ROM_INT(8) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2RGB), MP_ROM_INT(8) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2BGRA), MP_ROM_INT(9) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2RGBA), MP_ROM_INT(9) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2GRAY), MP_ROM_INT(10) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2GRAY), MP_ROM_INT(11) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2BGR565), MP_ROM_INT(12) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2BGR565), MP_ROM_INT(13) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652BGR), MP_ROM_INT(14) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652RGB), MP_ROM_INT(15) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2BGR565), MP_ROM_INT(16) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2BGR565), MP_ROM_INT(17) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652BGRA), MP_ROM_INT(18) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652RGBA), MP_ROM_INT(19) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2BGR565), MP_ROM_INT(20) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652GRAY), MP_ROM_INT(21) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2YCrCb), MP_ROM_INT(36) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2YCrCb), MP_ROM_INT(37) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_YCrCb2BGR), MP_ROM_INT(38) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_YCrCb2RGB), MP_ROM_INT(39) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2HSV), MP_ROM_INT(40) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2HSV), MP_ROM_INT(41) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_HSV2BGR), MP_ROM_INT(54) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_HSV2RGB), MP_ROM_INT(55) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BayerBG2BGR), MP_ROM_INT(46) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGB2BGR), MP_ROM_INT(47) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BayerRG2BGR), MP_ROM_INT(48) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGR2BGR), MP_ROM_INT(49) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BayerRG2RGB), MP_ROM_INT(46) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGR2RGB), MP_ROM_INT(47) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BayerBG2RGB), MP_ROM_INT(48) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGB2RGB), MP_ROM_INT(49) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BayerBG2GRAY), MP_ROM_INT(86) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGB2GRAY), MP_ROM_INT(87) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BayerRG2GRAY), MP_ROM_INT(88) }, \ + { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGR2GRAY), MP_ROM_INT(89) }, \ + \ + /* Line types, from opencv2/imgproc.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_FILLED), MP_ROM_INT(-1) }, \ + { MP_ROM_QSTR(MP_QSTR_LINE_4), MP_ROM_INT(4) }, \ + 
{ MP_ROM_QSTR(MP_QSTR_LINE_8), MP_ROM_INT(8) }, \ + { MP_ROM_QSTR(MP_QSTR_LINE_AA), MP_ROM_INT(16) }, \ + \ + /* Hershey fonts, from opencv2/imgproc.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SIMPLEX), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_PLAIN), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_DUPLEX), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_COMPLEX), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_TRIPLEX), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_COMPLEX_SMALL), MP_ROM_INT(5) }, \ + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SCRIPT_SIMPLEX), MP_ROM_INT(6) }, \ + { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SCRIPT_COMPLEX), MP_ROM_INT(7) }, \ + { MP_ROM_QSTR(MP_QSTR_FONT_ITALIC), MP_ROM_INT(16) }, \ + \ + /* Marker types, from opencv2/imgproc.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_MARKER_CROSS), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_MARKER_TILTED_CROSS), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_MARKER_STAR), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_MARKER_DIAMOND), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_MARKER_SQUARE), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_MARKER_TRIANGLE_UP), MP_ROM_INT(5) }, \ + { MP_ROM_QSTR(MP_QSTR_MARKER_TRIANGLE_DOWN), MP_ROM_INT(6) }, \ + \ + /* Template matching modes, from opencv2/imgproc.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_TM_SQDIFF), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_TM_SQDIFF_NORMED), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_TM_CCORR), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_TM_CCORR_NORMED), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_TM_CCOEFF), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_TM_CCOEFF_NORMED), MP_ROM_INT(5) } diff --git a/src/opencv_upy.c b/src/opencv_upy.c index 9039639..e2cbb59 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -1,336 +1,18 @@ #include "core.h" #include "highgui.h" -#include "imgproc.h" #include "imgcodecs.h" +#include "imgproc.h" -//////////////////////////////////////////////////////////////////////////////// -// Python references to OpenCV functions -//////////////////////////////////////////////////////////////////////////////// - -// OpenCV core module -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_convertScaleAbs_obj, 1, cv2_core_convertScaleAbs); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange); - -// OpenCV highgui module -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_imshow_obj, 2, cv2_highgui_imshow); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_waitKey_obj, 0, cv2_highgui_waitKey); - -// OpenCV imgcodecs module -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgcodecs_imread_obj, 1, cv2_imgcodecs_imread); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgcodecs_imwrite_obj, 2, cv2_imgcodecs_imwrite); - -// OpenCV imgproc module -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_adaptiveThreshold_obj, 6, cv2_imgproc_adaptiveThreshold); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_bilateralFilter_obj, 4, cv2_imgproc_bilateralFilter); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_blur_obj, 2, cv2_imgproc_blur); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boxFilter_obj, 3, cv2_imgproc_boxFilter); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Canny_obj, 3, cv2_imgproc_Canny); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponents_obj, 1, cv2_imgproc_connectedComponents); -// static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponentsWithStats_obj, 1, cv2_imgproc_connectedComponentsWithStats); -static 
MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_circle_obj, 4, cv2_imgproc_circle); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_cvtColor_obj, 2, cv2_imgproc_cvtColor); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_dilate_obj, 2, cv2_imgproc_dilate); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_drawMarker_obj, 3, cv2_imgproc_drawMarker); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_ellipse_obj, 7, cv2_imgproc_ellipse); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_erode_obj, 2, cv2_imgproc_erode); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillConvexPoly_obj, 3, cv2_imgproc_fillConvexPoly); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillPoly_obj, 3, cv2_imgproc_fillPoly); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_filter2D_obj, 3, cv2_imgproc_filter2D); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_GaussianBlur_obj, 3, cv2_imgproc_GaussianBlur); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_getStructuringElement_obj, 2, cv2_imgproc_getStructuringElement); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCircles_obj, 4, cv2_imgproc_HoughCircles); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCirclesWithAccumulator_obj, 4, cv2_imgproc_HoughCirclesWithAccumulator); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLines_obj, 4, cv2_imgproc_HoughLines); -// static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesP_obj, 4, cv2_imgproc_HoughLinesP); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesWithAccumulator_obj, 4, cv2_imgproc_HoughLinesWithAccumulator); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Laplacian_obj, 2, cv2_imgproc_Laplacian); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_line_obj, 4, cv2_imgproc_line); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_matchTemplate_obj, 3, cv2_imgproc_matchTemplate); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_medianBlur_obj, 2, cv2_imgproc_medianBlur); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_putText_obj, 6, cv2_imgproc_putText); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_rectangle_obj, 4, cv2_imgproc_rectangle); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Scharr_obj, 4, cv2_imgproc_Scharr); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Sobel_obj, 4, cv2_imgproc_Sobel); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_spatialGradient_obj, 1, cv2_imgproc_spatialGradient); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_threshold); - -//////////////////////////////////////////////////////////////////////////////// -// Module attributes -//////////////////////////////////////////////////////////////////////////////// +// Python module globals dictionary static const mp_rom_map_elem_t cv2_module_globals_table[] = { - //////////////////////////////////////////////////////////////////////////// - // Module name - //////////////////////////////////////////////////////////////////////////// - + // Python module name { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_cv2) }, - //////////////////////////////////////////////////////////////////////////// - // Constants - //////////////////////////////////////////////////////////////////////////// - - // These constants are defined by in OpenCV's header files, however we can't - // include them here because it's C++ and this is C, so we have to redefine - // them here. Only a subset of the most common conversions are included. 
- - // OpenCV data types, from opencv2/core/hal/interface.h - { MP_ROM_QSTR(MP_QSTR_CV_8U), MP_ROM_INT(0) }, - { MP_ROM_QSTR(MP_QSTR_CV_8S), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_CV_16U), MP_ROM_INT(2) }, - { MP_ROM_QSTR(MP_QSTR_CV_16S), MP_ROM_INT(3) }, - { MP_ROM_QSTR(MP_QSTR_CV_32F), MP_ROM_INT(4) }, - // Other types are currently not supported by ulab - - // Border types, from opencv2/core/base.hpp - { MP_ROM_QSTR(MP_QSTR_BORDER_CONSTANT), MP_ROM_INT(0) }, - { MP_ROM_QSTR(MP_QSTR_BORDER_REPLICATE), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT), MP_ROM_INT(2) }, - { MP_ROM_QSTR(MP_QSTR_BORDER_WRAP), MP_ROM_INT(3) }, - { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT_101), MP_ROM_INT(4) }, - { MP_ROM_QSTR(MP_QSTR_BORDER_TRANSPARENT), MP_ROM_INT(5) }, - { MP_ROM_QSTR(MP_QSTR_BORDER_REFLECT101), MP_ROM_INT(4) }, - { MP_ROM_QSTR(MP_QSTR_BORDER_DEFAULT), MP_ROM_INT(4) }, - { MP_ROM_QSTR(MP_QSTR_BORDER_ISOLATED), MP_ROM_INT(16) }, - - // Image read flags, from opencv2/imgcodecs.hpp - { MP_ROM_QSTR(MP_QSTR_IMREAD_UNCHANGED), MP_ROM_INT(-1) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_GRAYSCALE), MP_ROM_INT(0) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR_BGR), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_ANYDEPTH), MP_ROM_INT(2) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_ANYCOLOR), MP_ROM_INT(4) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_LOAD_GDAL), MP_ROM_INT(8) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_GRAYSCALE_2), MP_ROM_INT(16) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_COLOR_2), MP_ROM_INT(17) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_GRAYSCALE_4), MP_ROM_INT(32) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_COLOR_4), MP_ROM_INT(33) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_GRAYSCALE_8), MP_ROM_INT(64) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_REDUCED_COLOR_8), MP_ROM_INT(65) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_IGNORE_ORIENTATION), MP_ROM_INT(128) }, - { MP_ROM_QSTR(MP_QSTR_IMREAD_COLOR_RGB), MP_ROM_INT(256) }, - - // Image write flags, from opencv2/imgcodecs.hpp - { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_QUALITY), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_PROGRESSIVE), MP_ROM_INT(2) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_OPTIMIZE), MP_ROM_INT(3) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_RST_INTERVAL), MP_ROM_INT(4) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_LUMA_QUALITY), MP_ROM_INT(5) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_CHROMA_QUALITY), MP_ROM_INT(6) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG_SAMPLING_FACTOR), MP_ROM_INT(7) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_PNG_COMPRESSION), MP_ROM_INT(16) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_PNG_STRATEGY), MP_ROM_INT(17) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_PNG_BILEVEL), MP_ROM_INT(18) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_PXM_BINARY), MP_ROM_INT(32) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_EXR_TYPE), MP_ROM_INT((3 << 4) + 0) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_EXR_COMPRESSION), MP_ROM_INT((3 << 4) + 1) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_EXR_DWA_COMPRESSION_LEVEL), MP_ROM_INT((3 << 4) + 2) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_WEBP_QUALITY), MP_ROM_INT(64) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_HDR_COMPRESSION), MP_ROM_INT((5 << 4) + 0) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_PAM_TUPLETYPE), MP_ROM_INT(128) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_RESUNIT), MP_ROM_INT(256) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_XDPI), MP_ROM_INT(257) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_YDPI), MP_ROM_INT(258) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_COMPRESSION), MP_ROM_INT(259) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_ROWSPERSTRIP), MP_ROM_INT(278) }, - { 
MP_ROM_QSTR(MP_QSTR_IMWRITE_TIFF_PREDICTOR), MP_ROM_INT(317) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEG2000_COMPRESSION_X1000), MP_ROM_INT(272) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_AVIF_QUALITY), MP_ROM_INT(512) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_AVIF_DEPTH), MP_ROM_INT(513) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_AVIF_SPEED), MP_ROM_INT(514) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_QUALITY), MP_ROM_INT(640) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_EFFORT), MP_ROM_INT(641) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_DISTANCE), MP_ROM_INT(642) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_JPEGXL_DECODING_SPEED), MP_ROM_INT(643) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_LOOP), MP_ROM_INT(1024) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_SPEED), MP_ROM_INT(1025) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_QUALITY), MP_ROM_INT(1026) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_DITHER), MP_ROM_INT(1027) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_TRANSPARENCY), MP_ROM_INT(1028) }, - { MP_ROM_QSTR(MP_QSTR_IMWRITE_GIF_COLORTABLE), MP_ROM_INT(1029) }, - - // Morphology operation types, from opencv2/imgproc.hpp - { MP_ROM_QSTR(MP_QSTR_MORPH_ERODE), MP_ROM_INT(0) }, - { MP_ROM_QSTR(MP_QSTR_MORPH_DILATE), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_MORPH_OPEN), MP_ROM_INT(2) }, - { MP_ROM_QSTR(MP_QSTR_MORPH_CLOSE), MP_ROM_INT(3) }, - { MP_ROM_QSTR(MP_QSTR_MORPH_GRADIENT), MP_ROM_INT(4) }, - { MP_ROM_QSTR(MP_QSTR_MORPH_TOPHAT), MP_ROM_INT(5) }, - { MP_ROM_QSTR(MP_QSTR_MORPH_BLACKHAT), MP_ROM_INT(6) }, - { MP_ROM_QSTR(MP_QSTR_MORPH_HITMISS), MP_ROM_INT(7) }, - - // Morphology shapes, from opencv2/imgproc.hpp - { MP_ROM_QSTR(MP_QSTR_MORPH_RECT), MP_ROM_INT(0) }, - { MP_ROM_QSTR(MP_QSTR_MORPH_CROSS), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_MORPH_ELLIPSE), MP_ROM_INT(2) }, - - // Threshold types, from opencv2/imgproc.hpp - { MP_ROM_QSTR(MP_QSTR_THRESH_BINARY), MP_ROM_INT(0) }, - { MP_ROM_QSTR(MP_QSTR_THRESH_BINARY_INV), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_THRESH_TRUNC), MP_ROM_INT(2) }, - { MP_ROM_QSTR(MP_QSTR_THRESH_TOZERO), MP_ROM_INT(3) }, - { MP_ROM_QSTR(MP_QSTR_THRESH_TOZERO_INV), MP_ROM_INT(4) }, - { MP_ROM_QSTR(MP_QSTR_THRESH_MASK), MP_ROM_INT(7) }, - { MP_ROM_QSTR(MP_QSTR_THRESH_OTSU), MP_ROM_INT(8) }, - { MP_ROM_QSTR(MP_QSTR_THRESH_TRIANGLE), MP_ROM_INT(16) }, - - // Adaptive threshold methods, from opencv2/imgproc.hpp - { MP_ROM_QSTR(MP_QSTR_ADAPTIVE_THRESH_MEAN_C), MP_ROM_INT(0) }, - { MP_ROM_QSTR(MP_QSTR_ADAPTIVE_THRESH_GAUSSIAN_C), MP_ROM_INT(1) }, - - // Hough modes, from opencv2/imgproc.hpp - { MP_ROM_QSTR(MP_QSTR_HOUGH_STANDARD), MP_ROM_INT(0) }, - { MP_ROM_QSTR(MP_QSTR_HOUGH_PROBABILISTIC), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_HOUGH_MULTI_SCALE), MP_ROM_INT(2) }, - { MP_ROM_QSTR(MP_QSTR_HOUGH_GRADIENT), MP_ROM_INT(3) }, - { MP_ROM_QSTR(MP_QSTR_HOUGH_GRADIENT_ALT), MP_ROM_INT(4) }, - - // Color conversion codes, from opencv2/imgproc.hpp - { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2BGRA), MP_ROM_INT(0) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2RGBA), MP_ROM_INT(0) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2BGR), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2RGB), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2RGBA), MP_ROM_INT(2) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2BGRA), MP_ROM_INT(2) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2BGR), MP_ROM_INT(3) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2RGB), MP_ROM_INT(3) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2RGB), MP_ROM_INT(4) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2BGR), MP_ROM_INT(4) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2RGBA), MP_ROM_INT(5) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2BGRA), MP_ROM_INT(5) }, - { 
MP_ROM_QSTR(MP_QSTR_COLOR_BGR2GRAY), MP_ROM_INT(6) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2GRAY), MP_ROM_INT(7) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2BGR), MP_ROM_INT(8) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2RGB), MP_ROM_INT(8) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2BGRA), MP_ROM_INT(9) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2RGBA), MP_ROM_INT(9) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2GRAY), MP_ROM_INT(10) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2GRAY), MP_ROM_INT(11) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2BGR565), MP_ROM_INT(12) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2BGR565), MP_ROM_INT(13) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652BGR), MP_ROM_INT(14) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652RGB), MP_ROM_INT(15) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGRA2BGR565), MP_ROM_INT(16) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_RGBA2BGR565), MP_ROM_INT(17) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652BGRA), MP_ROM_INT(18) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652RGBA), MP_ROM_INT(19) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_GRAY2BGR565), MP_ROM_INT(20) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGR5652GRAY), MP_ROM_INT(21) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2YCrCb), MP_ROM_INT(36) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2YCrCb), MP_ROM_INT(37) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_YCrCb2BGR), MP_ROM_INT(38) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_YCrCb2RGB), MP_ROM_INT(39) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BGR2HSV), MP_ROM_INT(40) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_RGB2HSV), MP_ROM_INT(41) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_HSV2BGR), MP_ROM_INT(54) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_HSV2RGB), MP_ROM_INT(55) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BayerBG2BGR), MP_ROM_INT(46) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGB2BGR), MP_ROM_INT(47) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BayerRG2BGR), MP_ROM_INT(48) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGR2BGR), MP_ROM_INT(49) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BayerRG2RGB), MP_ROM_INT(46) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGR2RGB), MP_ROM_INT(47) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BayerBG2RGB), MP_ROM_INT(48) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGB2RGB), MP_ROM_INT(49) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BayerBG2GRAY), MP_ROM_INT(86) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGB2GRAY), MP_ROM_INT(87) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BayerRG2GRAY), MP_ROM_INT(88) }, - { MP_ROM_QSTR(MP_QSTR_COLOR_BayerGR2GRAY), MP_ROM_INT(89) }, - - // Line types, from opencv2/imgproc.hpp - { MP_ROM_QSTR(MP_QSTR_FILLED), MP_ROM_INT(-1) }, - { MP_ROM_QSTR(MP_QSTR_LINE_4), MP_ROM_INT(4) }, - { MP_ROM_QSTR(MP_QSTR_LINE_8), MP_ROM_INT(8) }, - { MP_ROM_QSTR(MP_QSTR_LINE_AA), MP_ROM_INT(16) }, - - // Hershey fonts, from opencv2/imgproc.hpp - { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SIMPLEX), MP_ROM_INT(0) }, - { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_PLAIN), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_DUPLEX), MP_ROM_INT(2) }, - { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_COMPLEX), MP_ROM_INT(3) }, - { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_TRIPLEX), MP_ROM_INT(4) }, - { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_COMPLEX_SMALL), MP_ROM_INT(5) }, - { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SCRIPT_SIMPLEX), MP_ROM_INT(6) }, - { MP_ROM_QSTR(MP_QSTR_FONT_HERSHEY_SCRIPT_COMPLEX), MP_ROM_INT(7) }, - { MP_ROM_QSTR(MP_QSTR_FONT_ITALIC), MP_ROM_INT(16) }, - - // Marker types, from opencv2/imgproc.hpp - { MP_ROM_QSTR(MP_QSTR_MARKER_CROSS), MP_ROM_INT(0) }, - { MP_ROM_QSTR(MP_QSTR_MARKER_TILTED_CROSS), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_MARKER_STAR), MP_ROM_INT(2) }, - { MP_ROM_QSTR(MP_QSTR_MARKER_DIAMOND), MP_ROM_INT(3) }, - { MP_ROM_QSTR(MP_QSTR_MARKER_SQUARE), MP_ROM_INT(4) }, - { 
MP_ROM_QSTR(MP_QSTR_MARKER_TRIANGLE_UP), MP_ROM_INT(5) }, - { MP_ROM_QSTR(MP_QSTR_MARKER_TRIANGLE_DOWN), MP_ROM_INT(6) }, - - // Template matching modes, from opencv2/imgproc.hpp - { MP_ROM_QSTR(MP_QSTR_TM_SQDIFF), MP_ROM_INT(0) }, - { MP_ROM_QSTR(MP_QSTR_TM_SQDIFF_NORMED), MP_ROM_INT(1) }, - { MP_ROM_QSTR(MP_QSTR_TM_CCORR), MP_ROM_INT(2) }, - { MP_ROM_QSTR(MP_QSTR_TM_CCORR_NORMED), MP_ROM_INT(3) }, - { MP_ROM_QSTR(MP_QSTR_TM_CCOEFF), MP_ROM_INT(4) }, - { MP_ROM_QSTR(MP_QSTR_TM_CCOEFF_NORMED), MP_ROM_INT(5) }, - - //////////////////////////////////////////////////////////////////////////// - // OpenCV core functions - //////////////////////////////////////////////////////////////////////////// - - { MP_ROM_QSTR(MP_QSTR_convertScaleAbs), MP_ROM_PTR(&cv2_core_convertScaleAbs_obj) }, - { MP_ROM_QSTR(MP_QSTR_inRange), MP_ROM_PTR(&cv2_core_inRange_obj) }, - - //////////////////////////////////////////////////////////////////////////// - // OpenCV highgui functions - //////////////////////////////////////////////////////////////////////////// - - { MP_ROM_QSTR(MP_QSTR_imshow), MP_ROM_PTR(&cv2_highgui_imshow_obj) }, - { MP_ROM_QSTR(MP_QSTR_waitKey), MP_ROM_PTR(&cv2_highgui_waitKey_obj) }, - - //////////////////////////////////////////////////////////////////////////// - // OpenCV imgcodecs functions - //////////////////////////////////////////////////////////////////////////// - - { MP_ROM_QSTR(MP_QSTR_imread), MP_ROM_PTR(&cv2_imgcodecs_imread_obj) }, - { MP_ROM_QSTR(MP_QSTR_imwrite), MP_ROM_PTR(&cv2_imgcodecs_imwrite_obj) }, - - //////////////////////////////////////////////////////////////////////////// - // OpenCV imgproc functions - //////////////////////////////////////////////////////////////////////////// - - { MP_ROM_QSTR(MP_QSTR_adaptiveThreshold), MP_ROM_PTR(&cv2_imgproc_adaptiveThreshold_obj) }, - { MP_ROM_QSTR(MP_QSTR_arrowedLine), MP_ROM_PTR(&cv2_imgproc_arrowedLine_obj) }, - { MP_ROM_QSTR(MP_QSTR_bilateralFilter), MP_ROM_PTR(&cv2_imgproc_bilateralFilter_obj) }, - { MP_ROM_QSTR(MP_QSTR_blur), MP_ROM_PTR(&cv2_imgproc_blur_obj) }, - { MP_ROM_QSTR(MP_QSTR_boxFilter), MP_ROM_PTR(&cv2_imgproc_boxFilter_obj) }, - { MP_ROM_QSTR(MP_QSTR_Canny), MP_ROM_PTR(&cv2_imgproc_Canny_obj) }, - { MP_ROM_QSTR(MP_QSTR_connectedComponents), MP_ROM_PTR(&cv2_imgproc_connectedComponents_obj) }, - // { MP_ROM_QSTR(MP_QSTR_connectedComponentsWithStats), MP_ROM_PTR(&cv2_imgproc_connectedComponentsWithStats_obj) }, - { MP_ROM_QSTR(MP_QSTR_circle), MP_ROM_PTR(&cv2_imgproc_circle_obj) }, - { MP_ROM_QSTR(MP_QSTR_cvtColor), MP_ROM_PTR(&cv2_imgproc_cvtColor_obj) }, - { MP_ROM_QSTR(MP_QSTR_dilate), MP_ROM_PTR(&cv2_imgproc_dilate_obj) }, - { MP_ROM_QSTR(MP_QSTR_drawMarker), MP_ROM_PTR(&cv2_imgproc_drawMarker_obj) }, - { MP_ROM_QSTR(MP_QSTR_ellipse), MP_ROM_PTR(&cv2_imgproc_ellipse_obj) }, - { MP_ROM_QSTR(MP_QSTR_erode), MP_ROM_PTR(&cv2_imgproc_erode_obj) }, - { MP_ROM_QSTR(MP_QSTR_fillConvexPoly), MP_ROM_PTR(&cv2_imgproc_fillConvexPoly_obj) }, - { MP_ROM_QSTR(MP_QSTR_fillPoly), MP_ROM_PTR(&cv2_imgproc_fillPoly_obj) }, - { MP_ROM_QSTR(MP_QSTR_filter2D), MP_ROM_PTR(&cv2_imgproc_filter2D_obj) }, - { MP_ROM_QSTR(MP_QSTR_GaussianBlur), MP_ROM_PTR(&cv2_imgproc_GaussianBlur_obj) }, - { MP_ROM_QSTR(MP_QSTR_getStructuringElement), MP_ROM_PTR(&cv2_imgproc_getStructuringElement_obj) }, - { MP_ROM_QSTR(MP_QSTR_HoughCircles), MP_ROM_PTR(&cv2_imgproc_HoughCircles_obj) }, - { MP_ROM_QSTR(MP_QSTR_HoughCirclesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughCirclesWithAccumulator_obj) }, - { MP_ROM_QSTR(MP_QSTR_HoughLines), 
MP_ROM_PTR(&cv2_imgproc_HoughLines_obj) },
-    // { MP_ROM_QSTR(MP_QSTR_HoughLinesP), MP_ROM_PTR(&cv2_imgproc_HoughLinesP_obj) },
-    { MP_ROM_QSTR(MP_QSTR_HoughLinesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughLinesWithAccumulator_obj) },
-    { MP_ROM_QSTR(MP_QSTR_Laplacian), MP_ROM_PTR(&cv2_imgproc_Laplacian_obj) },
-    { MP_ROM_QSTR(MP_QSTR_line), MP_ROM_PTR(&cv2_imgproc_line_obj) },
-    { MP_ROM_QSTR(MP_QSTR_matchTemplate), MP_ROM_PTR(&cv2_imgproc_matchTemplate_obj) },
-    { MP_ROM_QSTR(MP_QSTR_medianBlur), MP_ROM_PTR(&cv2_imgproc_medianBlur_obj) },
-    { MP_ROM_QSTR(MP_QSTR_morphologyEx), MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) },
-    { MP_ROM_QSTR(MP_QSTR_putText), MP_ROM_PTR(&cv2_imgproc_putText_obj) },
-    { MP_ROM_QSTR(MP_QSTR_rectangle), MP_ROM_PTR(&cv2_imgproc_rectangle_obj) },
-    { MP_ROM_QSTR(MP_QSTR_Scharr), MP_ROM_PTR(&cv2_imgproc_Scharr_obj) },
-    { MP_ROM_QSTR(MP_QSTR_Sobel), MP_ROM_PTR(&cv2_imgproc_Sobel_obj) },
-    { MP_ROM_QSTR(MP_QSTR_spatialGradient), MP_ROM_PTR(&cv2_imgproc_spatialGradient_obj) },
-    { MP_ROM_QSTR(MP_QSTR_threshold), MP_ROM_PTR(&cv2_imgproc_threshold_obj) },
+    // Include globals from each OpenCV module
+    OPENCV_CORE_GLOBALS,
+    OPENCV_HIGHGUI_GLOBALS,
+    OPENCV_IMGCODECS_GLOBALS,
+    OPENCV_IMGPROC_GLOBALS,
 };
 static MP_DEFINE_CONST_DICT(cv2_module_globals, cv2_module_globals_table);
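Editor's note: the per-module GLOBALS macros above keep exposing the same constants as plain
integers on the `cv2` module, so user code passes them by name just like in desktop OpenCV.
A minimal sketch of how that looks from MicroPython (the array shape is arbitrary, and the
exact accepted form of the `ksize` argument is an assumption based on the Size conversion
accepting length-2 sequences):

    import cv2
    from ulab import numpy as np

    img = np.zeros((240, 320, 3), dtype=np.uint8)       # placeholder BGR image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)        # color conversion code constant
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))  # morphology shape constant
    dilated = cv2.dilate(gray, kernel)
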
From a3f6cd58268d5639bf6883492490fe189f1f10b3 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Mon, 9 Jun 2025 17:41:14 -0600
Subject: [PATCH 032/158] Set Mat allocator in imread()

---
 src/imgcodecs.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/imgcodecs.cpp b/src/imgcodecs.cpp
index d41f305..710364e 100644
--- a/src/imgcodecs.cpp
+++ b/src/imgcodecs.cpp
@@ -65,6 +65,7 @@ mp_obj_t cv2_imgcodecs_imread(size_t n_args, const mp_obj_t *pos_args, mp_map_t
 
     // Decode the image from the buffer
     Mat img;
+    img.allocator = &GetNumpyAllocator();
     try {
         img = imdecode(buf, flags);
     } catch(Exception& e) {

From 50f29e5dc46ad776fc4e985543d8eaa1c2bb8803 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Tue, 10 Jun 2025 12:49:51 -0600
Subject: [PATCH 033/158] Ensure image codecs are allocated in C heap

See #17
---
 src/imgcodecs.cpp    | 19 +++++++++++++++++++
 src/opencv_upy.cmake |  4 ++--
 2 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/src/imgcodecs.cpp b/src/imgcodecs.cpp
index 710364e..35795c7 100644
--- a/src/imgcodecs.cpp
+++ b/src/imgcodecs.cpp
@@ -13,6 +13,25 @@ extern "C" {
 
 using namespace cv;
 
+// Fix for https://github.com/sparkfun/micropython-opencv/issues/17
+//
+// TLDR; The `g_codecs` object (which stores all image encoders and decoders) is
+// allocated once, whenever the first OpenCV function that needs it happens to
+// be called. That will only happen from the user's code, after the GC has been
+// initialized, meaning it gets allocated on the GC heap (see `__wrap_malloc()`).
+// If a soft reset occurs, the GC gets reset and overwrites the memory location,
+// but the same memory location is still referenced for the `g_codecs`
+// object, resulting in bogus values; subsequent `imread()` and `imwrite()`
+// calls then fail
+//
+// The solution here is to create a global variable that subsequently creates
+// the `g_codecs` object before the GC has been initialized, so it's allocated
+// on the C heap and persists through soft resets. `g_codecs` is initialized
+// when calling `getCodecs()`, which is not publicly exposed. The next best
+// option is to call `haveImageWriter()`, which calls `findEncoder()`, which
+// calls `getCodecs()`
+volatile bool haveImageWriterPNG = haveImageWriter(".png");
+
 // Helper macro to create an empty mp_map_t, derived from MP_DEFINE_CONST_MAP.
 // Primarily used for function calls with no keyword arguments, since we can't
 // just pass `NULL` or mp_const_none (crash occurs otherwise)
diff --git a/src/opencv_upy.cmake b/src/opencv_upy.cmake
index 60b5e67..0915d98 100644
--- a/src/opencv_upy.cmake
+++ b/src/opencv_upy.cmake
@@ -23,8 +23,8 @@ target_link_libraries(usermod INTERFACE usermod_cv2)
 
 # OpenCV creates some global variables on the heap. These get created before
 # the GC is initialized, so we need to allocate some space for them on the C
-# heap. 10kB seems sufficient. TODO: See if we can get away with less.
-set(MICROPY_C_HEAP_SIZE 10240)
+# heap. 64kB seems sufficient.
+set(MICROPY_C_HEAP_SIZE 65536)
 
 # Makes m_tracked_calloc() and m_tracked_free() available. These track pointers
 # in a linked list to ensure the GC does not free them. Needed for some OpenCV
From 52a290ec8263a534eb7cd23f30085821b8599497 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Tue, 10 Jun 2025 13:47:15 -0600
Subject: [PATCH 034/158] Set default Mat allocator to NumpyAllocator

Fixes #17
---
 src/core.cpp | 24 ++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/src/core.cpp b/src/core.cpp
index 23238c8..be6e19e 100644
--- a/src/core.cpp
+++ b/src/core.cpp
@@ -28,6 +28,30 @@ using namespace cv;
 // exposed, which just runs `return getCoreTlsData().rng`
 volatile RNG rng = theRNG();
 
+// Fix for https://github.com/sparkfun/micropython-opencv/issues/17
+//
+// TLDR; The `StdMatAllocator` gets allocated once, the first time a Mat object
+// is created without the NumpyAllocator being set (OpenCV creates internal Mat
+// objects for various operations that use whatever the default allocator is).
+// Similar to above, the `StdMatAllocator` gets allocated on the GC heap, so if
+// a soft reset occurs, the GC gets reset and overwrites the memory location,
+// causing problems
+//
+// Instead of ensuring the `StdMatAllocator` is allocated on the C heap, we just
+// set the NumpyAllocator as the default allocator. `Mat::setDefaultAllocator()`
+// does not return anything, so this wrapper function returns a dummy value so
+// we can use it to initialize a global variable, ensuring it gets run before
+// `main()` gets called
+bool setNumpyAllocator() {
+    try {
+        Mat::setDefaultAllocator(&GetNumpyAllocator());
+        return true;
+    } catch (const Exception& e) {
+        return false;
+    }
+}
+volatile bool defaultAllocatorSet = setNumpyAllocator();
+
 mp_obj_t cv2_core_convertScaleAbs(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
     // Define the arguments
     enum { ARG_src, ARG_dst, ARG_alpha, ARG_beta };
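Editor's note: the user-visible payoff of backing Mat data with the NumpyAllocator is that
images reach Python as ulab ndarrays rather than opaque handles. A rough sketch of what that
enables (the file name is a placeholder, and exact attribute support depends on the ulab
build):

    import cv2

    img = cv2.imread("logo.png")    # image data is backed by a ulab ndarray
    print(img.shape, img.dtype)     # inspect it like any other ndarray
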
From 01931af820efef8c7a7543606ab6623ac96b99e6 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Tue, 10 Jun 2025 16:56:08 -0600
Subject: [PATCH 035/158] Move global initializers to new boot function

---
 src/core.cpp      | 63 ++++++++++++++++++++++++-----------------------
 src/imgcodecs.cpp | 19 --------------
 2 files changed, 32 insertions(+), 50 deletions(-)

diff --git a/src/core.cpp b/src/core.cpp
index be6e19e..8d42c5c 100644
--- a/src/core.cpp
+++ b/src/core.cpp
@@ -1,5 +1,6 @@
 // C++ headers
 #include "opencv2/core.hpp"
+#include "opencv2/imgcodecs.hpp"
 
 #include "convert.h"
 #include "numpy.h"
@@ -11,46 +12,46 @@ extern "C" {
 
 using namespace cv;
 
-// Fix for https://github.com/sparkfun/micropython-opencv/issues/13
+// The function below is a workaround for memory management issues between
+// OpenCV and the MicroPython GC. OpenCV allocates some objects on the heap
+// whenever the first function that needs the objects happens to be called. That
+// only happens from the user's code after the GC has been initialized, meaning
+// they get allocated on the GC heap (see `__wrap_malloc()`). If a soft reset
+// occurs, the GC gets reset and the memory locations get overwritten, but the
+// same memory locations are still referenced for the objects, resulting in bad
+// values and problems (crashes and freezes, `CV_Assert()` calls fail, etc.).
 //
-// TLDR; The CoreTLSData object gets allocated once, whenever the first OpenCV
-// function that needs it happens to be called. That will only happen from the
-// user's code, after the GC has been initialized, meaning it gets allocated on
-// the GC heap (see `__wrap_malloc()`). If a soft reset occurs, the GC gets
-// reset and overwrites the memory location, but the same memory location is
-// still referenced for the CoreTLSData object, resulting in bogus values and
-// subsequent `CV_Assert()` calls fail
+// The solution here is to ensure those objects are allocated in the C heap
+// instead of the GC heap. The function below calls various OpenCV functions
+// that subsequently allocate the problematic objects. To ensure they are
+// allocated on the C heap, this needs to happen before the GC is initialized
+// (before `main()` is called), so __wrap_malloc() will use __real_malloc()
+// instead of the GC.
 //
-// The solution here is to create a global variable that subsequently calls
-// `getCoreTlsData()` to allocate the CoreTLSData object before the GC has
-// been initialized, so it gets allocated on the C heap and persists through
-// soft resets. `getCoreTlsData()` is not publicly exposed, but `theRNG()` is
-// exposed, which just runs `return getCoreTlsData().rng`
-volatile RNG rng = theRNG();
-
-// Fix for https://github.com/sparkfun/micropython-opencv/issues/17
-//
-// TLDR; The `StdMatAllocator` gets allocated once, the first time a Mat object
-// is created without the NumpyAllocator being set (OpenCV creates internal Mat
-// objects for various operations that use whatever the default allocator is).
-// Similar to above, the `StdMatAllocator` gets allocated on the GC heap, so if
-// a soft reset occurs, the GC gets reset and overwrites the memory location,
-// causing problems
-//
-// Instead of ensuring the `StdMatAllocator` is allocated on the C heap, we just
-// set the NumpyAllocator as the default allocator. `Mat::setDefaultAllocator()`
-// does not return anything, so this wrapper function returns a dummy value so
-// we can use it to initialize a global variable, ensuring it gets run before
-// `main()` gets called
-bool setNumpyAllocator() {
+// The function below returns a dummy value that we use to initialize a global
+// variable, ensuring it gets run before `main()` gets called. This also means
+// it can be used as a general boot function for anything else that needs to
+// happen before `main()` is called, such as setting the default Mat allocator.
+bool upyOpenCVBoot() {
     try {
+        // Initializes `CoreTLSData` on the C heap, see:
+        // https://github.com/sparkfun/micropython-opencv/issues/13
+        theRNG();
+
+        // Initializes all image codecs on the C heap, see:
+        // https://github.com/sparkfun/micropython-opencv/issues/17
+        haveImageWriter(".bmp");
+
+        // Sets the NumpyAllocator as the default Mat object allocator, see
+        // https://github.com/sparkfun/micropython-opencv/issues/17
         Mat::setDefaultAllocator(&GetNumpyAllocator());
+
         return true;
     } catch (const Exception& e) {
         return false;
     }
 }
-volatile bool defaultAllocatorSet = setNumpyAllocator();
+volatile bool bootSuccess = upyOpenCVBoot();
 
 mp_obj_t cv2_core_convertScaleAbs(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
     // Define the arguments
diff --git a/src/imgcodecs.cpp b/src/imgcodecs.cpp
index 35795c7..710364e 100644
--- a/src/imgcodecs.cpp
+++ b/src/imgcodecs.cpp
@@ -13,25 +13,6 @@ extern "C" {
 
 using namespace cv;
 
-// Fix for https://github.com/sparkfun/micropython-opencv/issues/17
-//
-// TLDR; The `g_codecs` object (which stores all image encoders and decoders) is
-// allocated once, whenever the first OpenCV function that needs it happens to
-// be called. That will only happen from the user's code, after the GC has been
-// initialized, meaning it gets allocated on the GC heap (see `__wrap_malloc()`).
-// If a soft reset occurs, the GC gets reset and overwrites the memory location,
-// but the same memory location is still referenced for the `g_codecs`
-// object, resulting in bogus values; subsequent `imread()` and `imwrite()`
-// calls then fail
-//
-// The solution here is to create a global variable that subsequently creates
-// the `g_codecs` object before the GC has been initialized, so it's allocated
-// on the C heap and persists through soft resets. `g_codecs` is initialized
-// when calling `getCodecs()`, which is not publicly exposed. The next best
-// option is to call `haveImageWriter()`, which calls `findEncoder()`, which
-// calls `getCodecs()`
-volatile bool haveImageWriterPNG = haveImageWriter(".png");
-
 // Helper macro to create an empty mp_map_t, derived from MP_DEFINE_CONST_MAP.
 // Primarily used for function calls with no keyword arguments, since we can't
 // just pass `NULL` or mp_const_none (crash occurs otherwise)

From 37756bb443bcefce53f5cafef7b607feeb27c55d Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Tue, 10 Jun 2025 17:17:31 -0600
Subject: [PATCH 036/158] Don't set NumPy allocator as default

Caused issues with Canny, which internally creates a 64-bit float Mat, which
ulab does not support

Replaced with call to ensure StdMatAllocator is allocated on the C heap
---
 src/core.cpp | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/core.cpp b/src/core.cpp
index 8d42c5c..f3a247d 100644
--- a/src/core.cpp
+++ b/src/core.cpp
@@ -42,9 +42,16 @@ bool upyOpenCVBoot() {
         // https://github.com/sparkfun/micropython-opencv/issues/17
         haveImageWriter(".bmp");
 
-        // Sets the NumpyAllocator as the default Mat object allocator, see
+        // Initializes `StdMatAllocator` on the C heap, see:
         // https://github.com/sparkfun/micropython-opencv/issues/17
-        Mat::setDefaultAllocator(&GetNumpyAllocator());
+        // Alternatively, we could set the NumpyAllocator as the default Mat
+        // allocator with Mat::setDefaultAllocator(&GetNumpyAllocator()),
+        // however that actually causes some issues. For example, Canny()
+        // creates a temporary 64-bit float Mat, which is not supported by
+        // ulab NumPy and therefore fails if we use the NumpyAllocator. The
+        // StdMatAllocator is fine, because it calls `malloc()`, which we catch
+        // with `__wrap_malloc()` to ensure the data is allocated on the GC heap
+        Mat::getDefaultAllocator();
 
         return true;
     } catch (const Exception& e) {

From 918cb58d936ee9121216eb370f0ff43131a1ec8a Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 11 Jun 2025 14:23:22 -0600
Subject: [PATCH 037/158] Change ST7789 SPI driver to require SPI bus instead
 of initializing it

Improves behavior when using the same SPI bus with multiple devices, like an
SD card

Also modify DC pin behavior to restore previous mode/alt parameters after
communicating with display, in case the pin is used by another device (e.g.
SPI by SD card) --- drivers/display/st7789_spi.py | 76 ++++++++++++++++++++++++++--------- 1 file changed, 57 insertions(+), 19 deletions(-) diff --git a/drivers/display/st7789_spi.py b/drivers/display/st7789_spi.py index 6134350..a675271 100644 --- a/drivers/display/st7789_spi.py +++ b/drivers/display/st7789_spi.py @@ -118,15 +118,11 @@ class ST7789_SPI(): OpenCV SPI driver for ST7789 displays Args: - width (int): display width **Required** + width (int): display width **Required** height (int): display height **Required** - spi_id (int): SPI bus ID - spi_baudrate (int): SPI baudrate, default 24MHz - pin_sck (pin): SCK pin number - pin_mosi (pin): MOSI pin number - pin_miso (pin): MISO pin number - pin_cs (pin): Chip Select pin number - pin_dc (pin): Data/Command pin number + spi (SPI): SPI bus **Required** + pin_dc (Pin): Data/Command pin number **Required** + pin_cs (Pin): Chip Select pin number rotation (int): Orientation of display - 0-Portrait, default - 1-Landscape @@ -142,24 +138,17 @@ def __init__( self, width, height, - spi_id, - spi_baudrate=24000000, - pin_sck=None, - pin_mosi=None, - pin_miso=None, + spi, + pin_dc, pin_cs=None, - pin_dc=None, rotation=0, color_order=BGR, reverse_bytes_in_word=True, ): # Store SPI arguments - self.spi = SPI(spi_id, baudrate=spi_baudrate, - sck=Pin(pin_sck, Pin.OUT) if pin_sck else None, - mosi=Pin(pin_mosi, Pin.OUT) if pin_mosi else None, - miso=Pin(pin_miso, Pin.IN) if pin_miso else None) + self.spi = spi + self.dc = Pin(pin_dc) # Don't change mode/alt self.cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs else None - self.dc = Pin(pin_dc, Pin.OUT, value=1) if pin_dc else None # Initial dimensions and offsets; will be overridden when rotation applied self.width = width self.height = height @@ -359,8 +348,54 @@ def clear(self): # Write the buffer to the display self._write(None, self.buffer) + def saveDcPin(self): + """ + Saves the current `mode` and `alt` of the DC pin so it can be restored + later. Mostly used to restore the SPI mode (MISO) of the DC pin after + communication with the display in case another device is using the same + SPI bus. + + Returns: + tuple: (mode, alt) + """ + # There's no way to get the mode and alt of a pin directly, so we + # convert the pin to a string and parse it. 
Example format:
+        # "Pin(GPIO16, mode=ALT, alt=SPI)"
+        dcStr = str(self.dc)
+
+        # Extract the "mode" parameter from the pin string
+        if "mode=" in dcStr:
+            # Split between "mode=" and the next comma or closing parenthesis
+            modeStr = dcStr.split("mode=")[1].split(",")[0].split(")")[0]
+
+            # Look up the mode in Pin class dictionary
+            mode = Pin.__dict__[modeStr]
+        else:
+            # No mode specified, just set to None
+            mode = None
+
+        # Extract the "alt" parameter from the pin string
+        if "alt=" in dcStr:
+            # Split between "alt=" and the next comma or closing parenthesis
+            altStr = dcStr.split("alt=")[1].split(",")[0].split(")")[0]
+
+            # Look up the alt in Pin class dictionary (with "ALT_" prefix)
+            alt = Pin.__dict__["ALT_" + altStr]
+        else:
+            # No alt specified, just set to None
+            alt = None
+
+        # Return the mode and alt as a tuple
+        return (mode, alt)
+
     def _write(self, command=None, data=None):
         """SPI write to the device: commands and data."""
+        # Save the current mode and alt of the DC pin in case it's used by
+        # another device on the same SPI bus
+        mode, alt = self.saveDcPin()
+
+        # Temporarily set the DC pin to output mode
+        self.dc.init(mode=Pin.OUT)
+
         if self.cs:
             self.cs.off()
         if command is not None:
@@ -371,3 +406,6 @@ def _write(self, command=None, data=None):
             self.spi.write(data)
         if self.cs:
             self.cs.on()
+
+        # Restore the DC pin to its original mode and alt
+        self.dc.init(mode=mode, alt=alt)
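Editor's note: for clarity, a hedged before/after sketch of the constructor change in the
patch above; the pin numbers are placeholders for a hypothetical board, not recommendations:

    from machine import SPI
    from st7789_spi import ST7789_SPI

    # Old API (the driver created and owned the bus):
    #   display = ST7789_SPI(240, 320, spi_id=0, pin_cs=17, pin_dc=16)

    # New API (the caller owns the bus, so other devices can share it):
    spi = SPI(0)
    display = ST7789_SPI(width=240, height=320, spi=spi, pin_dc=16, pin_cs=17)
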
From 660668fd35bfcb7c72925453d41441f1b076551d Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 11 Jun 2025 14:24:17 -0600
Subject: [PATCH 038/158] Add boot.py example to initialize display and mount
 SD card

Also update Example 1 to remove display driver dependency, now requires
hardware to be initialized by boot.py
---
 examples/boot.py              | 60 +++++++++++++++++++++++++++++++++++
 examples/ex01_hello_opencv.py | 38 +++++++++------------
 2 files changed, 75 insertions(+), 23 deletions(-)
 create mode 100644 examples/boot.py

diff --git a/examples/boot.py b/examples/boot.py
new file mode 100644
index 0000000..3baf032
--- /dev/null
+++ b/examples/boot.py
@@ -0,0 +1,60 @@
+# Import the machine module to access hardware features
+import machine
+
+# Initialize SPI bus, assuming default pins on bus 0. You may need to adjust
+# this based on your specific board and configuration
+spi = machine.SPI(0)
+
+# Initialize display, if available
+try:
+    # Import a display driver module. This example assumes the ST7789, which is
+    # a very popular display driver for embedded systems. Moreover, this example
+    # uses an SPI-based driver, so it should work on any platform, but it's not
+    # always the fastest option
+    import st7789_spi
+
+    # Create a display object. This will depend on the display driver you are
+    # using, and you may need to adjust the parameters based on your specific
+    # display and board configuration
+    display = st7789_spi.ST7789_SPI(width=240,
+                                    height=320,
+                                    spi=spi,
+                                    pin_dc=16,
+                                    pin_cs=17,
+                                    rotation=1)
+except ImportError:
+    print("boot.py - Display driver module not found, skipping display initialization.")
+
+# Initialize SD card, if available
+try:
+    # Import the SD card module. This is often not installed by default in
+    # MicroPython, so you may need to install it manually. For example, you can
+    # use `mpremote mip install sdcard`
+    import sdcard
+
+    # This example assumes the SD card is on the same SPI bus as the display
+    # with a different chip select pin. You may need to adjust this based on
+    # your specific board and configuration
+    sd_cs = machine.Pin(7, machine.Pin.OUT)
+    sd = sdcard.SDCard(spi, sd_cs)
+
+    # Mount the SD card to the filesystem under the "/sd" directory, which makes
+    # it accessible just like the normal MicroPython filesystem
+    import uos
+    vfs = uos.VfsFat(sd)
+    uos.mount(vfs, "/sd")
+except ImportError:
+    print("boot.py - sdcard module not found, skipping SD card initialization.")
+except OSError:
+    print("boot.py - Failed to mount SD card, skipping SD card initialization.")
+
+# Set the SPI bus baudrate (note - the sdcard module overrides the baudrate upon
+# initialization, so the baudrate should be set after that). It is recommended
+# to use the fastest baudrate supported by your board, display, and SD card to
+# minimize latency
+spi.init(baudrate=24_000_000)
+
+# Clear the display to wipe any previous content. This is optional, but it's
+# recommended to ensure a clean slate. The guard also covers the case where the
+# display import above failed and `display` was never defined
+if "display" in globals() and hasattr(display, 'clear'):
+    display.clear()
diff --git a/examples/ex01_hello_opencv.py b/examples/ex01_hello_opencv.py
index 288fb32..2993cee 100644
--- a/examples/ex01_hello_opencv.py
+++ b/examples/ex01_hello_opencv.py
@@ -6,22 +6,6 @@
 # implementation; ulab NumPy is a lightweight version of standard NumPy
 from ulab import numpy as np
 
-# Standard OpenCV leverages the host operating system to display images, but we
-# don't have that luxury in MicroPython. Instead, we need to import a display
-# driver. Any display driver can be used, as long as it implements an `imshow()`
-# method that takes an NumPy array as input
-from st7789_spi import ST7789_SPI
-
-# Create a display object. This will depend on the display driver you are using,
-# and you may need to adjust the parameters based on your specific display and
-# board configuration
-display = ST7789_SPI(width=240,
-                     height=320,
-                     spi_id=0,
-                     pin_cs=17,
-                     pin_dc=16,
-                     rotation=1)
-
 # Initialize an image (NumPy array) to be displayed, just like in any other
 # Python environment! Here we create a 240x320 pixel image with 3 color channels
 # (BGR order, like standard OpenCV) and a data type of `uint8` (you should
@@ -39,17 +23,25 @@
 # Note - Most OpenCV functions return the resulting image. It's redundant for
 # the drawing functions and often ignored, but if you call those functions from
 # the REPL without assigning it to a variable, the entire array will be printed.
-# To avoid this, you can simply re-assign the image, which has no effect other
-# than preventing the output from being printed
+# To avoid this, you can simply re-assign the image variable (for example,
+# `img = cv2.function(...)`)
 
 # And the obligatory "Hello OpenCV" text! This time in red
 img = cv2.putText(img, "Hello OpenCV!", (50, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,
                   (0, 0, 255), 2)
 
 # Once we have an image ready to show, just call `cv2.imshow()`, almost like any
-# other Python environment! The only difference is that we need to pass the
-# display object we created earlier as the first argument, instead of a window
-# name string. Alternatively, you can call `display.imshow(img)` directly
-cv2.imshow(display, img)
+# other Python environment! However, there is one important difference:
+#
+# Standard OpenCV leverages the host operating system to display images in
+# windows, but we don't have that luxury in MicroPython. So there is an API
+# change to `cv2.imshow()` to accommodate this: instead of passing a window name
+# string as the first argument to `cv2.imshow()`, we pass a display driver. Any
+# display driver can be used, as long as it implements an `imshow()` method that
+# takes a NumPy array as input
+#
+# This example assumes a display driver called `display` has been initialized by
+# a `boot.py` script. See the example `boot.py` script for more details
+cv2.imshow(display, img) # Can alternatively call `display.imshow(img)`
 
 # Standard OpenCV requires a call to `cv2.waitKey()` to process events and
 # actually display the image. However the display driver shows the image
@@ -61,4 +53,4 @@
 #
 # Note - Some MicroPython IDEs (like Thonny) don't actually send any key presses
 # until you hit Enter on your keyboard
-key = cv2.waitKey(1) # Not necessary to display image, can remove if desired
+key = cv2.waitKey(0) # Not necessary to display image, can remove if desired
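Editor's note: since `cv2.imshow()` only needs an object with an `imshow()` method, as the
updated example above describes, any panel can be supported by writing a small driver. A
minimal skeleton of what such a driver could look like (entirely hypothetical, not an
existing module):

    class MyDisplay:
        """Bare-bones display driver satisfying the `imshow()` contract."""

        def __init__(self, width, height):
            self.width = width
            self.height = height

        def imshow(self, img):
            # `img` arrives as a ulab ndarray; convert it to the panel's
            # native pixel format and push it over the panel's bus here
            raise NotImplementedError("implement for your hardware")
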
From 18e938d09b15545c83aad692f13b1432217b7c Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 11 Jun 2025 15:08:14 -0600
Subject: [PATCH 039/158] Add Example 2 - imread() and imwrite()

---
 examples/ex02_imread_imwrite.py        | 37 +++++++++++++++++++++++++
 examples/test_images/sparkfun_logo.png | Bin 0 -> 31075 bytes
 2 files changed, 37 insertions(+)
 create mode 100644 examples/ex02_imread_imwrite.py
 create mode 100644 examples/test_images/sparkfun_logo.png

diff --git a/examples/ex02_imread_imwrite.py b/examples/ex02_imread_imwrite.py
new file mode 100644
index 0000000..50a431a
--- /dev/null
+++ b/examples/ex02_imread_imwrite.py
@@ -0,0 +1,37 @@
+# Import OpenCV
+import cv2
+
+# Call `cv2.imread()` to read an image from the MicroPython filesystem, just
+# like in any other Python environment! Make sure to copy the image to the
+# MicroPython filesystem first, and set the path to the image file as needed
+#
+# If your board can mount an SD card, you can instead load the image to the SD
+# card and change the path to point to the SD card
+#
+# Note - only BMP and PNG formats are currently supported in MicroPython OpenCV
+img = cv2.imread("test_images/sparkfun_logo.png")
+
+# Show the image for a moment
+cv2.imshow(display, img)
+key = cv2.waitKey(1000)
+
+# Let's modify the image! Here we use `cv2.Canny()` to perform edge detection
+# on the image, which is a common operation in computer vision
+edges = cv2.Canny(img, 100, 200)
+
+# Display the modified image
+cv2.imshow(display, edges)
+
+# Now we'll save the modified image to the MicroPython filesystem using
+# `cv2.imwrite()`, just like in any other Python environment!
+#
+# Again, SD cards are supported, just change the path to point to the SD card
+#
+# Note - only BMP and PNG formats are currently supported in MicroPython OpenCV
+success = cv2.imwrite("test_images/sparkfun_logo_edges.png", edges)
+
+# Check if the image was saved successfully
+if success:
+    print("Image saved successfully!")
+else:
+    print("Failed to save the image!")
diff --git a/examples/test_images/sparkfun_logo.png b/examples/test_images/sparkfun_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..645f8f905f79c0ddf6ca201656d57f83f755b6d0
GIT binary patch
literal 31075
[31075 bytes of base85-encoded PNG data omitted]
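Editor's note: one defensive tweak worth considering for scripts like Example 2 is checking
the result of `cv2.imread()` before using it. Whether this port returns None or raises on a
missing file is an assumption to verify on your build; desktop OpenCV returns None, so a
sketch in that style (the /sd path is illustrative):

    import cv2

    img = cv2.imread("/sd/test_images/sparkfun_logo.png")

    # Desktop OpenCV returns None on a failed read; assuming the same here
    if img is None:
        print("Could not load image - check the path and SD card mount")
    else:
        edges = cv2.Canny(img, 100, 200)
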
+# +# Again, SD cards are supported, just change the path to point to the SD card +# +# Note - only BMP and PNG formats are currently supported in MicroPython OpenCV +success = cv2.imwrite("test_images/sparkfun_logo_edges.png", edges) + +# Check if the image was saved successfully +if success: + print("Image saved successfully!") +else: + print("Failed to save the image!") diff --git a/examples/test_images/sparkfun_logo.png b/examples/test_images/sparkfun_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..645f8f905f79c0ddf6ca201656d57f83f755b6d0 GIT binary patch literal 31075 zcmd2>V`E)iw~fscouG|vCrxA9wr$%wvDMhN?KHOCIE|eKH_!Vk?uV@pd#|;})R<$f zNCi1@WCT0}FfcG=NeK}pFfed|uls8_h_Ca9Jk8Iq3%IkAxDZ&)G{NcD1C+U-tRNUz zeH`MOA@tWXyn}?MGZ+}k;C~1BsC}6U7?{Viq==x3hu(!RY#!F|!_#B;;e!yYpdj5U zEjHU@P<8}w=1|URN?ASPBX;4)P>p0A=ihqkzYsxXc^W70OWG}dy{zU}PWp$uL@)nt zr{X4h*4E0+<+9e+blH)FMTLY4%!Q&pq$DKn1th>z<4^kIA?S$bf{CFugbHAV5$Zxj zP{S|{;3K~yQTG7=yg;P@axyQbA~0!*dxQTEFIdCU_A1c|fxjcaP#TH;{Dj-@7{U|w zBh)C!3OEsbW*eq_AhZxd@fCf*lvT$RhBM29`)$iEC9vee2ut0=&Whz@Nh zF9LJN`GTYaRzIQ&LbJ0HAU6d>5Lk3G*%ISMMbTk z%IM%a2!(T2g!6?5>i_USHG(Om?J^dxBB2zLcN89W*zVemeyjZ4mSI)ST*>2rEzv7j zjp;cqav?u1%4YgTmd4hp|GZt`TJh9=+u2t4A#vh^eu&CPiz9ew-sp|ws5F1K&UMab^Z%i1k@XA@|W`l;ow4(%S zogZ+GMUSIc$dH^s!_uTcY^qG`L~?a_MP7hz(0{Ok&m$5mW2rmG5Gk-vs4gD)e{(q5 zeUTm?pc}$xDYMKZgt3CNMleK?Qc79CX4tw9tF%JprqQxTNx-VYIk_G{o~&>T3qCz_ zSt`Q(`sBSjSqJPlnP6PTqk+1* zOJYP;Zz9T#(KSfq+(Q_I#Jstgfw z6QV#u$c14J@IM3f5=jN(B~y+6J=O!3L5cPBv#4kO*4Snv*CF6K|F{Jcm8^&@sr*sJ z<}s~4_|wORoXv+kL)iv)xPK1?tfNr{3Iq^HpGcrBAlOhfFQA;7LoJ7d}j_{YJGY!M- zf){(eDPUNnr{l?MrkD+UWfVxm%h)s30{Xu&6TUYp<;-PNs=#;b5IoWjY`VI@ih-B=0Jle@jYZgW?{Dej zs|w~!p^8Qg)UWsD)?s46vmC^){;hja+_2T=TdeQ2B*WXrhUd^x)__kC!}VYHF5N>$c^;e($m?k_iJgp z5yH?BoVR7f@GPyO1<*y%Uq5GnXcz+j7H@|(zxD08$*asl515`QeXgMeeDx%kI0>6q zcse77ty<`s4DwWyfm~Rg#bDePbi2+7d!vF0+jzv%SHHs}OZyJFj3th$%dXPgch9!7 zkl1Jqb8&x*=qWll^aIJDXX7$4l}MsN!UbxTzir&a4-&wyuWXsivTr$&BB-5dD|#nP zI9;8e#jTD!PV?k-`r9*wTuhGIRVAXb^!&LHnye|EIZM#O2&MW2cVY~WYPOz>V$rE1 zmA$GfRjAyJp_WcV5uG@xhm{{^0YZ|ir4_koxZNmTHH(zS0BfC@M{Q+A-fs$>#X$Wtofv&p~aD* zo~m}~X?^V=2#@XJ{iKSuV&@-Iwthj$-&xj@pod5J-qY^qQUsnqR>&@-lJZO8k?V@2G1aZeLhO>L`>+)$GvzOR zdFGeQQxC2f0Q09yET|?=Naejtfl&BmSJi$p?(-L9SBuCq*idmwGA-eG(E{GGiV~|R zLT{BU_Uidb?jKba7B6DJ616rdufZ5gf6Qtt>YCG|WE-X5)Pk{kE$CLbvJ*5fe?i>T z5{;G~_fw5E5Hte8%ugqN%IP!5ESi&JSP_g{%&8yFJYTeW4ke@tD);Bx3Nn3T)!NE{ ziC*K0b+BPink9qxKS;7b#Sr((P%$A%i*}X`dBJNT9MAV6Bfp1deregACs+C=kpx|6 zC?HAYA40kxNX*0KO&Br-ykGX!fUlneL4sJ$JMTGn!t5xFGvq9?dQAVJ?N*tZr24;W z{?TTf?f%BQ2+3FpM22U#PF$_*P^vg-SJxl&y*9K|dJ0x3om$Y(7%v)8!^|baKWWrtX~uGEGG}t%C4iLsCyZ7uv3f zJ)*J!uk_6OKShX0qp=MMz~?#&B{n6cm#gGCr> z$Qs(DGF+Dk!^=I4xD5+;pSRMJ0+!1`P`>0knOPiP<`INIk_!ME#I)`~pI?DV`ECA7 z<#jpBa#DhRefEU*$KTo#n5jVRR2gpB7lk=t+IF&BTZJw$MtiIAco$L65o-A0Ux%E) zz|wO``9f3ExAROe$R0ustzFz#bJbw6-usTA`NggRjTREULdq`=Rw^kHM^`2kZD`^- zlBunirhu%>3Gs@AK*yvMYKT_ivS2-4A!bp6rLL)$F*ht4#gfLyPnb z2b36sr@?|H!Y1S+^MEC6C8ca_4c&^I} zE1?OLo?%kJ{LTKs^}Mz(2*p`?I))FEnX|C;^KYx^inf^U{Iq5Z_DO0fDCNLz;7N5! 
zXTNf3!&aKlHBSSSAHGk%2(bO)X+m&x2Qpn$YC!29fqfC)oDQXsx<2Jd2~7f3VfS2y zVD%hGJ1W8+%O!=HC^{ERhTdxP%r2P{l?=SI@;eHr9pED&(=1b6wHzytr=1H_>;9s( zM}oR<8i^wRB`hi?@R#YgdJ;Gtu*iH=$_Q&R;rlcZbUBV8_|knt?+$HiD5(J)M1%aq z>w%_YP4LOGt2tWt6N>n;b!;JXOrbYe)=M9T9U#rSe|?fsQMu_4?zv=v%MzRaT9be= z5rhN~)bxr_Z(Se+tQjU;o%GDpTzDHZExXS!0w%AE@De=R%o4A>JMR=Sds=i0$@ZrV z;{qCV>qq^J+!C?K!#s#4+QUlG`7XjZPORULDj5zS$#(&k$ONd%Gk{7Y7S6YaLdaN( zK_elUca!w`rM}|ZGf||WC!zmD0guCs2_LRidZA2qiAj;K=89PkJ@ABb-_py(#w0IR zOx1@Z%)f{yyLCM%Q<%DG?r(Zx7E6f79m8h`w#950f9M#a@Jpyd=$((58b zBB7mV2_w@Qa{WaKTwWceKza8rMZF|&lv$6WFrcH>*U-$`@Yvc;T;&I03@NDJfO~-? zKIPxGuG6?V`}5{#mauG++*_;rKghKH<5Qey@QP$*bM~qc7})Q5P;4_RIydE5q!_P~ zTN2#se{IaV7yPQLTI+>(a8>tEuOAbG)DkR^zv%WSbf08tu18gx-~MBA4Xjvs;c5D@ zmR-E%IF!^vUCc7~9|HB8soACksv_PTvg<^7o&d7wA%FPmK6c~1VmgP#+Hf;UQ2gHQSnCjxUNoBpHkcBI&pP|xapiuJ?zC_@b zo~7uyR5LCJ#1wS)np1=@LOjmHmYB;`;Qg_ct#ISVpVD@3bJ=*oA(DRy@-G(HA9qj5cj zMYx&8)?xt;Le`6SF=iiyL_F@J7f=4D+|F4S-}ud8;&=MOtAfcbe^*R3sAj>z3;c%# z7_rj>ll`kFhIuA#S*mpI0V?~Ofx#VUKO_?tHw^5b*Y25Mo+ne6)^3HTJmO?mTKZ1a zB6(#?hQm~>zb_ZvRmu&~BYgc7BCBrwZ|Q;wv^&hm&tAbOQ6pdAKuJRU8ED_%@4la% zwO3VmC#V-`p}fCkAO;y?7*R#`N%nQTRcOExiuld)X0_+YD!tFD2|6k@6Dy9dB5{F@ za@R@_`N&=N zdHF9zC7Gt;?1xO1UXyKk41{exzJ%I$%W(I+pqo%!w;cs3uzK4}f)x(`Tz zfkf)X16O8Op2eiXBlosqQ875|#Bj-M%GqCwbbBjwMRbKha+fm0FYQ(Kb*v1vj4p*d)#oRfN1f+9V65t=7 zm_1j*DhB909j1SPfz};jV3Er*3k{irzlE7?Luz@8g~e)8 z1N04(ULqv0)lmw`!3OGoO1q#d1$>Tz_eThcUjK|Um4uVST!U%aEeg>ZV9h=<@FaOq zwRFmj#K=l6DkXQ2EPbEKk!}rP)-TDDu7siq{w+>~Z7k-aML<1=H5NngIT27R9EJ0AwuJb--fq@B{A7#WN?;=c@&hi9=jBA$s^??Gt$JRdwlN4*I!HLjSCAV2 zuZ7E?_|q-uJBe~^YAiCw{$?OM5f(9DU4e2U9O4mUS#7QTG+SqEmJG#$faH1u-J2{K zK#?SpAx}3``rc+%861MxmT3evNM?hEsH3lmT7OYR_4BlaUlCzlpPYkLXvjwv`7~d^ zk!P$a$Z7@gt~_kSWpGsRjySG+R~5yP*DI;B1S;P!gvJ+zt2o^aIRB?_O!eB|chbCE zgt%L3_Z3C@s!^W5ec^FKy=dY6^=ZSQ5V>{R>pX!cdebLrD!iJ%O25~Sot?dP+X*9$ zXioIYN`xV+EfNEej-k#7|6nW2D@GcI5#4u8(_~v$ZPq}C?5e5~f7%%mm8`rX0)+O} z=-`dirpRQ-FH(HejtLxQ-lb|ksAh}4;mpES5v^KMc;e(sScBnpe}L4Ahh((ttB3Tj zdT0%CVF`bj-5%!_%(G0;v|<#u=%z{}Qvy_;9(jGEG~;RSA15Bze|6Wgj#vBl+$*UpMvwA5vi3b z0!@Cx$>{zK(kq7|>wu!}`DEzAS6nblH{b+lJDcShTmu*VP#8*Rm00R z?3hSYg0bf;#Mr>w6IWV-lllIPNs9R(lm0)zWbIzX4DsOAFY9)H*%&eEJqHCa;x!h z723ed`D5q}NCfj!QHr*4xVO8xNa_W*&7XK+;hJ>Pg$DHw#UZ$(-6&OyYJ$d=C>UUj|Clzv>g#9Bi$ z#N~WM1>I93F};wZV;#22%WDl``yNifPst4UiwPFcnUHlDZdZoLw!igwU%d}ZFh+&Y z2NW%qGX&6H{b~{^m>!J?Y*>B-HUq!y@()68FYfK%~j*uiDKOQ3+2-{EE^y zk5HNgr4rF8;|(w9_vi^^q3q+a%#s3na#1aXqSm2F7ZeiQcNcE4K+C?`XY%(2)dz+2LkL8O@u*a^~(w&EgfSCLISoc*8gFIT|kp z4^ds-Y)~t&vJQA=4Y%j$@?!ou>I(bRT~~yOi#KlQHS_B*X2cKtcg;u?Ee!DA8>zvB zWy67OBp-7`QTo4Fq$s*R)GX23Sp+9`{4uhX^VkKfsH9=$y5Vd^sbDAt0$5*rP?0Yd zyhR-U{=LCS27W?FNp40K#{Jz7cLJfCVnl3^T8zu0t*O36RCWki97KSycUgcse3r2s z0&^z?tIhK4O7e#82;>gGzY$;BFFF{$6xt0`MM9M07^RL!p77^JJcK(x$LoMWb9VCzsd z3d;k0i;TFibzm#9OV9AmbMvnkIXxw^YW zGX8jbd3bopIHI^_1}(bqCe$ffih6por~=3VTY=9b+L#J-Uj2s2ge$}1GtRExn#dc# zba-&<1za+2XOjXtB;6HHRTRzqIlWI4$xoE7^tzXES-2S9W+72B*-qQpF4{;SmC6#U zjC>hZE(w6-tf7tvEpuDzs{J=QOMz!pn9zZ1(v*mkBfnLVn-Z zrj8N{IqbrUDwp6r{9mrq?ZzX~ilGbv0CuFUhjOJh2gMGQfK;aRlanj&t8Vw5?kC^d zHOeS#hH>tTI--Cg-+%X~3$0g?JOu(bb&g0+co>3%R#Ub+ zKJT)svZ}VS^bwFmVHy9L)%Cmo&QFxc1J zQS{rz+xEvNQy6Zv>!8qJpV>@zwHuGd7W2q{i5->NdYqbyTKfz8`Ozvn z;Ww-#cy4d;GMg{_@99c@9YYtb;pri|)~@|#gVoh`c(XjvY?mvT!I7(|?fj{2d}s2Wc6Rk0+fft>@%a$&}CnB3Tw_ zT5aCO={gsg{!k8isj6*#uJjHU503&*uoSr9&3##&=bdNt9Ui$i?AleYtQrowy&p!f z>|jJ%=~BdsiYP6CRD_GCu2;y_@i&^spUjI~9HvP_R3pUXPM!;d*ThbvW zaOgko|HyxDJKi>qLHTH6sb+(DUOwFdzM*1>nL$1TdnZmnAu+Sj3rnAGB z+0kFc^bm8QMXNIF=zK5N#zl_2D_9r}mPw@|IO~H;>UyGwAUqsKt*%3}Ea#sd--kv~ 
z=>ql3Q%+Qs6wN5nXW%(@Y#NKB(ui06f#7}!k9GeDCUF@WH%x$m3ssfV`jUnsJi_6uXVAZ{!K+C%jd&zfy27!t@%WlM-8ECk98Ro73I zXbX;L8M~vquHD*r%BEd6jT)4~bu}=xfWw-`!hh7XZ6c#P3XE*VF)ZP1GqQyOv3>*C z;1McmGO9w3q{!NPRpHr2YM3yVA`=lIzOg6uL=Za#P^`{7!;qrI8pOW82+-F z$_N7P0d%zj-vA^3XOIQ(TP**P#t@lHV*!*C;*CUF#tB!MtlYm`yPgaHybD>M={H2C z18gk_Oo8X8(odA%G)l$A@J6#rE}BfHq7dVAMwdP>p*RVEt^Rt$7lfhYbPkVzq`3%0 z@GzspIYDU}g-qKt3L7P#pa3osUj&5urer+uyX#>WOwmoz0m2(BJOw?a%!a@H!7z#> zNQ5wD7MohZsZE0)&D|Dtf6LI`n0#j4f#NDRhxd#LFn@?xLUv|eu@g+;6NoeiP9?c1 z${O76o}bXY~oz-m$X zoxYzO(o(^^0{QMKvh2N{a_vWG`{q=SN1V)Pz)T#*bzY*9^xAt#{6mvSn&bNR4r&1^T8O zYDh@^qXLFl=n&)L9`kFBl>9hs%5Sp!<+5z|p5t2oKr(aQd9*y_W_cT}On&?AG=1Mg zHunBt0rNT2>yCUH3Upz|7Yu`nhQVcR-s`bJ_S3|od{`8O(ZxgpX?hjIT+utxFpe)S zD%^bidJG{EnYU(CfU-aM-EfSt{bYaR>e<%7CrVa2V83G0w8o#D+%Arhps<-E7DNm2~r#4W^#)CXqQ;D1(>}c8@f-mRA=C z5S9>!R-OWLwx+)q)cuyEu#&HGKZ{Af9|~N1?4<2eou5{aS7mZg8G6Ii#a^`8C>$+cFs!}`;FI?!Zac<mIxU2m-K;|S=GTVURkz?f+vh+68afCeVUOORP`6yUWO4t&zd4@OBE z%**p!Sp-CAI21@2^d$q)1>vGAfkG7!3pCfT^{DVE6{mQM*VMiIf38q}1^{(s&LU8S zCj-lsR05Puh#!UM#*6bym0*zxu3I}l-p;b@1F3R^?OpPtf#}Id`w3Phpf+X|Y*V3p zOvsWl2HMVp23m&h10!Sv!G-*+SwRw0^jRrgPC{>+{lIVnm?v=D1b{5MdK;UT`7+WV zPGx@q!)yR1EQM3LSwtw+z4Byk2n-!Py)5oXBxUaF$G@xoXd+^E{68^{40NRliID2~ zbm)0NN0Yrx+r=_ezJFaT*+Kin`4}N20P%|gXY^K@CK(Zzk*-(^I3?IH+nC<{iptsy zg#?*CP9f>|0JEI^4B}{TEA1Rxucr(5$2E7tA%l)ikN$z%3K|cBbXMis{w}uoA$YX6 zHb%@mfC3CM)vF{T@mJ&cDz}AmYJ^sqhbE}&I8wijmMQMcV^WLvjkF5p2KsiAfQsCl`w>p(K2m4F}9szm%lgzurVLAp;n zpI~QY>Fv(+y79fL220S_VEAF4tHiMFeqbBWFyn^=6AkGf7KmX%K+e8jbDBBUHSXE3 zJ~heH0SMP|xhbw=>?V6(GIis@_0#az(*=vRm^0|`cb z3?(n8l({*6>sPe%2TK8fvGC+EB)kQdsG2!tD~t4aDzrGy*zpjvm3tiqZCyRLr6r8- z{%?x1th0#1Rj(j25!6L<;Bs_r4*}2PSVaRsrcr^K8K0{JjD?40WNC`~#J&XmeaaNy znVVVq!DRu{4fp^GL=fI+%F4`Q0xcC6cShrda%m7Nz=ZBbD62UyYJ;l;Dud8R*DES| z`LB1gu6ndnM|;TqGd_?#|BCem+b(lf!Fou9xN*GKjwq zg6$H(-)5}$_&)MF+>_(zc}%IQ>%E0Lh7)*=c@2iMx5+v4h1VpO1C*fh1nRh|ewZ11 zUp3?Gms2^az7;EfZ8}qei8VZESWC8rs*#uDX@6+?p?PJpyC@iP*0>h^2aR=p(2DFm zUWh5s$Wrv{hYI0U52Tw|A(?M5R_aXZDG&8H@N$w0#HRfR&W z8gT;hsVULVor5KGxPkst)A2TsIKzK+t^b|F<@t50W+Qn*!LuwIR&o|W3v3`hydQM{ zme50iEe4+|EI=8w-dWfrg20%Oeta=Jk9hx7`bQwDXef6pO@_TVg$-+Gg!ORtFH*kM*!Oj}x zY^j~sQhsuqHaV9as2XBd*-~+qyp_I??-J0QnMh3KWCR(-WfS02)B;mQw)`)X3lSk3 z^KQxNsI)1*;sOwC`%k?*@6$Y&Av=hz8F~dFFLbaMVEh>3xTb4QO*RP04T zfivLkSCxJ7`P}Y)T6M1FeOj+8G0Ha!#C1OlyJZsCDe$nBd=j3u$FCdzhL4L&^1%XNF_Y3NCR zqUV*F57f~H^Q=!OGHAW5Xs&3aWL>Acgu{xIBo~8i%r_W0DNm9$OCAS!s_WW2Y<(8)Ho2sOuE|wa`^Jszsb zBM(zm)(d1KO2|Xr+iKkwGmg(`9KtOulx^L&QnGBDm1NGMk*CxQ(b}7rW{8opX-A+( zh{P^H7aQmQJasF|_hv4*VwCejNjN-pDYu*heN&vIX`QNYp|RB}Ls2)MM6+&b`YGR< zSr!``xQ5xFl9rq@rwdkrgS`7ag%Ut*G4j~gbC|Z!E3!QwB3e7q0N8B>g=m^(n7WbJ zW9`Z#z!uRolf@ zUj;kZA~4qZct)NwX>m#7MF>17AfKM&yG{F+o5LVeQwKPE@NbMM|2)2Yiap+fOz*2+ z)ZcmC2E0QJ*${{*kRh51LMw#X&+>|7FJ}ZNa0-@92#hLR6;EAnCy=O*A%n12$zoU? 
zzha3uyRc%A@$K6jg*k$a@{QUDE%?^M!f=hYP6`ki4~#@_LS8#M)^>am|D{f`C52+r zL@+3X4~mWY?uFtw9JSYdXPXJUPP80^r(YG(h9mUZ&8L_VW&M5Y;07MD%ST^b`$yS= z3&jPZz+a-aZop^b;a!9VdZw|c>-K&@?)_je?&$5>SXlRcUo`6YgYD@MNB_tDZg8Q^ zx8RLI#F*K@^y&A1J69K+uYKc*0Ga7BLZd@E#ikoe})TVlgJ=eFl@dcF9pl>{1@l0!dS9p~?6rn5P(9(gV|y*h1St60mLx~f{#Z9fCwpSIm|r&=~4Sg;Olkx39G z6Sxc&1)m#d__iEJvn>a%xH1E8$k$+)`!gkS-S0GKw3+OucZttEzf)W*bTt`3E`@Pp z5*Q@Pml>1L(rX78la^p%VoBBZ4Re_eMu0I!gBSf!T}1qZW{YmXn%x&^YQb6`sp4*v z5v+nc0XNfwW8;2N_PaaZ$#Jqm^J2mjau@&CA&BCF92iddfq`MlgtnDtcrGs8*yKxp z<%yPY)ciUh!~pML%*v|?fi8;3Bw--^D#24Cmd*n&ejjFC(~rJc?!~b6OAzgSSYmFS zUS2<0S&rucG{Mk&RB^MS$NSN7gF&I=iRHJVx3Qjb+9{*XHl4G0qk9%=5-IvlV^&=diY~Jr%xWzGyL=$0(TUZLy~f zECinA8J(zT)bUC{!XH>j8{F|=@i1rP zDh~o=Xt(7)?0+h&zev9wD}#p16$YzbriYOUv-t0$t|~$X?XDvEUT}$8%Mv+8SIRZf z35oSNVJQ@93eYHj0N;LG5!*k}+Va98_&GBf0Rh+qxRe4S3W#$-Op0l!c#eW!ZJOcp zT8}`S_FtDlPL6^5F66C|&Q7S28u!AXvpq&bDqJdMH#Odu!|<76Py(Ztn8T*?y9r48 z>>rsFE5#oTA4_@HKBM6X%X^MQ{qw_74kD_dAJu%TdesouNiKslvJ|TIq3NswR01w# z7bcQkmcH-gIQmeAnCcPSZR#h0a9p2e-2h`QK#VIZuP0Vj0s(bGanW_ghif6EaTu#Ewpu3(La8wEOcAiw z<FsjffdNsgg712ltk-w&35#^_rcJrb$P$=I6Iy63W@rcP|)>_7KMA z;-_Rkjm}20Y(Nskb>pp#X2{z9@ADxs#FSga^Q7S=__46(Cv0C31jT$iD7pBNieC_qua1@gNLIxG_7$Z9>0i?D?z~`ke`5 zs{ANL3QjDMiKB)S9s*H`+H*nQ1z)rSlKlMMPeO1~i3RnA$=Ck$L6k#av@d01;|&F= z0~EPnM~W~ihJr)m`I!ZJ2($@u{TWFcG;GW^f0jGcj2IXqhCMe*Y>R2yY%1KJku3Wy zyUvULzgf0F_gk{;{GO&-Z`Z0&^rioDLg4S6B-zU@h<-Q*8W2k`l1cq?g(>1cElNTQ zXu@QN#SuV*Xe44F2f#mSpl~N&=J3dUw;7j$g7cXXm|*k$eTyZX#*jZU2f~`V%(JE@ zI+@7KNsdCpR|1D-MPza{${2#{eyH_-s61tVi7JO**dLl}UV>;4eyW7Qy6t407ILLN>E(-SMOh69p21x z3i3~XgEja6qVxEYqYWG|lol84Stm*KLIN`&P zaokicKVdskLyn45K7LazBsvvAfGtMCkb~p#tgYPSEt8Q*(O!f=bm}-#+UjFshFWsw zc5}0;>Ts6+SS2DeKm5;n{73P6kQny2n*DY%JbS{FiMb!^T{Y*n z&wVb&%#S~l$YIE|l4Sn%$2l>yXXCQ#2p|;^S}k@BUC9zzMlodu|66-M+UZ%E$s)RJ zp8`9a2o5yl*+iy1^F#~ddigGmSv47x5NWpewmeqz%{t z^fs!k={Rqcf?MPu(FZlv`6ml71s1i4>b{$=JuYJso`n+0!hHA(+%VPTRk&(f{tHbF zYe%l(C_L{)80rU&i*Sy{L~b~aMP`O};Gc_#W8Nh%-Fm4KDn&|`6XP80{P%JGA5MQ) zKi>bXsHtr^{gI8rrXRQu(|4EJ0{9I2UPLfZVI6=C3Now^^8KpvaBo`i8Vu{NrF7KN z-h5xJLBdS*3Jxw_ivKI_-9yuHH?0y_o&%Xe5A)dq((%7FYAWd7i zAjPLh8wDDsTAHZ?g$|4FuTvKyV3(L&#+CG^%yj)GL5t5(tDjY4Z(RX)jj=>nNDy$H zOWb`$T&bxk9emvEqCY#HDuAVS2|H(cUQc|!yVKD%)g;WwHNfS|{iOJmj?;;67!5RB-c|RhI zlzj&uFltVi2NDNg`SK_Up*fVk;mn{;z#Zzy7c*hf0C zC&9DCj`s7*PDd!mQ7bUyK)1QKeSP42S^Y5DAKYY($H|PHZQqq_hekn65wWySe#kr2 zXW}{NQ)my{sJJ0TlEj}C~4bV9S zWq0&XHg|g|-ih4kAg;r2oJDq)NDtQp3(+s4jI1*E(p6Ity5ERSxyGcpbpV1u#uYLy zNd#!=;wR;O`J7OJbG)PMvcZoWffPk9W%2ArzBdr0CJK?-3s_kZ6YR+Ts28D zwnS<ezdW4Ru=kPZ&hbZa!R zd7TapPKh--gas+0WPlzh-2qzx$R&IVS^eNmId*+HoeC$JLEJ!F{H}Y@1j@F#zLqp7 zgtG$+>ie*#h}#gh45djlgz&bnJQb4 zGoUbWF)^I%B~%UdxZH*vGD%S6Tw#|7u+ut^9j=7?PQV@953mzV+$Wv)8<`nryig7B&aVd5oE@eI!D06JE>v83wMKi zMQ4paW`v7ph_pkAEV0V<{rGLBZKIl*SM0jbG)qdGaP6~>6`^C|bJ_!Bsg#|{IDDDB zl`%GESV*5LnvfDF^wyh#FB!{rr#!D6T4So$eCsJF{S#@J~ z@u}VZ4U=yV$_mW`gF*7Qpi^0w{P>NtaM}K^R-s{6P{#9Q*&?m(htLLk*)e$eimHv?t_UKv4r>)NV^gpyA`?22&V=C}sy8_ONr#-@A6bshe?9-qeLsau z%5PF6XL9d1cB*>!U}9!U1Q7%ZyujmBvv*+Ze4*+wxkqEY`F;n#rH3!GZtCLv= zm~2ATu#g|!4+er@&1&+etodhNtyt$wsq2x{@pCDk8D%k$b}ba|-q zUQP=zfuO-R$)QnHYk^2~`_61?v|FL(g;^1_Z$6B4+|UFpY@FPKF~rw-9p)EEUd>k( zDB{Y;Yz}`L(2G0emr!}iAnjA*?C_W`#UDX$#O28(1I4QGXThU;iBjIbVs@uwBg0ED zKR`xUDpm_;oWDpW-qhKN%Am&C2t zN}52h?WAQdH)qP)=2dv_;B#Kv)hPSzy35A0X+!^sMPt~AOuvCDQ`zZX;{f{? 
zoL9DDYs2EE6J8<8*2=%fE06nVRG$tW=N)RlIjo%0$Rx~aiOlSkaszyw)mmgp*%*~z zAZsdPaJ^CQPUCfjxya4LE-qpJ8B7AHM;$4JT3~wU_m0j-bLCfgpWF;@&=}N#(Vqe} zQAaj~-Tjdy%gZaoHiHp)*dcoE-7iI2NKRu!!6Bgx;bKnA43%-^1(LE-i1sKQtyF!W zUlqr*6jb4Y{UyyC=dLguu&7OY;y9JpD&7N5<^_Dh5NX4{GhEBV z7X8;v7BfOZq}@;H-;i!{(ehN57Z`nU3)k`q)P=Wp;91i-{I|U5y(v3NX|pQ!CEh9` zao5T65ghkD35{&!uapBpxZfHxb~X!!Ug39B~;?>Om<<-^;vMi#BY}K?Wdq^tiEup zub#ce*mlvgefoIped8910lt3M_uE{-B$`Ir1ESv3;jk+=;k>Z|V%4k|hBj;y{z%<| z3%HGuQ@6`I7?2f{7@*)#kAt^kL8Q!JZlN(2PJBr^qFR1+YRf+scrkuq92v&Mo^XR?SIZAO zxV8f~MuhyyeCJ`4T}o6K3*(HVoaIUc9PcmEpM(BRbQjbLa~?rE9{iW(z7epnm-3alg zab%+N=`1KUQwz?y67*9ayJbA1x%9NT-0fwp3&zu&CAb4IW!9 zjNeAEgci;E@F@OVoqW|M2h#OrTdW*68M!izP_f9y4`LL98h)Wv zF9(shiAo#WAR~#mH(5A4AoqvbJKZC;0+QUsD(#ho?YE*6X}NIw`W${cIY{%RBybEI zw1nNUDFVxp%YIrUvOA-sg(A7B>Pc}D3w}wd_Uix=A zgHGF4!hZo4Fbjlu#W#q&!o-(|_#T$gnEh@Octi`or3BcaB%8y!Qb1!M$sh=#9ltZH zyQ#G&0I(1q*K>>NgIS0o>T*(4 z2JaS#(0*sH?jTCUGe`{18vT2ggc+-1lTX==e^-*@;@R{rQ^&7r0y=usD*=4r(Eq))xBm+_@&12&B+CMpSDapG2LoNnayxu6 PGT^ Date: Wed, 11 Jun 2025 15:29:11 -0600 Subject: [PATCH 040/158] ST7789 SPI driver - Rename saveDcPin() to savePinModeAlt() --- drivers/display/st7789_spi.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/display/st7789_spi.py b/drivers/display/st7789_spi.py index a675271..a0e49f9 100644 --- a/drivers/display/st7789_spi.py +++ b/drivers/display/st7789_spi.py @@ -348,9 +348,9 @@ def clear(self): # Write the buffer to the display self._write(None, self.buffer) - def saveDcPin(self): + def savePinModeAlt(self, pin): """ - Saves the current `mode` and `alt` of the DC pin so it can be restored + Saves the current `mode` and `alt` of the pin so it can be restored later. Mostly used to restore the SPI mode (MISO) of the DC pin after communication with the display in case another device is using the same SPI bus. @@ -361,12 +361,12 @@ def saveDcPin(self): # There's no way to get the mode and alt of a pin directly, so we # convert the pin to a string and parse it. 
Example format: # "Pin(GPIO16, mode=ALT, alt=SPI)" - dcStr = str(self.dc) + pinStr = str(pin) # Extract the "mode" parameter from the pin string - if "mode=" in dcStr: + if "mode=" in pinStr: # Split between "mode=" and the next comma or closing parenthesis - modeStr = dcStr.split("mode=")[1].split(",")[0].split(")")[0] + modeStr = pinStr.split("mode=")[1].split(",")[0].split(")")[0] # Look up the mode in Pin class dictionary mode = Pin.__dict__[modeStr] @@ -375,9 +375,9 @@ def saveDcPin(self): mode = None # Extrct the "alt" parameter from the pin string - if "alt=" in dcStr: + if "alt=" in pinStr: # Split between "alt=" and the next comma or closing parenthesis - altStr = dcStr.split("alt=")[1].split(",")[0].split(")")[0] + altStr = pinStr.split("alt=")[1].split(",")[0].split(")")[0] # Look up the alt in Pin class dictionary (with "ALT_" prefix) alt = Pin.__dict__["ALT_" + altStr] @@ -392,10 +392,12 @@ def _write(self, command=None, data=None): """SPI write to the device: commands and data.""" # Save the current mode and alt of the DC pin in case it's used by # another device on the same SPI bus - mode, alt = self.saveDcPin() + dcMode, dcAlt = self.savePinModeAlt(self.dc) + # Temporarily set the DC pin to output mode self.dc.init(mode=Pin.OUT) + # Write to the display if self.cs: self.cs.off() if command is not None: @@ -408,4 +410,4 @@ def _write(self, command=None, data=None): self.cs.on() # Restore the DC pin to its original mode and alt - self.dc.init(mode=mode, alt=alt) + self.dc.init(mode=dcMode, alt=dcAlt) From 7f7590b587d54345ab1eb9dec4d505668fcfab9a Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 11 Jun 2025 15:29:42 -0600 Subject: [PATCH 041/158] Add prints to end of Example 1 --- examples/ex01_hello_opencv.py | 4 +++ examples/main.py | 47 +++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 examples/main.py diff --git a/examples/ex01_hello_opencv.py b/examples/ex01_hello_opencv.py index 2993cee..18e149e 100644 --- a/examples/ex01_hello_opencv.py +++ b/examples/ex01_hello_opencv.py @@ -53,4 +53,8 @@ # # Note - Some MicroPython IDEs (like Thonny) don't actually send any key presses # until you hit Enter on your keyboard +print("Press any key to continue") key = cv2.waitKey(0) # Not necessary to display image, can remove if desired + +# Print the key pressed +print("Key pressed:", chr(key)) diff --git a/examples/main.py b/examples/main.py new file mode 100644 index 0000000..df373dd --- /dev/null +++ b/examples/main.py @@ -0,0 +1,47 @@ +import cv2 +from ulab import numpy as np +from st7789_spi import ST7789_SPI +import time + +display = ST7789_SPI(width=240, + height=320, + spi_id=0, + pin_cs=17, + pin_dc=16, + rotation=1) + +x = 0 +y = 0 +vx = 10 +vy = 10 + +while True: + img = np.zeros((240, 320, 3), dtype=np.uint8) + img[0:50, :] = (255, 0, 0) + img = cv2.ellipse(img, (160, 120), (100, 50), 0, 0, 360, (0, 255, 0), -1) + img = cv2.putText(img, "Hello OpenCV!", (50, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) + cv2.imshow(display, img) + key = cv2.waitKey(1000) + + edge_img = cv2.Canny(img, 100, 200) + cv2.imshow(display, edge_img) + key = cv2.waitKey(2000) + + img = np.zeros((240, 320, 3), dtype=np.uint8) + t0 = time.time() + while time.time() - t0 < 5: + # Update position + x += vx + y += vy + if x <= 0 or x >= 320 - 50: + vx = -vx # Reverse direction on x-axis + if y <= 0 or y >= 240 - 50: + vy = -vy # Reverse direction on y-axis + + # Draw a square + img[y:y+50, x:x+50] = (0, 0, 255) + + cv2.imshow(display, img) + + # Clear the 
From 95996770fd1e012e2475ff0290e62b2e22857793 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 11 Jun 2025 15:37:09 -0600
Subject: [PATCH 042/158] Comment typo and cleanup

---
 src/core.cpp | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/core.cpp b/src/core.cpp
index f3a247d..75f2a06 100644
--- a/src/core.cpp
+++ b/src/core.cpp
@@ -44,13 +44,14 @@ bool upyOpenCVBoot()
 {
     // Initializes `StdMatAllocator` on the C heap, see:
     // https://github.com/sparkfun/micropython-opencv/issues/17
+    //
     // Alternatively, we could set the NumpyAllocator as the default Mat
-    // allocator with Mat::setDefaultAllocator(&GetNumpyAllocator()),
+    // allocator with `Mat::setDefaultAllocator(&GetNumpyAllocator())`,
     // however that actually causes some issues. For example, Canny()
-    // creates an temporary 64-bit float Mat, which is not supported by
-    // ulab NumPy and therefore fails if we use the NumpyAllocator. The
-    // StdMatAllocator is fine, because it calls `malloc()`, which we catch
-    // with `__wrap_malloc()` to ensure the data is allocated on the GC heap
+    // creates a temporary 64-bit float Mat, which is not supported by ulab
+    // NumPy and therefore fails with the NumpyAllocator. StdMatAllocator is
+    // fine to use, because it calls `malloc()`, which we catch with
+    // `__wrap_malloc()` to ensure the data is allocated on the GC heap
     Mat::getDefaultAllocator();
 
     return true;

From f3d8c116b2e6d069d07e46f65f05775487788b17 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 11 Jun 2025 15:43:36 -0600
Subject: [PATCH 043/158] Add note to Example 2 about large/small images

---
 examples/ex02_imread_imwrite.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/examples/ex02_imread_imwrite.py b/examples/ex02_imread_imwrite.py
index 50a431a..bd70e22 100644
--- a/examples/ex02_imread_imwrite.py
+++ b/examples/ex02_imread_imwrite.py
@@ -11,7 +11,11 @@
 # Note - only BMP and PNG formats are currently supported in MicroPython OpenCV
 img = cv2.imread("test_images/sparkfun_logo.png")
 
-# Show the image for a moment
+# Show the image for 1 second
+#
+# Note - If the image is larger or smaller than the display, the behavior will
+# depend on the display driver.
For example, the default ST7789 display driver +# will crop large images, and show small images in the top-left corner cv2.imshow(display, img) key = cv2.waitKey(1000) From 8696ea7e0f081226b1659e2f2e840caee4b98892 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 11 Jun 2025 15:50:06 -0600 Subject: [PATCH 044/158] Remove main.py Didn't mean to commit it :) --- examples/main.py | 47 ----------------------------------------------- 1 file changed, 47 deletions(-) delete mode 100644 examples/main.py diff --git a/examples/main.py b/examples/main.py deleted file mode 100644 index df373dd..0000000 --- a/examples/main.py +++ /dev/null @@ -1,47 +0,0 @@ -import cv2 -from ulab import numpy as np -from st7789_spi import ST7789_SPI -import time - -display = ST7789_SPI(width=240, - height=320, - spi_id=0, - pin_cs=17, - pin_dc=16, - rotation=1) - -x = 0 -y = 0 -vx = 10 -vy = 10 - -while True: - img = np.zeros((240, 320, 3), dtype=np.uint8) - img[0:50, :] = (255, 0, 0) - img = cv2.ellipse(img, (160, 120), (100, 50), 0, 0, 360, (0, 255, 0), -1) - img = cv2.putText(img, "Hello OpenCV!", (50, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) - cv2.imshow(display, img) - key = cv2.waitKey(1000) - - edge_img = cv2.Canny(img, 100, 200) - cv2.imshow(display, edge_img) - key = cv2.waitKey(2000) - - img = np.zeros((240, 320, 3), dtype=np.uint8) - t0 = time.time() - while time.time() - t0 < 5: - # Update position - x += vx - y += vy - if x <= 0 or x >= 320 - 50: - vx = -vx # Reverse direction on x-axis - if y <= 0 or y >= 240 - 50: - vy = -vy # Reverse direction on y-axis - - # Draw a square - img[y:y+50, x:x+50] = (0, 0, 255) - - cv2.imshow(display, img) - - # Clear the square area for the next frame - img[y:y+50, x:x+50] = (0, 0, 0) \ No newline at end of file From 8b98132659d036a3550b4432a6406a4254b40809 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 11 Jun 2025 16:21:01 -0600 Subject: [PATCH 045/158] Add splash screen support to boot.py --- examples/boot.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/examples/boot.py b/examples/boot.py index 3baf032..7fa8d1d 100644 --- a/examples/boot.py +++ b/examples/boot.py @@ -54,7 +54,13 @@ # minimize latency spi.init(baudrate=24_000_000) -# Clear the display to wipe any previous content. 
This is optional, but it's -# recommended to ensure a clean slate -if hasattr(display, 'clear'): - display.clear() +# Attempt to put something on the display to clear the previous content +try: + # Load and display a splash image, if it's available + import cv2 + splash_image = cv2.imread("splash.png") + cv2.imshow(display, splash_image) +except Exception: + # Clear the display, if the driver supports it + if hasattr(display, 'clear'): + display.clear() From 79fd3a1585e82fa465c8246b47487a505656b9e6 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 12 Jun 2025 17:31:36 -0600 Subject: [PATCH 046/158] Initial HM01B0 PIO implementation Works, but having some sync issues --- drivers/camera/hm01b0_pio.py | 314 +++++++++++++++++++++++++++++++++++ 1 file changed, 314 insertions(+) create mode 100644 drivers/camera/hm01b0_pio.py diff --git a/drivers/camera/hm01b0_pio.py b/drivers/camera/hm01b0_pio.py new file mode 100644 index 0000000..e342c9a --- /dev/null +++ b/drivers/camera/hm01b0_pio.py @@ -0,0 +1,314 @@ +import rp2 +from machine import Pin, I2C +from ulab import numpy as np +from time import sleep_us +import time + +# Derived from: +# https://github.com/openmv/openmv/blob/5acf5baf92b4314a549bdd068138e5df6cc0bac7/drivers/sensors/hm01b0.c +class HM01B0_PIO(): + # Derived from: + # https://github.com/openmv/openmv/blob/5acf5baf92b4314a549bdd068138e5df6cc0bac7/drivers/sensors/hm01b0_regs.h + + # Read only registers + MODEL_ID_H = 0x0000 + MODEL_ID_L = 0x0001 + FRAME_COUNT = 0x0005 + PIXEL_ORDER = 0x0006 + # Sensor mode control + MODE_SELECT = 0x0100 + IMG_ORIENTATION = 0x0101 + SW_RESET = 0x0103 + GRP_PARAM_HOLD = 0x0104 + # Sensor exposure gain control + INTEGRATION_H = 0x0202 + INTEGRATION_L = 0x0203 + ANALOG_GAIN = 0x0205 + DIGITAL_GAIN_H = 0x020E + DIGITAL_GAIN_L = 0x020F + # Frame timing control + FRAME_LEN_LINES_H = 0x0340 + FRAME_LEN_LINES_L = 0x0341 + LINE_LEN_PCK_H = 0x0342 + LINE_LEN_PCK_L = 0x0343 + # Binning mode control + READOUT_X = 0x0383 + READOUT_Y = 0x0387 + BINNING_MODE = 0x0390 + # Test pattern control + TEST_PATTERN_MODE = 0x0601 + # Black level control + BLC_CFG = 0x1000 + BLC_TGT = 0x1003 + BLI_EN = 0x1006 + BLC2_TGT = 0x1007 + # Sensor reserved + DPC_CTRL = 0x1008 + SINGLE_THR_HOT = 0x100B + SINGLE_THR_COLD = 0x100C + # VSYNC,HSYNC and pixel shift register + VSYNC_HSYNC_PIXEL_SHIFT_EN = 0x1012 + # Automatic exposure gain control + AE_CTRL = 0x2100 + AE_TARGET_MEAN = 0x2101 + AE_MIN_MEAN = 0x2102 + CONVERGE_IN_TH = 0x2103 + CONVERGE_OUT_TH = 0x2104 + MAX_INTG_H = 0x2105 + MAX_INTG_L = 0x2106 + MIN_INTG = 0x2107 + MAX_AGAIN_FULL = 0x2108 + MAX_AGAIN_BIN2 = 0x2109 + MIN_AGAIN = 0x210A + MAX_DGAIN = 0x210B + MIN_DGAIN = 0x210C + DAMPING_FACTOR = 0x210D + FS_CTRL = 0x210E + FS_60HZ_H = 0x210F + FS_60HZ_L = 0x2110 + FS_50HZ_H = 0x2111 + FS_50HZ_L = 0x2112 + FS_HYST_TH = 0x2113 + # Motion detection control + MD_CTRL = 0x2150 + I2C_CLEAR = 0x2153 + WMEAN_DIFF_TH_H = 0x2155 + WMEAN_DIFF_TH_M = 0x2156 + WMEAN_DIFF_TH_L = 0x2157 + MD_THH = 0x2158 + MD_THM1 = 0x2159 + MD_THM2 = 0x215A + MD_THL = 0x215B + STATISTIC_CTRL = 0x2000 + MD_LROI_X_START_H = 0x2011 + MD_LROI_X_START_L = 0x2012 + MD_LROI_Y_START_H = 0x2013 + MD_LROI_Y_START_L = 0x2014 + MD_LROI_X_END_H = 0x2015 + MD_LROI_X_END_L = 0x2016 + MD_LROI_Y_END_H = 0x2017 + MD_LROI_Y_END_L = 0x2018 + MD_INTERRUPT = 0x2160 + # Sensor timing control + QVGA_WIN_EN = 0x3010 + SIX_BIT_MODE_EN = 0x3011 + PMU_AUTOSLEEP_FRAMECNT = 0x3020 + ADVANCE_VSYNC = 0x3022 + ADVANCE_HSYNC = 0x3023 + EARLY_GAIN = 0x3035 + # IO and clock 
control + BIT_CONTROL = 0x3059 + OSC_CLK_DIV = 0x3060 + ANA_Register_11 = 0x3061 + IO_DRIVE_STR = 0x3062 + IO_DRIVE_STR2 = 0x3063 + ANA_Register_14 = 0x3064 + OUTPUT_PIN_STATUS_CONTROL = 0x3065 + ANA_Register_17 = 0x3067 + PCLK_POLARITY = 0x3068 + + # Useful values of Himax registers + HIMAX_RESET = 0x01 + HIMAX_MODE_STANDBY = 0x00 + HIMAX_MODE_STREAMING = 0x01 # I2C triggered streaming enable + HIMAX_MODE_STREAMING_NFRAMES = 0x03 # Output N frames + HIMAX_MODE_STREAMING_TRIG = 0x05 # Hardware Trigger + # HIMAX_SET_HMIRROR (r, x) ((r & 0xFE) | ((x & 1) << 0)) + # HIMAX_SET_VMIRROR (r, x) ((r & 0xFD) | ((x & 1) << 1)) + + PCLK_RISING_EDGE = 0x00 + PCLK_FALLING_EDGE = 0x01 + AE_CTRL_ENABLE = 0x00 + AE_CTRL_DISABLE = 0x01 + + HIMAX_BOOT_RETRY = 10 + HIMAX_LINE_LEN_PCK_FULL = 0x178 + HIMAX_FRAME_LENGTH_FULL = 0x109 + + HIMAX_LINE_LEN_PCK_QVGA = 0x178 + HIMAX_FRAME_LENGTH_QVGA = 0x104 + + HIMAX_LINE_LEN_PCK_QQVGA = 0x178 + HIMAX_FRAME_LENGTH_QQVGA = 0x084 + + INIT_COMMANDS = ( + (0x3044, 0x0A), # Increase CDS time for settling + (0x3045, 0x00), # Make symmetric for cds_tg and rst_tg + (0x3047, 0x0A), # Increase CDS time for settling + (0x3050, 0xC0), # Make negative offset up to 4x + (0x3051, 0x42), + (0x3052, 0x50), + (0x3053, 0x00), + (0x3054, 0x03), # tuning sf sig clamping as lowest + (0x3055, 0xF7), # tuning dsun + (0x3056, 0xF8), # increase adc nonoverlap clk + (0x3057, 0x29), # increase adc pwr for missing code + (0x3058, 0x1F), # turn on dsun + (0x3059, 0x1E), + (0x3064, 0x00), + (0x3065, 0x04), # pad pull 0 + (ANA_Register_17, 0x00), # Disable internal oscillator + + (0x1012, 0x00), # Sync. shift disable + + (AE_CTRL, 0x01), #Automatic Exposure + (AE_TARGET_MEAN, 0x80), #AE target mean [Def: 0x3C] + (AE_MIN_MEAN, 0x0A), #AE min target mean [Def: 0x0A] + (CONVERGE_IN_TH, 0x03), #Converge in threshold [Def: 0x03] + (CONVERGE_OUT_TH, 0x05), #Converge out threshold [Def: 0x05] + (MAX_INTG_H, (HIMAX_FRAME_LENGTH_QVGA - 2) >> 8), #Maximum INTG High Byte [Def: 0x01] + (MAX_INTG_L, (HIMAX_FRAME_LENGTH_QVGA - 2) & 0xFF), #Maximum INTG Low Byte [Def: 0x54] + (MAX_AGAIN_FULL, 0x04), #Maximum Analog gain in full frame mode [Def: 0x03] + (MAX_AGAIN_BIN2, 0x04), #Maximum Analog gain in bin2 mode [Def: 0x04] + (MAX_DGAIN, 0xC0), + + (INTEGRATION_H, 0x01), #Integration H [Def: 0x01] + (INTEGRATION_L, 0x08), #Integration L [Def: 0x08] + (ANALOG_GAIN, 0x00), #Analog Global Gain [Def: 0x00] + (DAMPING_FACTOR, 0x20), #Damping Factor [Def: 0x20] + (DIGITAL_GAIN_H, 0x01), #Digital Gain High [Def: 0x01] + (DIGITAL_GAIN_L, 0x00), #Digital Gain Low [Def: 0x00] + + (MD_CTRL, 0x00), + (FRAME_LEN_LINES_H, HIMAX_FRAME_LENGTH_QVGA >> 8), + (FRAME_LEN_LINES_L, HIMAX_FRAME_LENGTH_QVGA & 0xFF), + (LINE_LEN_PCK_H, HIMAX_LINE_LEN_PCK_QVGA >> 8), + (LINE_LEN_PCK_L, HIMAX_LINE_LEN_PCK_QVGA & 0xFF), + (QVGA_WIN_EN, 0x01), # Enable QVGA window readout + (0x3059, 0x22), # 1-bit mode + (OSC_CLK_DIV, 0x14), + (IMG_ORIENTATION, 0x00), # change the orientation + (0x0104, 0x01), + (MODE_SELECT, 0x01), # Streaming mode + ) + + def __init__( + self, + i2c, + pin_d0, + pin_vsync, + pin_hsync, + pin_pclk, + sm_id = 0, + i2c_address = 0x24, + ): + self.i2c = i2c + self.pin_d0 = pin_d0 + self.pin_vsync = pin_vsync + self.pin_hsync = pin_hsync + self.pin_pclk = pin_pclk + self.sm_id = sm_id + self.i2c_address = i2c_address + self.buffer = np.zeros((244, 324), dtype=np.uint8) + # self.buffer = bytearray(244 * 324) + + Pin(pin_d0, Pin.IN) + Pin(pin_vsync, Pin.IN) + Pin(pin_hsync, Pin.IN) + Pin(pin_pclk, Pin.IN) + + self.soft_reset() + 
self.send_init() + self.start_pio_dma() + + def is_connected(self): + try: + # Try to read the chip ID + # If it throws an I/O error - the device isn't connected + id = self.getChipID() + + # Confirm the chip ID is correct + if id == 0x01B0: + return True + else: + return False + except: + return False + + def getChipID(self): + """ + Reads the chip ID from the HM01B0 sensor. + Returns: + int: The chip ID as a 16-bit integer. + """ + data = self.readRegister(self.MODEL_ID_H, 2) + return (data[0] << 8) | data[1] + + def soft_reset(self): + """ + Performs a software reset of the HM01B0 sensor. + This resets the sensor to its default state. + """ + self.writeRegister(self.SW_RESET, self.HIMAX_RESET) + + def send_init(self): + """ + Initializes the HM01B0 sensor with default settings. + This includes setting up exposure, gain, and frame timing. + """ + for reg, value in self.INIT_COMMANDS: + self.writeRegister(reg, value) + sleep_us(1000) + + # Ensure the sensor is in streaming mode + # self.writeRegister(self.MODE_SELECT, self.HIMAX_MODE_STREAMING) + + def readRegister(self, reg, nbytes=1): + self.i2c.writeto(self.i2c_address, bytes([reg >> 8, reg & 0xFF])) + return self.i2c.readfrom(self.i2c_address, nbytes) + + def writeRegister(self, reg, data): + if isinstance(data, int): + data = bytes([data]) + elif isinstance(data, (list, tuple)): + data = bytes(data) + self.i2c.writeto(self.i2c_address, bytes([reg >> 8, reg & 0xFF]) + data) + + def start_pio_dma(self): + program = self._pio_read_dvp + program[0][0] |= self.pin_hsync & 0x1F + program[0][1] |= self.pin_pclk & 0x1F + program[0][3] |= self.pin_pclk & 0x1F + self.sm = rp2.StateMachine( + self.sm_id, + program, + in_base = self.pin_d0 + ) + self.sm.active(1) + + self.dma = rp2.DMA() + req_num = ((self.sm_id // 4) << 3) + (self.sm_id % 4) + 4 + dma_ctrl = self.dma.pack_ctrl( + size = 0, # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit + inc_read = False, + treq_sel = req_num + # irq_quiet = False + ) + self.dma.config( + read = self.sm, + write = self.buffer, + count = 244 * 324, + ctrl = dma_ctrl + ) + + Pin(self.pin_vsync).irq( + trigger = Pin.IRQ_FALLING, + handler = lambda pin: self._vsync_handler() + ) + + def _vsync_handler(self): + self.sm.restart() + self.dma.write = self.buffer + self.dma.active(True) + # print("new frame:", time.ticks_ms()) + + @rp2.asm_pio( + in_shiftdir = rp2.PIO.SHIFT_LEFT, + push_thresh = 8, + autopush = True + ) + def _pio_read_dvp(): + wait(1, gpio, 0) # Mask in HSYNC pin + wait(1, gpio, 0) # Mask in PCLK pin + in_(pins, 1) # Mask in number of pins + wait(0, gpio, 0) # Mask in PCLK pin From 8c1abbdc5d8e9c200b900bc9154f2b7a681c43d3 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 13 Jun 2025 11:14:29 -0600 Subject: [PATCH 047/158] Improve HM01B0 PIO sync Have to disable DMA to reset write address Also, restarting PIO SM doesn't clear RX buffer, have to read out manually Still not perfect. 
Start of frame seems consistently good, but DMA seems to loose sync or something part way through with a lot of frames --- drivers/camera/hm01b0_pio.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/camera/hm01b0_pio.py b/drivers/camera/hm01b0_pio.py index e342c9a..4c79ac9 100644 --- a/drivers/camera/hm01b0_pio.py +++ b/drivers/camera/hm01b0_pio.py @@ -200,7 +200,6 @@ def __init__( self.sm_id = sm_id self.i2c_address = i2c_address self.buffer = np.zeros((244, 324), dtype=np.uint8) - # self.buffer = bytearray(244 * 324) Pin(pin_d0, Pin.IN) Pin(pin_vsync, Pin.IN) @@ -286,7 +285,6 @@ def start_pio_dma(self): ) self.dma.config( read = self.sm, - write = self.buffer, count = 244 * 324, ctrl = dma_ctrl ) @@ -297,10 +295,18 @@ def start_pio_dma(self): ) def _vsync_handler(self): - self.sm.restart() + # Disable DMA before reconfiguring it + self.dma.active(False) + + # Ensure PIO RX FIFO is empty + while self.sm.rx_fifo() > 0: + self.sm.get() + + # Reset the DMA write address self.dma.write = self.buffer + + # Start the DMA self.dma.active(True) - # print("new frame:", time.ticks_ms()) @rp2.asm_pio( in_shiftdir = rp2.PIO.SHIFT_LEFT, From 2944b43884acc9c4967a9d976b7ece8cd0265ade Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 13 Jun 2025 12:49:52 -0600 Subject: [PATCH 048/158] Add Example 3 - Camera Update HM01B0 with methods `open()`, `read()`, and `release()` Add HM01B0 camera setup to example boot.py --- drivers/camera/hm01b0_pio.py | 35 +++++++++++++++++++++++++++----- examples/boot.py | 23 +++++++++++++++++++++ examples/ex03_camera.py | 39 ++++++++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+), 5 deletions(-) create mode 100644 examples/ex03_camera.py diff --git a/drivers/camera/hm01b0_pio.py b/drivers/camera/hm01b0_pio.py index 4c79ac9..1f5925a 100644 --- a/drivers/camera/hm01b0_pio.py +++ b/drivers/camera/hm01b0_pio.py @@ -2,7 +2,7 @@ from machine import Pin, I2C from ulab import numpy as np from time import sleep_us -import time +import cv2 # Derived from: # https://github.com/openmv/openmv/blob/5acf5baf92b4314a549bdd068138e5df6cc0bac7/drivers/sensors/hm01b0.c @@ -289,10 +289,35 @@ def start_pio_dma(self): ctrl = dma_ctrl ) - Pin(self.pin_vsync).irq( - trigger = Pin.IRQ_FALLING, - handler = lambda pin: self._vsync_handler() - ) + def active(self, active = None): + if active == None: + return self.sm.active() + + self.sm.active(active) + + if active: + Pin(self.pin_vsync).irq( + trigger = Pin.IRQ_FALLING, + handler = lambda pin: self._vsync_handler() + ) + else: + Pin(self.pin_vsync).irq( + handler = None + ) + + def open(self): + self.active(True) + + def release(self): + self.active(False) + + def read(self): + """ + Reads a frame from the camera. + Returns: + tuple: (success, frame) + """ + return (True, cv2.cvtColor(self.buffer, cv2.COLOR_BayerRG2BGR)) def _vsync_handler(self): # Disable DMA before reconfiguring it diff --git a/examples/boot.py b/examples/boot.py index 7fa8d1d..c38eb3d 100644 --- a/examples/boot.py +++ b/examples/boot.py @@ -5,6 +5,10 @@ # this based on your specific board and configuration spi = machine.SPI(0) +# Initialize I2C bus, assuming default pins on bus 0. You may need to adjust +# this based on your specific board and configuration +i2c = machine.I2C(0) + # Initialize display, if available try: # Import a display driver module. 
This example assumes the ST7789, which is @@ -64,3 +68,22 @@ # Clear the display, if the driver supports it if hasattr(display, 'clear'): display.clear() + +# Initialize camera, if available +try: + # Import a camera driver module. This example assumes the HM01B0, which is + # a popular camera module for embedded systems. This example uses a PIO + # driver, which is a peripheral interface only available on Raspberry Pi RP2 + # processors + import hm01b0_pio + + # Create a camera object. This will depend on the camera driver you are + # using, and you may need to adjust the parameters based on your specific + # camera and board configuration + camera = hm01b0_pio.HM01B0_PIO(i2c, + pin_d0=12, + pin_vsync=13, + pin_hsync=14, + pin_pclk=15) +except ImportError: + print("boot.py - Camera driver module not found, skipping camera initialization.") diff --git a/examples/ex03_camera.py b/examples/ex03_camera.py new file mode 100644 index 0000000..e672107 --- /dev/null +++ b/examples/ex03_camera.py @@ -0,0 +1,39 @@ +# Import OpenCV +import cv2 + +# Open a camera, similar to any other Python environment! In standard OpenCV, +# you would use `cv2.VideoCapture(0)` or similar, and OpenCV would leverage the +# host operating system to open a camera object and return it as a +# `cv2.VideoCapture` object. However, we don't have that luxury in MicroPython, +# so a camera driver is required instead. Any camera driver can be used, as long +# as it implements the same methods as the standard OpenCV `cv2.VideoCapture` +# class, such as `open()`, `read()`, and `release()` +# +# This example assumes a camera driver called `camera` has been initialized by a +# `boot.py` script. See the example `boot.py` script for more details +camera.open() + +# Loop to continuously read frames from the camera and display them +while True: + # Read a frame from the camera, just like any other Python environment! It + # returns a tuple, where the first element is a boolean indicating success, + # and the second element is the frame (NumPy array) read from the camera + success, frame = camera.read() + + # Check if the frame was read successfully + if success == False: + print("Error reading frame from camera") + break + + # Display the frame + cv2.imshow(display, frame) + + # Check for key presses + key = cv2.waitKey(1) + + # If the 'q' key is pressed, exit the loop + if key == ord('q'): + break + +# Release the camera, just like in any other Python environment! 
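+#
+# Note - With the HM01B0 PIO driver used by this repo's example boot.py,
+# release() simply deactivates the PIO state machine and the VSYNC interrupt
+# (see active() in hm01b0_pio.py), so the camera can be re-opened later by
+# calling open() again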
+camera.release() From dda0e32688ec74d1d244808e17dc2e2854c48acd Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 16 Jun 2025 12:54:48 -0600 Subject: [PATCH 049/158] Update build.yml --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 581f2e9..2d7bbe9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -13,6 +13,7 @@ jobs: - name: make run: | sudo apt-get install gcc-arm-none-eabi libnewlib-arm-none-eabi + make -C src/opencv PLATFORM=embedded/rp2350 make -C micropython/mpy-cross make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER submodules make BOARD=SPARKFUN_XRP_CONTROLLER From 8ea6bc4c355e2a7f663371b8c085f6f3db62a0f8 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 16 Jun 2025 13:00:05 -0600 Subject: [PATCH 050/158] Update build.yml --- .github/workflows/build.yml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2d7bbe9..5f15b90 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,10 +10,13 @@ jobs: - uses: actions/checkout@v4 with: submodules: true - - name: make - run: | - sudo apt-get install gcc-arm-none-eabi libnewlib-arm-none-eabi - make -C src/opencv PLATFORM=embedded/rp2350 - make -C micropython/mpy-cross - make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER submodules - make BOARD=SPARKFUN_XRP_CONTROLLER + - name: Install packages + run: sudo apt-get install gcc-arm-none-eabi libnewlib-arm-none-eabi + - name: Build OpenCV + run: make -C src/opencv PLATFORM=embedded/rp2350 + - name: Build MPY Cross + run: make -C micropython/mpy-cross + - name: Submodules + run: make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER submodules + - name: Build firmware + run: make BOARD=SPARKFUN_XRP_CONTROLLER From 4d38135d74e9a3817d3b8fe97020481d2dad256e Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 16 Jun 2025 13:04:14 -0600 Subject: [PATCH 051/158] Update build.yml --- .github/workflows/build.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5f15b90..f423f18 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,6 +12,8 @@ jobs: submodules: true - name: Install packages run: sudo apt-get install gcc-arm-none-eabi libnewlib-arm-none-eabi + - name: Set Pico SDK path + run: export PICO_SDK_PATH=$GITHUB_WORKSPACE/micropython/lib/pico-sdk - name: Build OpenCV run: make -C src/opencv PLATFORM=embedded/rp2350 - name: Build MPY Cross From c1907640b6d27e4e0baf6a8fd3b42dee79049153 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 16 Jun 2025 13:13:24 -0600 Subject: [PATCH 052/158] Update build.yml --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f423f18..6258228 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -13,7 +13,7 @@ jobs: - name: Install packages run: sudo apt-get install gcc-arm-none-eabi libnewlib-arm-none-eabi - name: Set Pico SDK path - run: export PICO_SDK_PATH=$GITHUB_WORKSPACE/micropython/lib/pico-sdk + run: echo "{PICO_SDK_PATH}={$GITHUB_WORKSPACE/micropython/lib/pico-sdk}" >> "$GITHUB_ENV" - name: Build OpenCV run: make -C src/opencv PLATFORM=embedded/rp2350 - name: Build MPY Cross From 39629a25107dc0c946d1a9f61d1a811d8c2d95b2 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: 
Mon, 16 Jun 2025 13:17:18 -0600 Subject: [PATCH 053/158] Update build.yml --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6258228..64fff02 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -13,7 +13,7 @@ jobs: - name: Install packages run: sudo apt-get install gcc-arm-none-eabi libnewlib-arm-none-eabi - name: Set Pico SDK path - run: echo "{PICO_SDK_PATH}={$GITHUB_WORKSPACE/micropython/lib/pico-sdk}" >> "$GITHUB_ENV" + run: echo "PICO_SDK_PATH=$GITHUB_WORKSPACE/micropython/lib/pico-sdk" >> "$GITHUB_ENV" - name: Build OpenCV run: make -C src/opencv PLATFORM=embedded/rp2350 - name: Build MPY Cross From d5346a3a467c6c3cf6b0bd4a315b8dd16fba68d8 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 16 Jun 2025 13:20:25 -0600 Subject: [PATCH 054/158] Update build.yml --- .github/workflows/build.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 64fff02..d45aa1c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,13 +12,13 @@ jobs: submodules: true - name: Install packages run: sudo apt-get install gcc-arm-none-eabi libnewlib-arm-none-eabi - - name: Set Pico SDK path - run: echo "PICO_SDK_PATH=$GITHUB_WORKSPACE/micropython/lib/pico-sdk" >> "$GITHUB_ENV" - - name: Build OpenCV - run: make -C src/opencv PLATFORM=embedded/rp2350 - name: Build MPY Cross run: make -C micropython/mpy-cross - name: Submodules run: make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER submodules + - name: Set Pico SDK path + run: echo "PICO_SDK_PATH=$GITHUB_WORKSPACE/micropython/lib/pico-sdk" >> "$GITHUB_ENV" + - name: Build OpenCV + run: make -C src/opencv PLATFORM=embedded/rp2350 - name: Build firmware run: make BOARD=SPARKFUN_XRP_CONTROLLER From 7e04ea8ffb375530c9308ffa8164fe255d572a89 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 16 Jun 2025 13:29:51 -0600 Subject: [PATCH 055/158] Update OpenCV Makefile to use -C instead of cd --- src/opencv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opencv b/src/opencv index a7bc9ad..f15aee9 160000 --- a/src/opencv +++ b/src/opencv @@ -1 +1 @@ -Subproject commit a7bc9ad59cb0981c7647a7598569baca40e7d593 +Subproject commit f15aee999dfa68c548fa61b87f916579fa403848 From 3471137026a4e5a0fccd998fcaedae8c34e3aebd Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 16 Jun 2025 14:07:36 -0600 Subject: [PATCH 056/158] Update build.yml --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d45aa1c..619d38d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -19,6 +19,6 @@ jobs: - name: Set Pico SDK path run: echo "PICO_SDK_PATH=$GITHUB_WORKSPACE/micropython/lib/pico-sdk" >> "$GITHUB_ENV" - name: Build OpenCV - run: make -C src/opencv PLATFORM=embedded/rp2350 + run: make -C src/opencv PLATFORM=embedded/rp2350 --no-print-directory - name: Build firmware run: make BOARD=SPARKFUN_XRP_CONTROLLER From 72e08e519f1920e436b31f1e79108914ea8578c0 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 16 Jun 2025 14:42:46 -0600 Subject: [PATCH 057/158] Update build.yml --- .github/workflows/build.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 619d38d..3cb2e2a 100644 --- 
a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -11,7 +11,9 @@ jobs: with: submodules: true - name: Install packages - run: sudo apt-get install gcc-arm-none-eabi libnewlib-arm-none-eabi + run: | + sudo apt-get install gcc-arm-none-eabi libnewlib-arm-none-eabi + sudo apt install cmake python3 build-essential gcc-arm-none-eabi libnewlib-arm-none-eabi libstdc++-arm-none-eabi-newlib - name: Build MPY Cross run: make -C micropython/mpy-cross - name: Submodules From 279b2d5a7ac85a861018cab3a92490e429a034e1 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 16 Jun 2025 14:57:05 -0600 Subject: [PATCH 058/158] Update build.yml --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3cb2e2a..d17089c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -5,7 +5,7 @@ on: jobs: build: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 with: From ab3e5a7cbad262e55407502e906cebb03aa3a0de Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 16 Jun 2025 16:11:00 -0600 Subject: [PATCH 059/158] Update OpenCV with fixes for updated compiler --- src/opencv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opencv b/src/opencv index f15aee9..154f0e5 160000 --- a/src/opencv +++ b/src/opencv @@ -1 +1 @@ -Subproject commit f15aee999dfa68c548fa61b87f916579fa403848 +Subproject commit 154f0e5446e7bf88c4bcf662ba784760fc86ae0b From 1ba836f71c1f8754cbfe3c006f134903a1d48a32 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 16 Jun 2025 16:41:47 -0600 Subject: [PATCH 060/158] Fix OpenCV uint typedef --- src/opencv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opencv b/src/opencv index 154f0e5..6e15154 160000 --- a/src/opencv +++ b/src/opencv @@ -1 +1 @@ -Subproject commit 154f0e5446e7bf88c4bcf662ba784760fc86ae0b +Subproject commit 6e151546f430efe4d1dfb5c3ea3f2eaaad36e46c From c7b47b6488adec45fccf9c39ad2f67f0a3390068 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 16 Jun 2025 16:56:02 -0600 Subject: [PATCH 061/158] Update build.yml --- .github/workflows/build.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d17089c..45a8a04 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -7,7 +7,8 @@ jobs: build: runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v4 + - name: Checkout repository + uses: actions/checkout@v4 with: submodules: true - name: Install packages @@ -24,3 +25,8 @@ jobs: run: make -C src/opencv PLATFORM=embedded/rp2350 --no-print-directory - name: Build firmware run: make BOARD=SPARKFUN_XRP_CONTROLLER + - name: Upload UF2 + uses: actions/upload-artifact@v4 + with: + name: firmware.uf2 + path: micropython/ports/rp2/build-SPARKFUN_XRP_CONTROLLER/firmware.uf2 From 175b13ee5018f0070899b77f970f4dcecdd21a48 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 16 Jun 2025 16:56:16 -0600 Subject: [PATCH 062/158] Update build.yml --- .github/workflows/build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 45a8a04..03f8f1a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,9 +22,9 @@ jobs: - name: Set Pico SDK path run: echo "PICO_SDK_PATH=$GITHUB_WORKSPACE/micropython/lib/pico-sdk" >> "$GITHUB_ENV" - name: Build OpenCV - run: make -C src/opencv 
PLATFORM=embedded/rp2350 --no-print-directory + run: make -C src/opencv PLATFORM=embedded/rp2350 --no-print-directory -j4 - name: Build firmware - run: make BOARD=SPARKFUN_XRP_CONTROLLER + run: make BOARD=SPARKFUN_XRP_CONTROLLER -j4 - name: Upload UF2 uses: actions/upload-artifact@v4 with: From 1941bce1d92a605b96cc30ce393be2afa19e9392 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 17 Jun 2025 10:53:39 -0600 Subject: [PATCH 063/158] Add OpenCV Makefile and embedded platform definitions Helps minimize dependnecy on OpenCV fork --- .github/workflows/build.yml | 4 +- .gitmodules | 4 +- src/opencv | 1 - src/opencv/Makefile | 16 +++++++ src/opencv/opencv | 1 + src/opencv/platforms/common.cmake | 46 +++++++++++++++++++ .../platforms/include/rp2350_unsafe_cv_xadd.h | 13 ++++++ src/opencv/platforms/rp2350.toolchain.cmake | 17 +++++++ src/opencv_upy.cmake | 2 +- 9 files changed, 98 insertions(+), 6 deletions(-) delete mode 160000 src/opencv create mode 100644 src/opencv/Makefile create mode 160000 src/opencv/opencv create mode 100644 src/opencv/platforms/common.cmake create mode 100644 src/opencv/platforms/include/rp2350_unsafe_cv_xadd.h create mode 100644 src/opencv/platforms/rp2350.toolchain.cmake diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 03f8f1a..5e61602 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -17,12 +17,12 @@ jobs: sudo apt install cmake python3 build-essential gcc-arm-none-eabi libnewlib-arm-none-eabi libstdc++-arm-none-eabi-newlib - name: Build MPY Cross run: make -C micropython/mpy-cross - - name: Submodules + - name: MicroPython submodules run: make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER submodules - name: Set Pico SDK path run: echo "PICO_SDK_PATH=$GITHUB_WORKSPACE/micropython/lib/pico-sdk" >> "$GITHUB_ENV" - name: Build OpenCV - run: make -C src/opencv PLATFORM=embedded/rp2350 --no-print-directory -j4 + run: make -C src/opencv PLATFORM=rp2350 --no-print-directory -j4 - name: Build firmware run: make BOARD=SPARKFUN_XRP_CONTROLLER -j4 - name: Upload UF2 diff --git a/.gitmodules b/.gitmodules index b1327c0..59bc0ec 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,5 +1,5 @@ -[submodule "src/opencv"] - path = src/opencv +[submodule "src/opencv/opencv"] + path = src/opencv/opencv url = https://github.com/sfe-SparkFro/opencv.git [submodule "src/ulab"] path = src/ulab diff --git a/src/opencv b/src/opencv deleted file mode 160000 index 6e15154..0000000 --- a/src/opencv +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 6e151546f430efe4d1dfb5c3ea3f2eaaad36e46c diff --git a/src/opencv/Makefile b/src/opencv/Makefile new file mode 100644 index 0000000..c26c20e --- /dev/null +++ b/src/opencv/Makefile @@ -0,0 +1,16 @@ +ifndef PLATFORM +$(error PLATFORM not specified. Use 'make PLATFORM=rp2350' or similar.) +endif + +TOOLCHAIN_FILE = ../../platforms/${PLATFORM}.toolchain.cmake + +# TODO: For some reason, specifying this in the toolchain file doesn't work +CMAKE_ARGS += -DBUILD_LIST=core,imgproc,imgcodecs + +# Generic build +all: + cd opencv && mkdir -p build && cmake -S . 
-B build -DPICO_BUILD_DOCS=0 -DCMAKE_TOOLCHAIN_FILE=${TOOLCHAIN_FILE} ${CMAKE_ARGS} && make -C build -f Makefile $(MAKEFLAGS) $(MAKE_ARGS) + +# Clean the OpenCV build +clean: + cd opencv && rm -rf build diff --git a/src/opencv/opencv b/src/opencv/opencv new file mode 160000 index 0000000..3dc189e --- /dev/null +++ b/src/opencv/opencv @@ -0,0 +1 @@ +Subproject commit 3dc189e3990d03a66335aa60d11f2e7df7ea01d1 diff --git a/src/opencv/platforms/common.cmake b/src/opencv/platforms/common.cmake new file mode 100644 index 0000000..e9fef23 --- /dev/null +++ b/src/opencv/platforms/common.cmake @@ -0,0 +1,46 @@ +# Derived from: +# https://github.com/joachimBurket/esp32-opencv/blob/master/esp32/doc/detailed_build_procedure.md +set(CMAKE_BUILD_TYPE Release) +set(BUILD_SHARED_LIBS OFF) +set(CV_DISABLE_OPTIMIZATION OFF) +set(WITH_IPP OFF) +set(WITH_TBB OFF) +set(WITH_OPENMP OFF) +set(WITH_PTHREADS_PF OFF) +set(WITH_QUIRC OFF) +set(WITH_1394 OFF) +set(WITH_CUDA OFF) +set(WITH_OPENCL OFF) +set(WITH_OPENCLAMDFFT OFF) +set(WITH_OPENCLAMDBLAS OFF) +set(WITH_VA_INTEL OFF) +set(WITH_EIGEN OFF) +set(WITH_GSTREAMER OFF) +set(WITH_GTK OFF) +set(WITH_JASPER OFF) +set(WITH_JPEG OFF) +set(WITH_OPENJPEG OFF) +set(WITH_WEBP OFF) +set(BUILD_ZLIB ON) +set(BUILD_PNG ON) +set(WITH_TIFF OFF) +set(WITH_V4L OFF) +set(WITH_LAPACK OFF) +set(WITH_ITT OFF) +set(WITH_PROTOBUF OFF) +set(WITH_IMGCODEC_HDR OFF) +set(WITH_IMGCODEC_SUNRASTER OFF) +set(WITH_IMGCODEC_PXM OFF) +set(WITH_IMGCODEC_PFM OFF) +# TODO: For some reason, specifying this in the toolchain file doesn't work +# set(BUILD_LIST core,imgproc,imgcodecs) +set(BUILD_JAVA OFF) +set(BUILD_opencv_python OFF) +set(BUILD_opencv_java OFF) +set(BUILD_opencv_apps OFF) +set(BUILD_PACKAGE OFF) +set(BUILD_PERF_TESTS OFF) +set(BUILD_TESTS OFF) +set(CV_ENABLE_INTRINSICS OFF) +set(CV_TRACE OFF) +set(OPENCV_ENABLE_MEMALIGN OFF) diff --git a/src/opencv/platforms/include/rp2350_unsafe_cv_xadd.h b/src/opencv/platforms/include/rp2350_unsafe_cv_xadd.h new file mode 100644 index 0000000..43234d7 --- /dev/null +++ b/src/opencv/platforms/include/rp2350_unsafe_cv_xadd.h @@ -0,0 +1,13 @@ +#ifndef RP2350_UNSAFE_CV_XADD_H +#define RP2350_UNSAFE_CV_XADD_H + +// Fix for https://github.com/raspberrypi/pico-sdk/issues/2505 +static inline int unsafe_cv_xadd(int* addr, int delta) +{ + int tmp = *addr; + *addr += delta; + return tmp; +} +#define CV_XADD(addr, delta) unsafe_cv_xadd(addr, delta) + +#endif \ No newline at end of file diff --git a/src/opencv/platforms/rp2350.toolchain.cmake b/src/opencv/platforms/rp2350.toolchain.cmake new file mode 100644 index 0000000..1c420b4 --- /dev/null +++ b/src/opencv/platforms/rp2350.toolchain.cmake @@ -0,0 +1,17 @@ +# Define PICO_SDK_PATH in your environment before running this script +if(NOT DEFINED ENV{PICO_SDK_PATH}) + message(FATAL_ERROR "PICO_SDK_PATH environment variable is not set. 
Please define it before running this script.") +endif() + +# Include the RP2350 toolchain file +include("$ENV{PICO_SDK_PATH}/cmake/preload/toolchains/pico_arm_cortex_m33_gcc.cmake") + +# Include the common embedded OpenCV settings +include("${CMAKE_CURRENT_LIST_DIR}/common.cmake") + +# Set RP2350 specific settings +set(OPENCV_DISABLE_THREAD_SUPPORT ON) + +# Fix for https://github.com/raspberrypi/pico-sdk/issues/2505 +set(CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS_INIT} -DOPENCV_INCLUDE_PORT_FILE=\\\"${CMAKE_CURRENT_LIST_DIR}/include/rp2350_unsafe_cv_xadd.h\\\"") +set(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} -DOPENCV_INCLUDE_PORT_FILE=\\\"${CMAKE_CURRENT_LIST_DIR}/include/rp2350_unsafe_cv_xadd.h\\\"") diff --git a/src/opencv_upy.cmake b/src/opencv_upy.cmake index 0915d98..c06537c 100644 --- a/src/opencv_upy.cmake +++ b/src/opencv_upy.cmake @@ -39,7 +39,7 @@ target_compile_definitions(usermod INTERFACE ULAB_MAX_DIMS=4) include(${CMAKE_CURRENT_LIST_DIR}/ulab/code/micropython.cmake) # Include OpenCV -set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/opencv/build) +set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/opencv/opencv/build) find_package(OpenCV REQUIRED) target_include_directories(usermod INTERFACE ${OpenCV_INCLUDE_DIRS}) target_link_libraries(usermod INTERFACE ${OpenCV_LIBS}) From ce8c444653d0647e593f38fe1cd1802e1e7bb411 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 17 Jun 2025 11:00:19 -0600 Subject: [PATCH 064/158] Set workflow to build on push to development and PR to main --- .github/workflows/build.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5e61602..7295bb0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,6 +1,12 @@ name: Build firmware on: + pull: + branches: + - main + push: + branches: + - features_for_launch workflow_dispatch: jobs: From 7bdeb998c9e5ccebc0a0663b30ca98eef2bbbc4a Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 17 Jun 2025 11:01:12 -0600 Subject: [PATCH 065/158] Fix typo in workflow --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7295bb0..3c2de1c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,7 +1,7 @@ name: Build firmware on: - pull: + pull_request: branches: - main push: From c02bd1ff68b0a7c83f9416a14afbb6ba5fcc551c Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 17 Jun 2025 14:16:31 -0600 Subject: [PATCH 066/158] HM01B0 PIO driver: restart state machine in vsync handler Helps #19, but does not fully fix it If a desync occurs, the state machine's ISR would have a few extra bits in it, caused all subsequent frames to be corrupted Restarting the state machine clears the ISR --- drivers/camera/hm01b0_pio.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/drivers/camera/hm01b0_pio.py b/drivers/camera/hm01b0_pio.py index 1f5925a..edc9abf 100644 --- a/drivers/camera/hm01b0_pio.py +++ b/drivers/camera/hm01b0_pio.py @@ -240,6 +240,20 @@ def soft_reset(self): """ self.writeRegister(self.SW_RESET, self.HIMAX_RESET) + def setMode(self, mode): + """ + Sets the operating mode of the HM01B0 sensor. + Args: + mode (int): The mode to set, e.g., MODE_STREAMING. 
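+
+        Example (a usage sketch; `camera` is a constructed HM01B0_PIO):
+            camera.setMode(HM01B0_PIO.HIMAX_MODE_STREAMING)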
+ """ + self.writeRegister(self.MODE_SELECT, mode) + + def trigger(self): + self.writeRegister(self.MODE_SELECT, self.HIMAX_MODE_STREAMING_NFRAMES) + + def set_n_frames(self, n_frames): + self.writeRegister(self.PMU_AUTOSLEEP_FRAMECNT, n_frames) + def send_init(self): """ Initializes the HM01B0 sensor with default settings. @@ -248,9 +262,6 @@ def send_init(self): for reg, value in self.INIT_COMMANDS: self.writeRegister(reg, value) sleep_us(1000) - - # Ensure the sensor is in streaming mode - # self.writeRegister(self.MODE_SELECT, self.HIMAX_MODE_STREAMING) def readRegister(self, reg, nbytes=1): self.i2c.writeto(self.i2c_address, bytes([reg >> 8, reg & 0xFF])) @@ -323,7 +334,10 @@ def _vsync_handler(self): # Disable DMA before reconfiguring it self.dma.active(False) - # Ensure PIO RX FIFO is empty + # Reset state machine to ensure ISR is cleared + self.sm.restart() + + # Ensure PIO RX FIFO is empty (it's not emptied by `sm.restart()`) while self.sm.rx_fifo() > 0: self.sm.get() From f99b1c8819aef758d5178c566b04d222e8ebfa9a Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 17 Jun 2025 16:38:07 -0600 Subject: [PATCH 067/158] Change HM01B0 PIO driver to use 32-bit DMA transfers Fixes #19 --- drivers/camera/hm01b0_pio.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/camera/hm01b0_pio.py b/drivers/camera/hm01b0_pio.py index edc9abf..245bc91 100644 --- a/drivers/camera/hm01b0_pio.py +++ b/drivers/camera/hm01b0_pio.py @@ -289,14 +289,15 @@ def start_pio_dma(self): self.dma = rp2.DMA() req_num = ((self.sm_id // 4) << 3) + (self.sm_id % 4) + 4 dma_ctrl = self.dma.pack_ctrl( - size = 0, # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit + size = 2, # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit inc_read = False, - treq_sel = req_num + treq_sel = req_num, + bswap = True # irq_quiet = False ) self.dma.config( read = self.sm, - count = 244 * 324, + count = 244 * 324 // 4, ctrl = dma_ctrl ) @@ -340,7 +341,7 @@ def _vsync_handler(self): # Ensure PIO RX FIFO is empty (it's not emptied by `sm.restart()`) while self.sm.rx_fifo() > 0: self.sm.get() - + # Reset the DMA write address self.dma.write = self.buffer @@ -349,7 +350,7 @@ def _vsync_handler(self): @rp2.asm_pio( in_shiftdir = rp2.PIO.SHIFT_LEFT, - push_thresh = 8, + push_thresh = 32, autopush = True ) def _pio_read_dvp(): From 270f2aec8918b4deafc173d9cfd191eec57f0a3a Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 18 Jun 2025 09:52:38 -0600 Subject: [PATCH 068/158] Update comment in savePinModeAlt() --- drivers/display/st7789_spi.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/display/st7789_spi.py b/drivers/display/st7789_spi.py index a0e49f9..7640a1b 100644 --- a/drivers/display/st7789_spi.py +++ b/drivers/display/st7789_spi.py @@ -358,8 +358,10 @@ def savePinModeAlt(self, pin): Returns: tuple: (mode, alt) """ + # See: https://github.com/micropython/micropython/issues/17515 # There's no way to get the mode and alt of a pin directly, so we - # convert the pin to a string and parse it. Example format: + # convert the pin to a string and parse it. 
Example formats: + # "Pin(GPIO16, mode=OUT)" # "Pin(GPIO16, mode=ALT, alt=SPI)" pinStr = str(pin) From 3b782a14b9435b4c16c05ead8a920d7e3d14423d Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 19 Jun 2025 09:27:06 -0600 Subject: [PATCH 069/158] Restructure drivers Now includes manifest files to freeze all drivers into firmware Display drivers broken up into multiple classes New ST7789 PIO driver added (not working yet) --- Makefile | 3 + cv2_drivers/__init__.py | 2 + cv2_drivers/cameras/__init__.py | 1 + .../cameras}/hm01b0_pio.py | 4 +- cv2_drivers/displays/__init__.py | 4 + cv2_drivers/displays/cv2_display.py | 137 ++++++ cv2_drivers/displays/st7789.py | 257 +++++++++++ cv2_drivers/displays/st7789_pio.py | 123 ++++++ cv2_drivers/displays/st7789_spi.py | 67 +++ cv2_drivers/manifest.py | 9 + drivers/display/st7789_spi.py | 415 ------------------ examples/boot.py | 4 +- 12 files changed, 606 insertions(+), 420 deletions(-) create mode 100644 cv2_drivers/__init__.py create mode 100644 cv2_drivers/cameras/__init__.py rename {drivers/camera => cv2_drivers/cameras}/hm01b0_pio.py (98%) create mode 100644 cv2_drivers/displays/__init__.py create mode 100644 cv2_drivers/displays/cv2_display.py create mode 100644 cv2_drivers/displays/st7789.py create mode 100644 cv2_drivers/displays/st7789_pio.py create mode 100644 cv2_drivers/displays/st7789_spi.py create mode 100644 cv2_drivers/manifest.py delete mode 100644 drivers/display/st7789_spi.py diff --git a/Makefile b/Makefile index 8cc8f78..e722085 100644 --- a/Makefile +++ b/Makefile @@ -7,6 +7,9 @@ CURRENT_DIR = $(shell pwd) # Set the MicroPython user C module path to the OpenCV module MAKE_ARGS = USER_C_MODULES="$(CURRENT_DIR)/src/opencv_upy.cmake" +# Use the OpenCV driver manifest +MAKE_ARGS += FROZEN_MANIFEST="$(CURRENT_DIR)/cv2_drivers/manifest.py" + # Build MicroPython with the OpenCV module all: @cd micropython/ports/rp2 && export CMAKE_ARGS="$(CMAKE_ARGS)" && make -f Makefile $(MAKEFLAGS) $(MAKE_ARGS) diff --git a/cv2_drivers/__init__.py b/cv2_drivers/__init__.py new file mode 100644 index 0000000..b7341bf --- /dev/null +++ b/cv2_drivers/__init__.py @@ -0,0 +1,2 @@ +from . import displays +from . import cameras \ No newline at end of file diff --git a/cv2_drivers/cameras/__init__.py b/cv2_drivers/cameras/__init__.py new file mode 100644 index 0000000..293266b --- /dev/null +++ b/cv2_drivers/cameras/__init__.py @@ -0,0 +1 @@ +from . import hm01b0_pio \ No newline at end of file diff --git a/drivers/camera/hm01b0_pio.py b/cv2_drivers/cameras/hm01b0_pio.py similarity index 98% rename from drivers/camera/hm01b0_pio.py rename to cv2_drivers/cameras/hm01b0_pio.py index 245bc91..83d40d9 100644 --- a/drivers/camera/hm01b0_pio.py +++ b/cv2_drivers/cameras/hm01b0_pio.py @@ -1,5 +1,5 @@ import rp2 -from machine import Pin, I2C +from machine import Pin from ulab import numpy as np from time import sleep_us import cv2 @@ -7,8 +7,6 @@ # Derived from: # https://github.com/openmv/openmv/blob/5acf5baf92b4314a549bdd068138e5df6cc0bac7/drivers/sensors/hm01b0.c class HM01B0_PIO(): - # Derived from: - # https://github.com/openmv/openmv/blob/5acf5baf92b4314a549bdd068138e5df6cc0bac7/drivers/sensors/hm01b0_regs.h # Read only registers MODEL_ID_H = 0x0000 diff --git a/cv2_drivers/displays/__init__.py b/cv2_drivers/displays/__init__.py new file mode 100644 index 0000000..8710847 --- /dev/null +++ b/cv2_drivers/displays/__init__.py @@ -0,0 +1,4 @@ +from . import cv2_display +from . import st7789 +from . import st7789_spi +from . 
import st7789_pio \ No newline at end of file diff --git a/cv2_drivers/displays/cv2_display.py b/cv2_drivers/displays/cv2_display.py new file mode 100644 index 0000000..78e445a --- /dev/null +++ b/cv2_drivers/displays/cv2_display.py @@ -0,0 +1,137 @@ +import cv2 +from ulab import numpy as np +from machine import Pin + +class CV2_Display(): + def __init__(self, buffer_size): + # Create the frame buffer + self.buffer = np.zeros(buffer_size, dtype=np.uint8) + + def _get_common_roi_with_buffer(self, image): + """ + Get the common region of interest (ROI) between the image and the + display's internal buffer. + + Args: + image (ndarray): Image to display + + Returns: + tuple: (image_roi, buffer_roi) + """ + # Ensure image is a NumPy ndarray + if type(image) is not np.ndarray: + raise TypeError("Image must be a NumPy ndarray") + + # Determing number of rows and columns in the image + image_rows = image.shape[0] + if image.ndim < 2: + image_cols = 1 + else: + image_cols = image.shape[1] + + # Get the common ROI between the image and the buffer + row_max = min(image_rows, self.height) + col_max = min(image_cols, self.width) + img_roi = image[:row_max, :col_max] + buffer_roi = self.buffer[:row_max, :col_max] + return img_roi, buffer_roi + + def _convert_image_to_uint8(self, image): + """ + Convert the image to uint8 format if necessary. + + Args: + image (ndarray): Image to convert + + Returns: + Image: Converted image + """ + # Check if the image is already in uint8 format + if image.dtype is np.uint8: + return image + + # Convert to uint8 format. This unfortunately requires creating a new + # buffer for the converted image, which takes more memory + if image.dtype == np.int8: + return cv2.convertScaleAbs(image, alpha=1, beta=127) + elif image.dtype == np.int16: + return cv2.convertScaleAbs(image, alpha=1/255, beta=127) + elif image.dtype == np.uint16: + return cv2.convertScaleAbs(image, alpha=1/255) + elif image.dtype == np.float: + # This implementation creates an additional buffer from np.clip() + # TODO: Find another solution that avoids an additional buffer + return cv2.convertScaleAbs(np.clip(image, 0, 1), alpha=255) + else: + raise ValueError(f"Unsupported image dtype: {image.dtype}") + + def _write_image_to_buffer_bgr565(self, image_roi, buffer_roi): + """ + Convert the image ROI to BGR565 format and write it to the buffer ROI. + + Args: + image_roi (ndarray): Image region of interest + buffer_roi (ndarray): Buffer region of interest + """ + # Determine the number of channels in the image + if image_roi.ndim < 3: + ch = 1 + else: + ch = image_roi.shape[2] + + if ch == 1: # Grayscale + buffer_roi = cv2.cvtColor(image_roi, cv2.COLOR_GRAY2BGR565, buffer_roi) + elif ch == 2: # Already in BGR565 format + buffer_roi[:] = image_roi + elif ch == 3: # BGR + buffer_roi = cv2.cvtColor(image_roi, cv2.COLOR_BGR2BGR565, buffer_roi) + else: + raise ValueError("Image must be 1, 2 or 3 channels (grayscale, BGR565, or BGR)") + + def savePinModeAlt(self, pin): + """ + Saves the current `mode` and `alt` of the pin so it can be restored + later. Mostly used to restore the SPI mode (MISO) of the DC pin after + communication with the display in case another device is using the same + SPI bus. + + Returns: + tuple: (mode, alt) + """ + # See: https://github.com/micropython/micropython/issues/17515 + # There's no way to get the mode and alt of a pin directly, so we + # convert the pin to a string and parse it. 
Example formats: + # "Pin(GPIO16, mode=OUT)" + # "Pin(GPIO16, mode=ALT, alt=SPI)" + pinStr = str(pin) + + # Extract the "mode" parameter from the pin string + if "mode=" in pinStr: + # Split between "mode=" and the next comma or closing parenthesis + modeStr = pinStr.split("mode=")[1].split(",")[0].split(")")[0] + + # Look up the mode in Pin class dictionary + mode = Pin.__dict__[modeStr] + else: + # No mode specified, just set to None + mode = None + + # Extract the "alt" parameter from the pin string + if "alt=" in pinStr: + # Split between "alt=" and the next comma or closing parenthesis + altStr = pinStr.split("alt=")[1].split(",")[0].split(")")[0] + + # Sometimes the value comes back as a number instead of a valid + # "ALT_xyz" string, so we need to check it + if "ALT_" + altStr in Pin.__dict__: + # Look up the alt in Pin class dictionary (with "ALT_" prefix) + alt = Pin.__dict__["ALT_" + altStr] + else: + # Convert the altStr to an integer + alt = int(altStr) + else: + # No alt specified, just set to None + alt = None + + # Return the mode and alt as a tuple + return (mode, alt) diff --git a/cv2_drivers/displays/st7789.py b/cv2_drivers/displays/st7789.py new file mode 100644 index 0000000..ec28b41 --- /dev/null +++ b/cv2_drivers/displays/st7789.py @@ -0,0 +1,257 @@ +from .cv2_display import CV2_Display +from time import sleep_ms +import struct + +# Derived from: +# https://github.com/easytarget/st7789-framebuffer/blob/main/st7789_purefb.py +class ST7789(CV2_Display): + """ + OpenCV driver for ST7789 displays + + Args: + width (int): display width **Required** + height (int): display height **Required** + rotation (int): Orientation of display + - 0-Portrait, default + - 1-Landscape + - 2-Inverted Portrait + - 3-Inverted Landscape + color_order (int): + - RGB: Red, Green, Blue, default + - BGR: Blue, Green, Red + reverse_bytes_in_word (bool): + - Enable if the display uses LSB byte order for color words + """ + + # ST7789 commands + _ST7789_SWRESET = b"\x01" + _ST7789_SLPIN = b"\x10" + _ST7789_SLPOUT = b"\x11" + _ST7789_NORON = b"\x13" + _ST7789_INVOFF = b"\x20" + _ST7789_INVON = b"\x21" + _ST7789_DISPOFF = b"\x28" + _ST7789_DISPON = b"\x29" + _ST7789_CASET = b"\x2a" + _ST7789_RASET = b"\x2b" + _ST7789_RAMWR = b"\x2c" + _ST7789_VSCRDEF = b"\x33" + _ST7789_COLMOD = b"\x3a" + _ST7789_MADCTL = b"\x36" + _ST7789_VSCSAD = b"\x37" + _ST7789_RAMCTL = b"\xb0" + + # MADCTL bits + _ST7789_MADCTL_MY = 0x80 + _ST7789_MADCTL_MX = 0x40 + _ST7789_MADCTL_MV = 0x20 + _ST7789_MADCTL_ML = 0x10 + _ST7789_MADCTL_BGR = 0x08 + _ST7789_MADCTL_MH = 0x04 + _ST7789_MADCTL_RGB = 0x00 + + RGB = 0x00 + BGR = 0x08 + + # 8 basic color definitions + BLACK = 0x0000 + BLUE = 0x001F + RED = 0xF800 + GREEN = 0x07E0 + CYAN = 0x07FF + MAGENTA = 0xF81F + YELLOW = 0xFFE0 + WHITE = 0xFFFF + + _ENCODE_POS = ">HH" + + # Rotation tables + # (madctl, width, height, xstart, ystart)[rotation % 4] + + _DISPLAY_240x320 = ( + (0x00, 240, 320, 0, 0), + (0x60, 320, 240, 0, 0), + (0xc0, 240, 320, 0, 0), + (0xa0, 320, 240, 0, 0)) + + _DISPLAY_170x320 = ( + (0x00, 170, 320, 35, 0), + (0x60, 320, 170, 0, 35), + (0xc0, 170, 320, 35, 0), + (0xa0, 320, 170, 0, 35)) + + _DISPLAY_240x240 = ( + (0x00, 240, 240, 0, 0), + (0x60, 240, 240, 0, 0), + (0xc0, 240, 240, 0, 80), + (0xa0, 240, 240, 80, 0)) + + _DISPLAY_135x240 = ( + (0x00, 135, 240, 52, 40), + (0x60, 240, 135, 40, 53), + (0xc0, 135, 240, 53, 40), + (0xa0, 240, 135, 40, 52)) + + _DISPLAY_128x128 = ( + (0x00, 128, 128, 2, 1), + (0x60, 128, 128, 1, 2), + (0xc0, 128, 128, 2, 1), + (0xa0, 
128, 128, 1, 2)) + + # Supported displays (physical width, physical height, rotation table) + _SUPPORTED_DISPLAYS = ( + (240, 320, _DISPLAY_240x320), + (170, 320, _DISPLAY_170x320), + (240, 240, _DISPLAY_240x240), + (135, 240, _DISPLAY_135x240), + (128, 128, _DISPLAY_128x128)) + + # init tuple format (b'command', b'data', delay_ms) + _ST7789_INIT_CMDS = ( + ( b'\x11', b'\x00', 120), # Exit sleep mode + ( b'\x13', b'\x00', 0), # Turn on the display + ( b'\xb6', b'\x0a\x82', 0), # Set display function control + ( b'\x3a', b'\x55', 10), # Set pixel format to 16 bits per pixel (RGB565) + ( b'\xb2', b'\x0c\x0c\x00\x33\x33', 0), # Set porch control + ( b'\xb7', b'\x35', 0), # Set gate control + ( b'\xbb', b'\x28', 0), # Set VCOMS setting + ( b'\xc0', b'\x0c', 0), # Set power control 1 + ( b'\xc2', b'\x01\xff', 0), # Set power control 2 + ( b'\xc3', b'\x10', 0), # Set power control 3 + ( b'\xc4', b'\x20', 0), # Set power control 4 + ( b'\xc6', b'\x0f', 0), # Set VCOM control 1 + ( b'\xd0', b'\xa4\xa1', 0), # Set power control A + # Set gamma curve positive polarity + ( b'\xe0', b'\xd0\x00\x02\x07\x0a\x28\x32\x44\x42\x06\x0e\x12\x14\x17', 0), + # Set gamma curve negative polarity + ( b'\xe1', b'\xd0\x00\x02\x07\x0a\x28\x31\x54\x47\x0e\x1c\x17\x1b\x1e', 0), + ( b'\x21', b'\x00', 0), # Enable display inversion + ( b'\x29', b'\x00', 120) # Turn on the display + ) + + def __init__( + self, + width, + height, + rotation=0, + color_order=BGR, + reverse_bytes_in_word=True, + ): + # Initial dimensions and offsets; will be overridden when rotation applied + self.width = width + self.height = height + self.xstart = 0 + self.ystart = 0 + # Check display is known and get rotation table + self.rotations = self._find_rotations(width, height) + if not self.rotations: + supported_displays = ", ".join( + [f"{display[0]}x{display[1]}" for display in self._SUPPORTED_DISPLAYS]) + raise ValueError( + f"Unsupported {width}x{height} display. Supported displays: {supported_displays}") + # Colors + self.color_order = color_order + self.needs_swap = reverse_bytes_in_word + # Reset the display + self.soft_reset() + # Yes, send init twice, once is not always enough + self.send_init(self._ST7789_INIT_CMDS) + self.send_init(self._ST7789_INIT_CMDS) + # Initial rotation + self._rotation = rotation % 4 + # Apply rotation + self.rotation(self._rotation) + # Create the framebuffer for the correct rotation + super().__init__((self.height, self.width, 2)) + + def send_init(self, commands): + """ + Send initialisation commands to display. + """ + for command, data, delay in commands: + print(command, data, delay) + self._write(command, data) + sleep_ms(delay) + + def soft_reset(self): + """ + Soft reset display. + """ + self._write(self._ST7789_SWRESET) + sleep_ms(150) + + def _find_rotations(self, width, height): + """ Find the correct rotation for our display or return None """ + for display in self._SUPPORTED_DISPLAYS: + if display[0] == width and display[1] == height: + return display[2] + return None + + def rotation(self, rotation): + """ + Set display rotation. + + Args: + rotation (int): + - 0-Portrait + - 1-Landscape + - 2-Inverted Portrait + - 3-Inverted Landscape + """ + if ((rotation % 2) != (self._rotation % 2)) and (self.width != self.height): + # non-square displays can currently only be rotated by 180 degrees + # TODO: can framebuffer of super class be destroyed and re-created + # to match the new dimensions? or it's width/height changed? 
+ return + + # find rotation parameters and send command + rotation %= len(self.rotations) + ( madctl, + self.width, + self.height, + self.xstart, + self.ystart, ) = self.rotations[rotation] + if self.color_order == self.BGR: + madctl |= self._ST7789_MADCTL_BGR + else: + madctl &= ~self._ST7789_MADCTL_BGR + self._write(self._ST7789_MADCTL, bytes([madctl])) + # Set window for writing into + self._write(self._ST7789_CASET, + struct.pack(self._ENCODE_POS, self.xstart, self.width + self.xstart - 1)) + self._write(self._ST7789_RASET, + struct.pack(self._ENCODE_POS, self.ystart, self.height + self.ystart - 1)) + self._write(self._ST7789_RAMWR) + # TODO: Can we swap (modify) framebuffer width/height in the super() class? + self._rotation = rotation + + def imshow(self, image): + """ + Display a NumPy image on the screen. + + Args: + image (ndarray): Image to display + """ + # Get the common ROI between the image and internal display buffer + image_roi, buffer_roi = self._get_common_roi_with_buffer(image) + + # Ensure the image is in uint8 format + image_roi = self._convert_image_to_uint8(image_roi) + + # Convert the image to BGR565 format and write it to the buffer + self._write_image_to_buffer_bgr565(image_roi, buffer_roi) + + # Write buffer to display. Swap bytes if needed + if self.needs_swap: + self._write(None, self.buffer[:, :, ::-1]) + else: + self._write(None, self.buffer) + + def clear(self): + """ + Clear the display by filling it with black color. + """ + # Clear the buffer by filling it with zeros (black) + self.buffer[:] = 0 + # Write the buffer to the display + self._write(None, self.buffer) diff --git a/cv2_drivers/displays/st7789_pio.py b/cv2_drivers/displays/st7789_pio.py new file mode 100644 index 0000000..b95111d --- /dev/null +++ b/cv2_drivers/displays/st7789_pio.py @@ -0,0 +1,123 @@ +from .st7789 import ST7789 +from machine import Pin +import rp2 + +# Derived from: +# https://github.com/raspberrypi/pico-examples/tree/master/pio/st7789_lcd +class ST7789_PIO(ST7789): + """ + OpenCV PIO driver for ST7789 displays + + Args: + width (int): display width **Required** + height (int): display height **Required** + sm_id (int): State Machine ID for PIO **Required** + pin_clk (Pin): Clock pin number **Required** + pin_tx (Pin): Transmit pin number **Required** + pin_dc (Pin): Data/Command pin number **Required** + pin_cs (Pin): Chip Select pin number + rotation (int): Orientation of display + - 0-Portrait, default + - 1-Landscape + - 2-Inverted Portrait + - 3-Inverted Landscape + color_order (int): + - RGB: Red, Green Blue, default + - BGR: Blue, Green, Red + reverse_bytes_in_word (bool): + - Enable if the display uses LSB byte order for color words + """ + def __init__( + self, + width, + height, + sm_id, + pin_clk, + pin_tx, + pin_dc, + pin_cs=None, + rotation=0, + color_order=ST7789.BGR, + reverse_bytes_in_word=True, + ): + # Store PIO arguments + self.sm_id = sm_id + self.pin_clk = pin_clk + self.pin_tx = pin_tx + # self.pin_dc = pin_dc + # self.pin_cs = pin_cs + + self.clk = Pin(pin_clk, Pin.OUT) # Don't change mode/alt + self.tx = Pin(pin_tx, Pin.OUT) # Don't change mode/alt + self.clk = Pin(pin_clk, Pin.ALT, alt=Pin.ALT_PIO0) # Don't change mode/alt + self.tx = Pin(pin_tx, Pin.ALT, alt=Pin.ALT_PIO0) # Don't change mode/alt + self.dc = Pin(pin_dc, Pin.OUT) # Don't change mode/alt + self.cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs else None + + program = self._pio_write_spi + # program[0][0]=0x6001 + # program[0][4]=0xb042 + print(program) + + self.sm = rp2.StateMachine( + 
self.sm_id, + program, + out_base = self.pin_tx, + sideset_base = self.pin_clk, + # out_shiftdir = rp2.PIO.SHIFT_LEFT, + ) + + super().__init__(width, height, rotation, color_order, reverse_bytes_in_word) + + def _write(self, command=None, data=None): + """SPI write to the device: commands and data.""" + # Save the current mode and alt of the DC pin in case it's used by + # another device on the same SPI bus + # dcMode, dcAlt = self.savePinModeAlt(self.dc) + + # Temporarily set the DC pin to output mode + self.dc.init(mode=Pin.OUT) + + # Write to the display + if self.cs: + self.cs.off() + if command is not None: + self.dc.off() + self._pio_write(command) + if data is not None: + self.dc.on() + self._pio_write(data) + if self.cs: + self.cs.on() + + # Restore the DC pin to its original mode and alt + # self.dc.init(mode=dcMode, alt=dcAlt) + + def _pio_write(self, data): + """Write data to the display using PIO.""" + # Start the state machine + self.sm.active(1) + + # Write data to the state machine + self.sm.put(data) + + # Stop the state machine + self.sm.active(0) + + @rp2.asm_pio( + # fifo_join = rp2.PIO.JOIN_TX, + out_init = rp2.PIO.OUT_LOW, + sideset_init = rp2.PIO.OUT_LOW, + out_shiftdir = rp2.PIO.SHIFT_LEFT, + autopull = True, + pull_thresh = 8, + ) + def _pio_write_spi(): + out(pins, 1).side(0) + nop() + nop() + nop() + nop().side(1) + nop() + nop() + nop() diff --git a/cv2_drivers/displays/st7789_spi.py b/cv2_drivers/displays/st7789_spi.py new file mode 100644 index 0000000..11b64d3 --- /dev/null +++ b/cv2_drivers/displays/st7789_spi.py @@ -0,0 +1,67 @@ +from .st7789 import ST7789 +from machine import Pin + +# Derived from: +# https://github.com/easytarget/st7789-framebuffer/blob/main/st7789_purefb.py +class ST7789_SPI(ST7789): + """ + OpenCV SPI driver for ST7789 displays + + Args: + width (int): display width **Required** + height (int): display height **Required** + spi (SPI): SPI bus **Required** + pin_dc (Pin): Data/Command pin number **Required** + pin_cs (Pin): Chip Select pin number + rotation (int): Orientation of display + - 0-Portrait, default + - 1-Landscape + - 2-Inverted Portrait + - 3-Inverted Landscape + color_order (int): + - RGB: Red, Green Blue, default + - BGR: Blue, Green, Red + reverse_bytes_in_word (bool): + - Enable if the display uses LSB byte order for color words + """ + def __init__( + self, + width, + height, + spi, + pin_dc, + pin_cs=None, + rotation=0, + color_order=ST7789.BGR, + reverse_bytes_in_word=True, + ): + # Store SPI arguments + self.spi = spi + self.dc = Pin(pin_dc) # Don't change mode/alt + self.cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs else None + + super().__init__(width, height, rotation, color_order, reverse_bytes_in_word) + + def _write(self, command=None, data=None): + """SPI write to the device: commands and data.""" + # Save the current mode and alt of the DC pin in case it's used by + # another device on the same SPI bus + dcMode, dcAlt = self.savePinModeAlt(self.dc) + + # Temporarily set the DC pin to output mode + self.dc.init(mode=Pin.OUT) + + # Write to the display + if self.cs: + self.cs.off() + if command is not None: + self.dc.off() + self.spi.write(command) + if data is not None: + self.dc.on() + self.spi.write(data) + if self.cs: + self.cs.on() + + # Restore the DC pin to its original mode and alt + self.dc.init(mode=dcMode, alt=dcAlt) diff --git a/cv2_drivers/manifest.py b/cv2_drivers/manifest.py new file mode 100644 index 0000000..c556dbf --- /dev/null +++ b/cv2_drivers/manifest.py @@ -0,0 +1,9 @@ +# The manifest 
gets overwritten by the Makefile, so re-include the board's +# original manifest file +include("$(BOARD_DIR)/manifest.py") + +# Include this directory as one package +package("cv2_drivers", base_path="..") + +# Include the SD card module +require("sdcard") diff --git a/drivers/display/st7789_spi.py b/drivers/display/st7789_spi.py deleted file mode 100644 index 7640a1b..0000000 --- a/drivers/display/st7789_spi.py +++ /dev/null @@ -1,415 +0,0 @@ -# Modified from: -# https://github.com/easytarget/st7789-framebuffer/blob/main/st7789_purefb.py - -import struct -from time import sleep_ms -from machine import Pin, SPI -from ulab import numpy as np -import cv2 - -# ST7789 commands -_ST7789_SWRESET = b"\x01" -_ST7789_SLPIN = b"\x10" -_ST7789_SLPOUT = b"\x11" -_ST7789_NORON = b"\x13" -_ST7789_INVOFF = b"\x20" -_ST7789_INVON = b"\x21" -_ST7789_DISPOFF = b"\x28" -_ST7789_DISPON = b"\x29" -_ST7789_CASET = b"\x2a" -_ST7789_RASET = b"\x2b" -_ST7789_RAMWR = b"\x2c" -_ST7789_VSCRDEF = b"\x33" -_ST7789_COLMOD = b"\x3a" -_ST7789_MADCTL = b"\x36" -_ST7789_VSCSAD = b"\x37" -_ST7789_RAMCTL = b"\xb0" - -# MADCTL bits -_ST7789_MADCTL_MY = const(0x80) -_ST7789_MADCTL_MX = const(0x40) -_ST7789_MADCTL_MV = const(0x20) -_ST7789_MADCTL_ML = const(0x10) -_ST7789_MADCTL_BGR = const(0x08) -_ST7789_MADCTL_MH = const(0x04) -_ST7789_MADCTL_RGB = const(0x00) - -RGB = 0x00 -BGR = 0x08 - -# 8 basic color definitions -BLACK = const(0x0000) -BLUE = const(0x001F) -RED = const(0xF800) -GREEN = const(0x07E0) -CYAN = const(0x07FF) -MAGENTA = const(0xF81F) -YELLOW = const(0xFFE0) -WHITE = const(0xFFFF) - -_ENCODE_POS = const(">HH") - -# Rotation tables -# (madctl, width, height, xstart, ystart)[rotation % 4] - -_DISPLAY_240x320 = ( - (0x00, 240, 320, 0, 0), - (0x60, 320, 240, 0, 0), - (0xc0, 240, 320, 0, 0), - (0xa0, 320, 240, 0, 0)) - -_DISPLAY_170x320 = ( - (0x00, 170, 320, 35, 0), - (0x60, 320, 170, 0, 35), - (0xc0, 170, 320, 35, 0), - (0xa0, 320, 170, 0, 35)) - -_DISPLAY_240x240 = ( - (0x00, 240, 240, 0, 0), - (0x60, 240, 240, 0, 0), - (0xc0, 240, 240, 0, 80), - (0xa0, 240, 240, 80, 0)) - -_DISPLAY_135x240 = ( - (0x00, 135, 240, 52, 40), - (0x60, 240, 135, 40, 53), - (0xc0, 135, 240, 53, 40), - (0xa0, 240, 135, 40, 52)) - -_DISPLAY_128x128 = ( - (0x00, 128, 128, 2, 1), - (0x60, 128, 128, 1, 2), - (0xc0, 128, 128, 2, 1), - (0xa0, 128, 128, 1, 2)) - -# Supported displays (physical width, physical height, rotation table) -_SUPPORTED_DISPLAYS = ( - (240, 320, _DISPLAY_240x320), - (170, 320, _DISPLAY_170x320), - (240, 240, _DISPLAY_240x240), - (135, 240, _DISPLAY_135x240), - (128, 128, _DISPLAY_128x128)) - -# init tuple format (b'command', b'data', delay_ms) -_ST7789_INIT_CMDS = ( - ( b'\x11', b'\x00', 120), # Exit sleep mode - ( b'\x13', b'\x00', 0), # Turn on the display - ( b'\xb6', b'\x0a\x82', 0), # Set display function control - ( b'\x3a', b'\x55', 10), # Set pixel format to 16 bits per pixel (RGB565) - ( b'\xb2', b'\x0c\x0c\x00\x33\x33', 0), # Set porch control - ( b'\xb7', b'\x35', 0), # Set gate control - ( b'\xbb', b'\x28', 0), # Set VCOMS setting - ( b'\xc0', b'\x0c', 0), # Set power control 1 - ( b'\xc2', b'\x01\xff', 0), # Set power control 2 - ( b'\xc3', b'\x10', 0), # Set power control 3 - ( b'\xc4', b'\x20', 0), # Set power control 4 - ( b'\xc6', b'\x0f', 0), # Set VCOM control 1 - ( b'\xd0', b'\xa4\xa1', 0), # Set power control A - # Set gamma curve positive polarity - ( b'\xe0', b'\xd0\x00\x02\x07\x0a\x28\x32\x44\x42\x06\x0e\x12\x14\x17', 0), - # Set gamma curve negative polarity - ( b'\xe1', 
b'\xd0\x00\x02\x07\x0a\x28\x31\x54\x47\x0e\x1c\x17\x1b\x1e', 0), - ( b'\x21', b'\x00', 0), # Enable display inversion - ( b'\x29', b'\x00', 120) # Turn on the display -) - -class ST7789_SPI(): - """ - OpenCV SPI driver for ST7789 displays - - Args: - width (int): display width **Required** - height (int): display height **Required** - spi (SPI): SPI bus **Required** - pin_dc (Pin): Data/Command pin number **Required** - pin_cs (Pin): Chip Select pin number - rotation (int): Orientation of display - - 0-Portrait, default - - 1-Landscape - - 2-Inverted Portrait - - 3-Inverted Landscape - color_order (int): - - RGB: Red, Green Blue, default - - BGR: Blue, Green, Red - reverse_bytes_in_word (bool): - - Enable if the display uses LSB byte order for color words - """ - def __init__( - self, - width, - height, - spi, - pin_dc, - pin_cs=None, - rotation=0, - color_order=BGR, - reverse_bytes_in_word=True, - ): - # Store SPI arguments - self.spi = spi - self.dc = Pin(pin_dc) # Don't change mode/alt - self.cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs else None - # Initial dimensions and offsets; will be overridden when rotation applied - self.width = width - self.height = height - self.xstart = 0 - self.ystart = 0 - # Check display is known and get rotation table - self.rotations = self._find_rotations(width, height) - if not self.rotations: - supported_displays = ", ".join( - [f"{display[0]}x{display[1]}" for display in _SUPPORTED_DISPLAYS]) - raise ValueError( - f"Unsupported {width}x{height} display. Supported displays: {supported_displays}") - # Colors - self.color_order = color_order - self.needs_swap = reverse_bytes_in_word - # Reset the display - self.soft_reset() - # Yes, send init twice, once is not always enough - self.send_init(_ST7789_INIT_CMDS) - self.send_init(_ST7789_INIT_CMDS) - # Initial rotation - self._rotation = rotation % 4 - # Apply rotation - self.rotation(self._rotation) - # Create the framebuffer for the correct rotation - self.buffer = np.zeros((self.height, self.width, 2), dtype=np.uint8) - - def send_init(self, commands): - """ - Send initialisation commands to display. - """ - for command, data, delay in commands: - self._write(command, data) - sleep_ms(delay) - - def soft_reset(self): - """ - Soft reset display. - """ - self._write(_ST7789_SWRESET) - sleep_ms(150) - - def _find_rotations(self, width, height): - """ Find the correct rotation for our display or return None """ - for display in _SUPPORTED_DISPLAYS: - if display[0] == width and display[1] == height: - return display[2] - return None - - def rotation(self, rotation): - """ - Set display rotation. - - Args: - rotation (int): - - 0-Portrait - - 1-Landscape - - 2-Inverted Portrait - - 3-Inverted Landscape - """ - if ((rotation % 2) != (self._rotation % 2)) and (self.width != self.height): - # non-square displays can currently only be rotated by 180 degrees - # TODO: can framebuffer of super class be destroyed and re-created - # to match the new dimensions? or it's width/height changed? 
- return - - # find rotation parameters and send command - rotation %= len(self.rotations) - ( madctl, - self.width, - self.height, - self.xstart, - self.ystart, ) = self.rotations[rotation] - if self.color_order == BGR: - madctl |= _ST7789_MADCTL_BGR - else: - madctl &= ~_ST7789_MADCTL_BGR - self._write(_ST7789_MADCTL, bytes([madctl])) - # Set window for writing into - self._write(_ST7789_CASET, - struct.pack(_ENCODE_POS, self.xstart, self.width + self.xstart - 1)) - self._write(_ST7789_RASET, - struct.pack(_ENCODE_POS, self.ystart, self.height + self.ystart - 1)) - self._write(_ST7789_RAMWR) - # TODO: Can we swap (modify) framebuffer width/height in the super() class? - self._rotation = rotation - - def _get_common_roi_with_buffer(self, image): - """ - Get the common region of interest (ROI) between the image and the - display's internal buffer. - - Args: - image (ndarray): Image to display - - Returns: - tuple: (image_roi, buffer_roi) - """ - # Ensure image is a NumPy ndarray - if type(image) is not np.ndarray: - raise TypeError("Image must be a NumPy ndarray") - - # Determing number of rows and columns in the image - image_rows = image.shape[0] - if image.ndim < 2: - image_cols = 1 - else: - image_cols = image.shape[1] - - # Get the common ROI between the image and the buffer - row_max = min(image_rows, self.height) - col_max = min(image_cols, self.width) - img_roi = image[:row_max, :col_max] - buffer_roi = self.buffer[:row_max, :col_max] - return img_roi, buffer_roi - - def _convert_image_to_uint8(self, image): - """ - Convert the image to uint8 format if necessary. - - Args: - image (ndarray): Image to convert - - Returns: - Image: Converted image - """ - # Check if the image is already in uint8 format - if image.dtype is np.uint8: - return image - - # Convert to uint8 format. This unfortunately requires creating a new - # buffer for the converted image, which takes more memory - if image.dtype == np.int8: - return cv2.convertScaleAbs(image, alpha=1, beta=127) - elif image.dtype == np.int16: - return cv2.convertScaleAbs(image, alpha=1/255, beta=127) - elif image.dtype == np.uint16: - return cv2.convertScaleAbs(image, alpha=1/255) - elif image.dtype == np.float: - # This implementation creates an additional buffer from np.clip() - # TODO: Find another solution that avoids an additional buffer - return cv2.convertScaleAbs(np.clip(image, 0, 1), alpha=255) - else: - raise ValueError(f"Unsupported image dtype: {image.dtype}") - - def _write_image_to_buffer_bgr565(self, image_roi, buffer_roi): - """ - Convert the image ROI to BGR565 format and write it to the buffer ROI. - - Args: - image_roi (ndarray): Image region of interest - buffer_roi (ndarray): Buffer region of interest - """ - # Determine the number of channels in the image - if image_roi.ndim < 3: - ch = 1 - else: - ch = image_roi.shape[2] - - if ch == 1: # Grayscale - buffer_roi = cv2.cvtColor(image_roi, cv2.COLOR_GRAY2BGR565, buffer_roi) - elif ch == 2: # Already in BGR565 format - buffer_roi[:] = image_roi - elif ch == 3: # BGR - buffer_roi = cv2.cvtColor(image_roi, cv2.COLOR_BGR2BGR565, buffer_roi) - else: - raise ValueError("Image must be 1, 2 or 3 channels (grayscale, BGR565, or BGR)") - - def imshow(self, image): - """ - Display a NumPy image on the screen. 
- - Args: - image (ndarray): Image to display - """ - # Get the common ROI between the image and internal display buffer - image_roi, buffer_roi = self._get_common_roi_with_buffer(image) - - # Ensure the image is in uint8 format - image_roi = self._convert_image_to_uint8(image_roi) - - # Convert the image to BGR565 format and write it to the buffer - self._write_image_to_buffer_bgr565(image_roi, buffer_roi) - - # Write buffer to display. Swap bytes if needed - if self.needs_swap: - self._write(None, self.buffer[:, :, ::-1]) - else: - self._write(None, self.buffer) - - def clear(self): - """ - Clear the display by filling it with black color. - """ - # Clear the buffer by filling it with zeros (black) - self.buffer[:] = 0 - # Write the buffer to the display - self._write(None, self.buffer) - - def savePinModeAlt(self, pin): - """ - Saves the current `mode` and `alt` of the pin so it can be restored - later. Mostly used to restore the SPI mode (MISO) of the DC pin after - communication with the display in case another device is using the same - SPI bus. - - Returns: - tuple: (mode, alt) - """ - # See: https://github.com/micropython/micropython/issues/17515 - # There's no way to get the mode and alt of a pin directly, so we - # convert the pin to a string and parse it. Example formats: - # "Pin(GPIO16, mode=OUT)" - # "Pin(GPIO16, mode=ALT, alt=SPI)" - pinStr = str(pin) - - # Extract the "mode" parameter from the pin string - if "mode=" in pinStr: - # Split between "mode=" and the next comma or closing parenthesis - modeStr = pinStr.split("mode=")[1].split(",")[0].split(")")[0] - - # Look up the mode in Pin class dictionary - mode = Pin.__dict__[modeStr] - else: - # No mode specified, just set to None - mode = None - - # Extrct the "alt" parameter from the pin string - if "alt=" in pinStr: - # Split between "alt=" and the next comma or closing parenthesis - altStr = pinStr.split("alt=")[1].split(",")[0].split(")")[0] - - # Look up the alt in Pin class dictionary (with "ALT_" prefix) - alt = Pin.__dict__["ALT_" + altStr] - else: - # No alt specified, just set to None - alt = None - - # Return the mode and alt as a tuple - return (mode, alt) - - def _write(self, command=None, data=None): - """SPI write to the device: commands and data.""" - # Save the current mode and alt of the DC pin in case it's used by - # another device on the same SPI bus - dcMode, dcAlt = self.savePinModeAlt(self.dc) - - # Temporarily set the DC pin to output mode - self.dc.init(mode=Pin.OUT) - - # Write to the display - if self.cs: - self.cs.off() - if command is not None: - self.dc.off() - self.spi.write(command) - if data is not None: - self.dc.on() - self.spi.write(data) - if self.cs: - self.cs.on() - - # Restore the DC pin to its original mode and alt - self.dc.init(mode=dcMode, alt=dcAlt) diff --git a/examples/boot.py b/examples/boot.py index c38eb3d..1900662 100644 --- a/examples/boot.py +++ b/examples/boot.py @@ -15,7 +15,7 @@ # a very popular display driver for embedded systems. Moreover, this example # uses an SPI-based driver, so it should work on any platform, but it's not # always the fastest option - import st7789_spi + from cv2_drivers.displays import st7789_spi # Create a display object. This will depend on the display driver you are # using, and you may need to adjust the parameters based on your specific @@ -75,7 +75,7 @@ # a popular camera module for embedded systems. 
This example uses a PIO # driver, which is a peripheral interface only available on Raspberry Pi RP2 # processors - import hm01b0_pio + from cv2_drivers.cameras import hm01b0_pio # Create a camera object. This will depend on the camera driver you are # using, and you may need to adjust the parameters based on your specific From fc54ace8eeccd0e68b55884d26c2d2e716838809 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 19 Jun 2025 14:51:17 -0600 Subject: [PATCH 070/158] ST7789 PIO driver: shift data 24 bits in sm.put() Fixes #20 --- cv2_drivers/displays/st7789_pio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cv2_drivers/displays/st7789_pio.py b/cv2_drivers/displays/st7789_pio.py index b95111d..5c953a6 100644 --- a/cv2_drivers/displays/st7789_pio.py +++ b/cv2_drivers/displays/st7789_pio.py @@ -99,7 +99,7 @@ def _pio_write(self, data): self.sm.active(1) # Write data to the state machine - self.sm.put(data) + self.sm.put(data, 24) # Stop the state machine self.sm.active(0) From 199796d2699ee4f46245315ac35eac57c0f64e87 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 19 Jun 2025 15:43:45 -0600 Subject: [PATCH 071/158] Clean up display drivers Clean up ST7789 PIO driver Remove debug print from ST7789 base class Fix display base class to have correct default for Pin mode and alt --- cv2_drivers/displays/cv2_display.py | 8 ++-- cv2_drivers/displays/st7789.py | 1 - cv2_drivers/displays/st7789_pio.py | 65 ++++++++++++++++------------- 3 files changed, 39 insertions(+), 35 deletions(-) diff --git a/cv2_drivers/displays/cv2_display.py b/cv2_drivers/displays/cv2_display.py index 78e445a..d3107df 100644 --- a/cv2_drivers/displays/cv2_display.py +++ b/cv2_drivers/displays/cv2_display.py @@ -113,8 +113,8 @@ def savePinModeAlt(self, pin): # Look up the mode in Pin class dictionary mode = Pin.__dict__[modeStr] else: - # No mode specified, just set to None - mode = None + # No mode specified, just set to -1 (default) + mode = -1 # Extract the "alt" parameter from the pin string if "alt=" in pinStr: @@ -130,8 +130,8 @@ def savePinModeAlt(self, pin): # Convert the altStr to an integer alt = int(altStr) else: - # No alt specified, just set to None - alt = None + # No alt specified, just set to -1 (default) + alt = -1 # Return the mode and alt as a tuple return (mode, alt) diff --git a/cv2_drivers/displays/st7789.py b/cv2_drivers/displays/st7789.py index ec28b41..ca3a01f 100644 --- a/cv2_drivers/displays/st7789.py +++ b/cv2_drivers/displays/st7789.py @@ -169,7 +169,6 @@ def send_init(self, commands): Send initialisation commands to display. 
""" for command, data, delay in commands: - print(command, data, delay) self._write(command, data) sleep_ms(delay) diff --git a/cv2_drivers/displays/st7789_pio.py b/cv2_drivers/displays/st7789_pio.py index 5c953a6..a2d7b66 100644 --- a/cv2_drivers/displays/st7789_pio.py +++ b/cv2_drivers/displays/st7789_pio.py @@ -16,6 +16,7 @@ class ST7789_PIO(ST7789): pin_tx (Pin): Transmit pin number **Required** pin_dc (Pin): Data/Command pin number **Required** pin_cs (Pin): Chip Select pin number + freq (int): State machine frequency in Hz, default -1 (system clock) rotation (int): Orientation of display - 0-Portrait, default - 1-Landscape @@ -36,47 +37,56 @@ def __init__( pin_tx, pin_dc, pin_cs=None, + freq=-1, rotation=0, color_order=ST7789.BGR, reverse_bytes_in_word=True, ): # Store PIO arguments self.sm_id = sm_id - self.pin_clk = pin_clk - self.pin_tx = pin_tx - # self.pin_dc = pin_dc - # self.pin_cs = pin_cs - - self.clk = Pin(pin_clk, Pin.OUT) # Don't change mode/alt - self.tx = Pin(pin_tx, Pin.OUT) # Don't change mode/alt - self.clk = Pin(pin_clk, Pin.ALT, alt=Pin.ALT_PIO0) # Don't change mode/alt - self.tx = Pin(pin_tx, Pin.ALT, alt=Pin.ALT_PIO0) # Don't change mode/alt - self.dc = Pin(pin_dc, Pin.OUT) # Don't change mode/alt + self.clk = Pin(pin_clk) # Don't change mode/alt + self.tx = Pin(pin_tx) # Don't change mode/alt + self.dc = Pin(pin_dc) # Don't change mode/alt self.cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs else None + self.freq = freq - program = self._pio_write_spi - # program[0][0]=0x6001 - # program[0][4]=0xb042 - print(program) + # Get the current mode and alt of the pins so they can be restored + txMode, txAlt = self.savePinModeAlt(self.tx) + clkMode, clkAlt = self.savePinModeAlt(self.clk) + # Initialize the PIO state machine self.sm = rp2.StateMachine( self.sm_id, - program, - out_base = self.pin_tx, - sideset_base = self.pin_clk, - # out_shiftdir = rp2.PIO.SHIFT_LEFT, + self._pio_write_spi, + freq = self.freq, + out_base = self.tx, + sideset_base = self.clk, ) + + # The tx and clk pins just got their mode and alt set for PIO0 or PIO1, + # so we need to save them again to restore later when _write() is called + self.txMode, self.txAlt = self.savePinModeAlt(self.tx) + self.clkMode, self.clkAlt = self.savePinModeAlt(self.clk) + + # Now restore the original mode and alt of the pins + self.tx.init(mode=txMode, alt=txAlt) + self.clk.init(mode=clkMode, alt=clkAlt) + # Call the parent class constructor super().__init__(width, height, rotation, color_order, reverse_bytes_in_word) def _write(self, command=None, data=None): """SPI write to the device: commands and data.""" - # Save the current mode and alt of the DC pin in case it's used by + # Save the current mode and alt of the spi pins in case they're used by # another device on the same SPI bus - # dcMode, dcAlt = self.savePinModeAlt(self.dc) + dcMode, dcAlt = self.savePinModeAlt(self.dc) + txMode, txAlt = self.savePinModeAlt(self.tx) + clkMode, clkAlt = self.savePinModeAlt(self.clk) - # Temporarily set the DC pin to output mode + # Temporarily set the SPI pins to the correct mode and alt for PIO self.dc.init(mode=Pin.OUT) + self.tx.init(mode=self.txMode, alt=self.txAlt) + self.clk.init(mode=self.clkMode, alt=self.clkAlt) # Write to the display if self.cs: @@ -90,8 +100,10 @@ def _write(self, command=None, data=None): if self.cs: self.cs.on() - # Restore the DC pin to its original mode and alt - # self.dc.init(mode=dcMode, alt=dcAlt) + # Restore the SPI pins to their original mode and alt + self.dc.init(mode=dcMode, alt=dcAlt) + 
self.tx.init(mode=txMode, alt=txAlt) + self.clk.init(mode=clkMode, alt=clkAlt) def _pio_write(self, data): """Write data to the display using PIO.""" @@ -105,7 +117,6 @@ def _pio_write(self, data): self.sm.active(0) @rp2.asm_pio( - # fifo_join = rp2.PIO.JOIN_TX, out_init = rp2.PIO.OUT_LOW, sideset_init = rp2.PIO.OUT_LOW, out_shiftdir = rp2.PIO.SHIFT_LEFT, @@ -114,10 +125,4 @@ def _pio_write(self, data): ) def _pio_write_spi(): out(pins, 1).side(0) - nop() - nop() - nop() nop().side(1) - nop() - nop() - nop() From e9e992e57d7e598d1df400edfdb29970da3a547c Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 19 Jun 2025 16:03:54 -0600 Subject: [PATCH 072/158] Optimize savePinModeAlt() See https://github.com/micropython/micropython/issues/17515#issuecomment-2988757150 --- cv2_drivers/displays/cv2_display.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/cv2_drivers/displays/cv2_display.py b/cv2_drivers/displays/cv2_display.py index d3107df..3467002 100644 --- a/cv2_drivers/displays/cv2_display.py +++ b/cv2_drivers/displays/cv2_display.py @@ -103,33 +103,33 @@ def savePinModeAlt(self, pin): # convert the pin to a string and parse it. Example formats: # "Pin(GPIO16, mode=OUT)" # "Pin(GPIO16, mode=ALT, alt=SPI)" - pinStr = str(pin) + pin_str = str(pin) # Extract the "mode" parameter from the pin string - if "mode=" in pinStr: + try: # Split between "mode=" and the next comma or closing parenthesis - modeStr = pinStr.split("mode=")[1].split(",")[0].split(")")[0] + mode_str = pin_str[pin_str.index("mode=") + 5:].partition(",")[0].partition(")")[0] # Look up the mode in Pin class dictionary - mode = Pin.__dict__[modeStr] - else: + mode = Pin.__dict__[mode_str] + except (ValueError, KeyError): # No mode specified, just set to -1 (default) mode = -1 # Extract the "alt" parameter from the pin string - if "alt=" in pinStr: + try: # Split between "alt=" and the next comma or closing parenthesis - altStr = pinStr.split("alt=")[1].split(",")[0].split(")")[0] + alt_str = pin_str[pin_str.index("alt=") + 4:].partition(",")[0].partition(")")[0] # Sometimes the value comes back as a number instead of a valid # "ALT_xyz" string, so we need to check it - if "ALT_" + altStr in Pin.__dict__: + if "ALT_" + alt_str in Pin.__dict__: # Look up the alt in Pin class dictionary (with "ALT_" prefix) - alt = Pin.__dict__["ALT_" + altStr] + alt = Pin.__dict__["ALT_" + alt_str] else: # Convert the altStr to an integer - alt = int(altStr) - else: + alt = int(alt_str) + except (ValueError, KeyError): # No alt specified, just set to -1 (default) alt = -1 From 6150a9e43342456e58790f4642522a2671d78429 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 20 Jun 2025 09:48:17 -0600 Subject: [PATCH 073/158] Change ST7789 PIO driver to use DMA transfers Transfers now complete in ~17.6ms (theoretical limit is ~16.4ms) Transfers used to take ~30ms without DMA --- cv2_drivers/displays/st7789_pio.py | 31 +++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/cv2_drivers/displays/st7789_pio.py b/cv2_drivers/displays/st7789_pio.py index a2d7b66..ef5ddf5 100644 --- a/cv2_drivers/displays/st7789_pio.py +++ b/cv2_drivers/displays/st7789_pio.py @@ -1,6 +1,7 @@ from .st7789 import ST7789 from machine import Pin import rp2 +# import time # Derived from: # https://github.com/raspberrypi/pico-examples/tree/master/pio/st7789_lcd @@ -67,11 +68,24 @@ def __init__( # so we need to save them again to restore later when _write() is called self.txMode, self.txAlt = 
self.savePinModeAlt(self.tx) self.clkMode, self.clkAlt = self.savePinModeAlt(self.clk) - + # Now restore the original mode and alt of the pins self.tx.init(mode=txMode, alt=txAlt) self.clk.init(mode=clkMode, alt=clkAlt) - + + # Set up DMA to transfer to the PIO state machine + self.dma = rp2.DMA() + req_num = ((self.sm_id // 4) << 3) + (self.sm_id % 4) + dma_ctrl = self.dma.pack_ctrl( + size = 0, # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit + inc_write = False, + treq_sel = req_num + ) + self.dma.config( + write = self.sm, + ctrl = dma_ctrl + ) + # Call the parent class constructor super().__init__(width, height, rotation, color_order, reverse_bytes_in_word) @@ -109,9 +123,16 @@ def _pio_write(self, data): """Write data to the display using PIO.""" # Start the state machine self.sm.active(1) - - # Write data to the state machine - self.sm.put(data, 24) + + # Configure DMA to read from the buffer and write to the state machine + self.dma.read = data + count = len(data) if isinstance(data, (bytes, bytearray)) else data.size + self.dma.count = count + + # Start the DMA transfer and wait for it to finish + self.dma.active(True) + while self.dma.active(): + pass # Stop the state machine self.sm.active(0) From e1dd199b096724a9e58a2742ecb6811fa5a54a40 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 20 Jun 2025 10:39:01 -0600 Subject: [PATCH 074/158] Change ST7789 PIO driver to use 32-bit transfers after initialization This now operates at the theoretical best rate of 16.4ms --- cv2_drivers/displays/st7789_pio.py | 59 ++++++++++++++++++------------ 1 file changed, 35 insertions(+), 24 deletions(-) diff --git a/cv2_drivers/displays/st7789_pio.py b/cv2_drivers/displays/st7789_pio.py index ef5ddf5..46d7769 100644 --- a/cv2_drivers/displays/st7789_pio.py +++ b/cv2_drivers/displays/st7789_pio.py @@ -55,14 +55,8 @@ def __init__( txMode, txAlt = self.savePinModeAlt(self.tx) clkMode, clkAlt = self.savePinModeAlt(self.clk) - # Initialize the PIO state machine - self.sm = rp2.StateMachine( - self.sm_id, - self._pio_write_spi, - freq = self.freq, - out_base = self.tx, - sideset_base = self.clk, - ) + # Start the PIO state machine and DMA with 1 byte per transfer + self._setup_sm_and_dma(1) # The tx and clk pins just got their mode and alt set for PIO0 or PIO1, # so we need to save them again to restore later when _write() is called @@ -73,11 +67,34 @@ def __init__( self.tx.init(mode=txMode, alt=txAlt) self.clk.init(mode=clkMode, alt=clkAlt) - # Set up DMA to transfer to the PIO state machine - self.dma = rp2.DMA() + # Call the parent class constructor + super().__init__(width, height, rotation, color_order, reverse_bytes_in_word) + + # Change the transfer size to 4 bytes for faster throughput + self._setup_sm_and_dma(4) + + def _setup_sm_and_dma(self, bytes_per_transfer): + # Store the bytes per transfer for later use + self.bytes_per_transfer = bytes_per_transfer + + # Initialize the PIO state machine + self.sm = rp2.StateMachine( + self.sm_id, + self._pio_write_spi, + freq = self.freq, + out_base = self.tx, + sideset_base = self.clk, + pull_thresh = bytes_per_transfer * 8 + ) + + # Instantiate a DMA controller if not already done + if not hasattr(self, 'dma'): + self.dma = rp2.DMA() + + # Configure the DMA to write to the PIO state machine req_num = ((self.sm_id // 4) << 3) + (self.sm_id % 4) dma_ctrl = self.dma.pack_ctrl( - size = 0, # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit + size = {1:0, 2:1, 4:2}[bytes_per_transfer], # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit inc_write = False, treq_sel = req_num ) @@ -86,9 +103,6 @@ 
def __init__( ctrl = dma_ctrl ) - # Call the parent class constructor - super().__init__(width, height, rotation, color_order, reverse_bytes_in_word) - def _write(self, command=None, data=None): """SPI write to the device: commands and data.""" # Save the current mode and alt of the spi pins in case they're used by @@ -121,15 +135,13 @@ def _write(self, command=None, data=None): def _pio_write(self, data): """Write data to the display using PIO.""" - # Start the state machine - self.sm.active(1) - - # Configure DMA to read from the buffer and write to the state machine - self.dma.read = data + # Configure the DMA transfer count and read address count = len(data) if isinstance(data, (bytes, bytearray)) else data.size - self.dma.count = count - - # Start the DMA transfer and wait for it to finish + self.dma.count = count // self.bytes_per_transfer + self.dma.read = data + + # Start the state machine and DMA transfer, and wait for it to finish + self.sm.active(1) self.dma.active(True) while self.dma.active(): pass @@ -141,8 +153,7 @@ def _pio_write(self, data): out_init = rp2.PIO.OUT_LOW, sideset_init = rp2.PIO.OUT_LOW, out_shiftdir = rp2.PIO.SHIFT_LEFT, - autopull = True, - pull_thresh = 8, + autopull = True ) def _pio_write_spi(): out(pins, 1).side(0) From 0e9857bc8ec6eca66550b8036f3f0880d2e3b612 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 20 Jun 2025 11:04:21 -0600 Subject: [PATCH 075/158] Add optional image argument to camera read() Enables optimization by not having to create new images all the time --- cv2_drivers/cameras/hm01b0_pio.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cv2_drivers/cameras/hm01b0_pio.py b/cv2_drivers/cameras/hm01b0_pio.py index 83d40d9..26de674 100644 --- a/cv2_drivers/cameras/hm01b0_pio.py +++ b/cv2_drivers/cameras/hm01b0_pio.py @@ -321,13 +321,13 @@ def open(self): def release(self): self.active(False) - def read(self): + def read(self, image = None): """ Reads a frame from the camera. Returns: tuple: (success, frame) """ - return (True, cv2.cvtColor(self.buffer, cv2.COLOR_BayerRG2BGR)) + return (True, cv2.cvtColor(self.buffer, cv2.COLOR_BayerRG2BGR, image)) def _vsync_handler(self): # Disable DMA before reconfiguring it From d4a137bf4a0c87d6ebd27b23abd3be134a618abf Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 20 Jun 2025 11:23:22 -0600 Subject: [PATCH 076/158] Fix ST7789 PIO driver swapping pairs of pixels Have to use 2 byte transfers with DMA. It still achieves the theoretical transfer time --- cv2_drivers/displays/st7789_pio.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cv2_drivers/displays/st7789_pio.py b/cv2_drivers/displays/st7789_pio.py index 46d7769..16390ab 100644 --- a/cv2_drivers/displays/st7789_pio.py +++ b/cv2_drivers/displays/st7789_pio.py @@ -70,8 +70,9 @@ def __init__( # Call the parent class constructor super().__init__(width, height, rotation, color_order, reverse_bytes_in_word) - # Change the transfer size to 4 bytes for faster throughput - self._setup_sm_and_dma(4) + # Change the transfer size to 2 bytes for faster throughput. 
Can't do 4 + # bytes, because then pairs of pixels get swapped + self._setup_sm_and_dma(2) def _setup_sm_and_dma(self, bytes_per_transfer): # Store the bytes per transfer for later use @@ -96,7 +97,8 @@ def _setup_sm_and_dma(self, bytes_per_transfer): dma_ctrl = self.dma.pack_ctrl( size = {1:0, 2:1, 4:2}[bytes_per_transfer], # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit inc_write = False, - treq_sel = req_num + treq_sel = req_num, + bswap = False ) self.dma.config( write = self.sm, From da813306e390558c75b91b74b6ff749da6f18942 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 20 Jun 2025 11:31:47 -0600 Subject: [PATCH 077/158] Update waitKey() prompts in examples 2 and 3 --- examples/ex02_imread_imwrite.py | 5 ++++- examples/ex03_camera.py | 7 +++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/examples/ex02_imread_imwrite.py b/examples/ex02_imread_imwrite.py index bd70e22..72fb42e 100644 --- a/examples/ex02_imread_imwrite.py +++ b/examples/ex02_imread_imwrite.py @@ -17,7 +17,10 @@ # depend on the display driver. For example, the default ST7789 display driver # will crop large images, and show small images in the top-left corner cv2.imshow(display, img) -key = cv2.waitKey(1000) + +# Prompt the user to press a key to continue +print("Press any key to continue") +key = cv2.waitKey(0) # Let's modify the image! Here we use `cv2.Canny()` to perform edge detection # on the image, which is a common operation in computer vision diff --git a/examples/ex03_camera.py b/examples/ex03_camera.py index e672107..c52f901 100644 --- a/examples/ex03_camera.py +++ b/examples/ex03_camera.py @@ -13,6 +13,9 @@ # `boot.py` script. See the example `boot.py` script for more details camera.open() +# Prompt the user to press a key to continue +print("Press any key to continue") + # Loop to continuously read frames from the camera and display them while True: # Read a frame from the camera, just like any other Python environment! It @@ -31,8 +34,8 @@ # Check for key presses key = cv2.waitKey(1) - # If the 'q' key is pressed, exit the loop - if key == ord('q'): + # If any key is pressed, exit the loop + if key != -1: break # Release the camera, just like in any other Python environment! 
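The ST7789 PIO patches above (073-076) converge on a reusable pattern: an rp2.DMA channel whose write target is the PIO state machine's TX FIFO, paced by that state machine's data-request (DREQ) line so the CPU never has to copy pixel data byte by byte. Below is a minimal, stand-alone sketch of that pattern on the rp2 port. The helper names make_pio_dma and dma_write are illustrative only, not part of the driver API; the sketch assumes a state machine already configured with autopull and a bytes-like source buffer, and, as in the patches, passes the StateMachine object itself as the DMA write target.

    import rp2

    def make_pio_dma(sm, sm_id):
        # TX DREQ number for state machine m of PIO block n is (n << 3) + m
        treq = ((sm_id // 4) << 3) + (sm_id % 4)
        dma = rp2.DMA()
        ctrl = dma.pack_ctrl(
            size = 0,          # 8-bit transfers
            inc_write = False, # the TX FIFO register address never changes
            treq_sel = treq    # let the state machine's FIFO pace the channel
        )
        dma.config(write = sm, ctrl = ctrl)
        return dma

    def dma_write(dma, sm, buf):
        dma.read = buf        # source buffer (a bytes/bytearray here)
        dma.count = len(buf)  # number of transfers, in units of `size`
        sm.active(1)
        dma.active(True)
        while dma.active():   # block until the channel drains the buffer
            pass
        sm.active(0)

The blocking wait mirrors the drivers above; a non-blocking variant would return immediately and poll dma.active() before starting the next frame.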
From 8144fe8f2fdb6f774aa8b6844f903a5dbca45a88 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 20 Jun 2025 11:44:16 -0600 Subject: [PATCH 078/158] Fix ST7789 PIO driver pin modes Needs to restore the mode and alt of the PIO pins every time _setup_sm_and_dma() is called --- cv2_drivers/displays/st7789_pio.py | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/cv2_drivers/displays/st7789_pio.py b/cv2_drivers/displays/st7789_pio.py index 16390ab..34dba5d 100644 --- a/cv2_drivers/displays/st7789_pio.py +++ b/cv2_drivers/displays/st7789_pio.py @@ -50,23 +50,10 @@ def __init__( self.dc = Pin(pin_dc) # Don't change mode/alt self.cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs else None self.freq = freq - - # Get the current mode and alt of the pins so they can be restored - txMode, txAlt = self.savePinModeAlt(self.tx) - clkMode, clkAlt = self.savePinModeAlt(self.clk) # Start the PIO state machine and DMA with 1 byte per transfer self._setup_sm_and_dma(1) - # The tx and clk pins just got their mode and alt set for PIO0 or PIO1, - # so we need to save them again to restore later when _write() is called - self.txMode, self.txAlt = self.savePinModeAlt(self.tx) - self.clkMode, self.clkAlt = self.savePinModeAlt(self.clk) - - # Now restore the original mode and alt of the pins - self.tx.init(mode=txMode, alt=txAlt) - self.clk.init(mode=clkMode, alt=clkAlt) - # Call the parent class constructor super().__init__(width, height, rotation, color_order, reverse_bytes_in_word) @@ -78,6 +65,10 @@ def _setup_sm_and_dma(self, bytes_per_transfer): # Store the bytes per transfer for later use self.bytes_per_transfer = bytes_per_transfer + # Get the current mode and alt of the pins so they can be restored + txMode, txAlt = self.savePinModeAlt(self.tx) + clkMode, clkAlt = self.savePinModeAlt(self.clk) + # Initialize the PIO state machine self.sm = rp2.StateMachine( self.sm_id, @@ -88,7 +79,18 @@ def _setup_sm_and_dma(self, bytes_per_transfer): pull_thresh = bytes_per_transfer * 8 ) - # The tx and clk pins just got their mode and alt set for PIO0 or PIO1. + # The tx and clk pins just got their mode and alt set for PIO0 or PIO1. + # We need to save them again to restore later when _write() is called, + # if we haven't already + if not hasattr(self, 'txMode'): + self.txMode, self.txAlt = self.savePinModeAlt(self.tx) + self.clkMode, self.clkAlt = self.savePinModeAlt(self.clk) + + # Now restore the original mode and alt of the pins + self.tx.init(mode=txMode, alt=txAlt) + self.clk.init(mode=clkMode, alt=clkAlt) + + # Instantiate a DMA controller if not already done if not hasattr(self, 'dma'): self.dma = rp2.DMA() From 09aa23c6c22a3d9d4c379d83e8dd15138c9b0b81 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 20 Jun 2025 11:45:46 -0600 Subject: [PATCH 079/158] Add helpful prints to example 2 --- examples/ex02_imread_imwrite.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/examples/ex02_imread_imwrite.py b/examples/ex02_imread_imwrite.py index 72fb42e..1850187 100644 --- a/examples/ex02_imread_imwrite.py +++ b/examples/ex02_imread_imwrite.py @@ -9,6 +9,7 @@ # card and change the path to point to the SD card # # Note - only BMP and PNG formats are currently supported in MicroPython OpenCV +print("Loading image, this may take a few seconds...") img = cv2.imread("test_images/sparkfun_logo.png") # Show the image for 1 second @@ -24,6 +25,7 @@ # Let's modify the image! 
Here we use `cv2.Canny()` to perform edge detection # on the image, which is a common operation in computer vision +print("Performing edge detection...") edges = cv2.Canny(img, 100, 200) # Display the modified image @@ -35,6 +37,7 @@ # Again, SD cards are supported, just change the path to point to the SD card # # Note - only BMP and PNG formats are currently supported in MicroPython OpenCV +print("Saving modified image...") success = cv2.imwrite("test_images/sparkfun_logo_edges.png", edges) # Check if the image was saved successfully From dfff6f28242fc80cf4660c30a959dbc23f5ca345 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 20 Jun 2025 11:57:55 -0600 Subject: [PATCH 080/158] Add more helpful error messages to boot.py SD card initialization --- examples/boot.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/examples/boot.py b/examples/boot.py index 1900662..4b94039 100644 --- a/examples/boot.py +++ b/examples/boot.py @@ -49,8 +49,14 @@ uos.mount(vfs, "/sd") except ImportError: print("boot.py - sdcard module not found, skipping SD card initialization.") -except OSError: - print("boot.py - Failed to mount SD card, skipping SD card initialization.") +except OSError as e: + eStr = str(e) + if "no SD card" in eStr: + print("boot.py - no SD card found, skipping SD card initialization.") + elif "Errno 1" in eStr: + print("boot.py - SD card already mounted, skipping SD card initialization.") + else: + print("boot.py - Failed to mount SD card, skipping SD card initialization.") # Set the SPI bus baudrate (note - the sdcard module overrides the baudrate upon # initialization, so the baudrate should be set after that). It is recommended From 9e3ec3209558f973f7b4ca12a72b6bb304647b07 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 20 Jun 2025 11:58:24 -0600 Subject: [PATCH 081/158] Tweak print in example 2 --- examples/ex02_imread_imwrite.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/ex02_imread_imwrite.py b/examples/ex02_imread_imwrite.py index 1850187..11b41ba 100644 --- a/examples/ex02_imread_imwrite.py +++ b/examples/ex02_imread_imwrite.py @@ -9,7 +9,7 @@ # card and change the path to point to the SD card # # Note - only BMP and PNG formats are currently supported in MicroPython OpenCV -print("Loading image, this may take a few seconds...") +print("Loading image...") img = cv2.imread("test_images/sparkfun_logo.png") # Show the image for 1 second From e959eb6341d3cb91f8770acd723ee7adcbffa96d Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 20 Jun 2025 11:59:22 -0600 Subject: [PATCH 082/158] Add ST7789 PIO driver to example boot.py --- examples/boot.py | 45 +++++++++++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/examples/boot.py b/examples/boot.py index 4b94039..c570f74 100644 --- a/examples/boot.py +++ b/examples/boot.py @@ -11,21 +11,38 @@ # Initialize display, if available try: - # Import a display driver module. This example assumes the ST7789, which is - # a very popular display driver for embedded systems. Moreover, this example - # uses an SPI-based driver, so it should work on any platform, but it's not - # always the fastest option - from cv2_drivers.displays import st7789_spi + # Import a display driver module. Multiple options are provided below, so + # you can choose the one that best fits your needs. 
You may need to adjust + # the parameters based on your specific display and board configuration + import cv2_drivers.displays as displays - # Create a display object. This will depend on the display driver you are - # using, and you may need to adjust the parameters based on your specific - # display and board configuration - display = st7789_spi.ST7789_SPI(width=240, - height=320, - spi=spi, - pin_dc=16, - pin_cs=17, - rotation=1) + ############################################################################ + # ST7789 - A very popular display for embedded systems + ############################################################################ + + # SPI interface. This should work on any platform, but it's not always the + # fastest option (24Mbps on RP2350) + display = displays.st7789_spi.ST7789_SPI( + width=240, + height=320, + spi=spi, + pin_dc=16, + pin_cs=17, + rotation=1 + ) + + # PIO interface. This is only available on Raspberry Pi RP2 processors, + # and is much faster than the SPI interface (up to 75Mbps on RP2350) + # display = displays.st7789_pio.ST7789_PIO( + # width=240, + # height=320, + # sm_id=1, + # pin_clk=18, + # pin_tx=19, + # pin_dc=16, + # pin_cs=17, + # rotation=1 + # ) except ImportError: print("boot.py - Display driver module not found, skipping display initialization.") From 1a4ee29fea7886ac1b93324058579078ae7ef108 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 26 Jun 2025 10:45:51 -0600 Subject: [PATCH 083/158] Fix build on Ubuntu 24.04 (missing return) --- src/convert.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/convert.cpp b/src/convert.cpp index 6e12490..89ecb75 100644 --- a/src/convert.cpp +++ b/src/convert.cpp @@ -37,7 +37,7 @@ ndarray_obj_t *mat_to_ndarray(Mat& mat) // Derived from: // https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_convert.cpp#L313-L328 if(mat.data == NULL) - mp_const_none; + return (ndarray_obj_t*) mp_const_none; Mat temp, *ptr = (Mat*)&mat; if(!ptr->u || ptr->allocator != &GetNumpyAllocator()) { From a34c4e9403ed46f56e0e8fbb76aa6c74e104042c Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 26 Jun 2025 10:46:15 -0600 Subject: [PATCH 084/158] Remove duplicate installs from GitHub build workflow --- .github/workflows/build.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3c2de1c..c801370 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -19,7 +19,6 @@ jobs: submodules: true - name: Install packages run: | - sudo apt-get install gcc-arm-none-eabi libnewlib-arm-none-eabi sudo apt install cmake python3 build-essential gcc-arm-none-eabi libnewlib-arm-none-eabi libstdc++-arm-none-eabi-newlib - name: Build MPY Cross run: make -C micropython/mpy-cross From 9bc2b2070e8e6d58a3b12ba165ab43fa6cc339e4 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 26 Jun 2025 10:51:38 -0600 Subject: [PATCH 085/158] Refactor camera driver folder Split HM01B0 driver into multiple files for easier addition of other cameras and interfaces --- cv2_drivers/cameras/cv2_camera.py | 5 + cv2_drivers/cameras/dvp_camera.py | 26 +++ cv2_drivers/cameras/dvp_rp2_pio.py | 115 ++++++++++ cv2_drivers/cameras/hm01b0.py | 268 ++++++++++++++++++++++ cv2_drivers/cameras/hm01b0_pio.py | 349 +---------------------------- 5 files changed, 422 insertions(+), 341 deletions(-) create mode 100644 cv2_drivers/cameras/cv2_camera.py create mode 100644 cv2_drivers/cameras/dvp_camera.py create mode 100644 
cv2_drivers/cameras/dvp_rp2_pio.py create mode 100644 cv2_drivers/cameras/hm01b0.py diff --git a/cv2_drivers/cameras/cv2_camera.py b/cv2_drivers/cameras/cv2_camera.py new file mode 100644 index 0000000..77a7720 --- /dev/null +++ b/cv2_drivers/cameras/cv2_camera.py @@ -0,0 +1,5 @@ +from ulab import numpy as np + +class CV2_Camera(): + def __init__(self, buffer_size): + self.buffer = np.zeros(buffer_size, dtype=np.uint8) diff --git a/cv2_drivers/cameras/dvp_camera.py b/cv2_drivers/cameras/dvp_camera.py new file mode 100644 index 0000000..9105c70 --- /dev/null +++ b/cv2_drivers/cameras/dvp_camera.py @@ -0,0 +1,26 @@ +from .cv2_camera import CV2_Camera +from machine import Pin +from time import sleep_us + +class DVP_Camera(CV2_Camera): + def __init__( + self, + i2c, + i2c_address, + buffer_size + ): + super().__init__(buffer_size) + + self.i2c = i2c + self.i2c_address = i2c_address + + def readRegister(self, reg, nbytes=1): + self.i2c.writeto(self.i2c_address, bytes([reg >> 8, reg & 0xFF])) + return self.i2c.readfrom(self.i2c_address, nbytes) + + def writeRegister(self, reg, data): + if isinstance(data, int): + data = bytes([data]) + elif isinstance(data, (list, tuple)): + data = bytes(data) + self.i2c.writeto(self.i2c_address, bytes([reg >> 8, reg & 0xFF]) + data) diff --git a/cv2_drivers/cameras/dvp_rp2_pio.py b/cv2_drivers/cameras/dvp_rp2_pio.py new file mode 100644 index 0000000..517673c --- /dev/null +++ b/cv2_drivers/cameras/dvp_rp2_pio.py @@ -0,0 +1,115 @@ +import rp2 +from machine import Pin, PWM + +class DVP_RP2_PIO(): + def __init__( + self, + pin_d0, + pin_vsync, + pin_hsync, + pin_pclk, + pin_xclk, + sm_id, + num_data_pins + ): + self.pin_d0 = pin_d0 + self.pin_vsync = pin_vsync + self.pin_hsync = pin_hsync + self.pin_pclk = pin_pclk + self.pin_xclk = pin_xclk + self.sm_id = sm_id + + for i in range(num_data_pins): + Pin(pin_d0+i, Pin.IN) + Pin(pin_vsync, Pin.IN) + Pin(pin_hsync, Pin.IN) + Pin(pin_pclk, Pin.IN) + + if self.pin_xclk is not None: + self.xclk = PWM(Pin(pin_xclk)) + # self.xclk.freq(25_000_000) + self.xclk.freq(15_000_000) + self.xclk.duty_u16(32768) + + self.start_pio_dma(num_data_pins) + + def start_pio_dma(self, num_data_pins): + program = self._pio_read_dvp + # Mask in the GPIO pins + program[0][0] |= self.pin_hsync & 0x1F + program[0][1] |= self.pin_pclk & 0x1F + program[0][3] |= self.pin_pclk & 0x1F + + # Mask in the number of data pins + program[0][2] &= 0xFFFFFFE0 + program[0][2] |= num_data_pins + + self.sm = rp2.StateMachine( + self.sm_id, + program, + in_base = self.pin_d0 + ) + self.sm.active(1) + + self.dma = rp2.DMA() + req_num = ((self.sm_id // 4) << 3) + (self.sm_id % 4) + 4 + dma_ctrl = self.dma.pack_ctrl( + # size = 2, # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit + size = 2, # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit + inc_read = False, + treq_sel = req_num, + bswap = False + # irq_quiet = False + ) + self.dma.config( + read = self.sm, + # count = 244 * 324 // 4, + count = 240 * 320 * 2 // 4, + ctrl = dma_ctrl + ) + + def active(self, active = None): + if active == None: + return self.sm.active() + + self.sm.active(active) + + if active: + Pin(self.pin_vsync).irq( + trigger = Pin.IRQ_FALLING, + handler = lambda pin: self._vsync_handler() + ) + else: + Pin(self.pin_vsync).irq( + handler = None + ) + + def _vsync_handler(self): + # print("VSYNC") + # Disable DMA before reconfiguring it + self.dma.active(False) + + # Reset state machine to ensure ISR is cleared + self.sm.restart() + + # Ensure PIO RX FIFO is empty (it's not emptied by `sm.restart()`) + while 
self.sm.rx_fifo() > 0: + self.sm.get() + + # Reset the DMA write address + self.dma.write = self.buffer + + # Start the DMA + self.dma.active(True) + + @rp2.asm_pio( + in_shiftdir = rp2.PIO.SHIFT_LEFT, + push_thresh = 32, + autopush = True, + fifo_join = rp2.PIO.JOIN_RX + ) + def _pio_read_dvp(): + wait(1, gpio, 0) # Mask in HSYNC pin + wait(1, gpio, 0) # Mask in PCLK pin + in_(pins, 1) # Mask in number of pins + wait(0, gpio, 0) # Mask in PCLK pin diff --git a/cv2_drivers/cameras/hm01b0.py b/cv2_drivers/cameras/hm01b0.py new file mode 100644 index 0000000..262d770 --- /dev/null +++ b/cv2_drivers/cameras/hm01b0.py @@ -0,0 +1,268 @@ +from .dvp_camera import DVP_Camera +from time import sleep_us +import cv2 + +# Derived from: +# https://github.com/openmv/openmv/blob/5acf5baf92b4314a549bdd068138e5df6cc0bac7/drivers/sensors/hm01b0.c +class HM01B0(DVP_Camera): + + # Read only registers + MODEL_ID_H = 0x0000 + MODEL_ID_L = 0x0001 + FRAME_COUNT = 0x0005 + PIXEL_ORDER = 0x0006 + # Sensor mode control + MODE_SELECT = 0x0100 + IMG_ORIENTATION = 0x0101 + SW_RESET = 0x0103 + GRP_PARAM_HOLD = 0x0104 + # Sensor exposure gain control + INTEGRATION_H = 0x0202 + INTEGRATION_L = 0x0203 + ANALOG_GAIN = 0x0205 + DIGITAL_GAIN_H = 0x020E + DIGITAL_GAIN_L = 0x020F + # Frame timing control + FRAME_LEN_LINES_H = 0x0340 + FRAME_LEN_LINES_L = 0x0341 + LINE_LEN_PCK_H = 0x0342 + LINE_LEN_PCK_L = 0x0343 + # Binning mode control + READOUT_X = 0x0383 + READOUT_Y = 0x0387 + BINNING_MODE = 0x0390 + # Test pattern control + TEST_PATTERN_MODE = 0x0601 + # Black level control + BLC_CFG = 0x1000 + BLC_TGT = 0x1003 + BLI_EN = 0x1006 + BLC2_TGT = 0x1007 + # Sensor reserved + DPC_CTRL = 0x1008 + SINGLE_THR_HOT = 0x100B + SINGLE_THR_COLD = 0x100C + # VSYNC,HSYNC and pixel shift register + VSYNC_HSYNC_PIXEL_SHIFT_EN = 0x1012 + # Automatic exposure gain control + AE_CTRL = 0x2100 + AE_TARGET_MEAN = 0x2101 + AE_MIN_MEAN = 0x2102 + CONVERGE_IN_TH = 0x2103 + CONVERGE_OUT_TH = 0x2104 + MAX_INTG_H = 0x2105 + MAX_INTG_L = 0x2106 + MIN_INTG = 0x2107 + MAX_AGAIN_FULL = 0x2108 + MAX_AGAIN_BIN2 = 0x2109 + MIN_AGAIN = 0x210A + MAX_DGAIN = 0x210B + MIN_DGAIN = 0x210C + DAMPING_FACTOR = 0x210D + FS_CTRL = 0x210E + FS_60HZ_H = 0x210F + FS_60HZ_L = 0x2110 + FS_50HZ_H = 0x2111 + FS_50HZ_L = 0x2112 + FS_HYST_TH = 0x2113 + # Motion detection control + MD_CTRL = 0x2150 + I2C_CLEAR = 0x2153 + WMEAN_DIFF_TH_H = 0x2155 + WMEAN_DIFF_TH_M = 0x2156 + WMEAN_DIFF_TH_L = 0x2157 + MD_THH = 0x2158 + MD_THM1 = 0x2159 + MD_THM2 = 0x215A + MD_THL = 0x215B + STATISTIC_CTRL = 0x2000 + MD_LROI_X_START_H = 0x2011 + MD_LROI_X_START_L = 0x2012 + MD_LROI_Y_START_H = 0x2013 + MD_LROI_Y_START_L = 0x2014 + MD_LROI_X_END_H = 0x2015 + MD_LROI_X_END_L = 0x2016 + MD_LROI_Y_END_H = 0x2017 + MD_LROI_Y_END_L = 0x2018 + MD_INTERRUPT = 0x2160 + # Sensor timing control + QVGA_WIN_EN = 0x3010 + SIX_BIT_MODE_EN = 0x3011 + PMU_AUTOSLEEP_FRAMECNT = 0x3020 + ADVANCE_VSYNC = 0x3022 + ADVANCE_HSYNC = 0x3023 + EARLY_GAIN = 0x3035 + # IO and clock control + BIT_CONTROL = 0x3059 + OSC_CLK_DIV = 0x3060 + ANA_Register_11 = 0x3061 + IO_DRIVE_STR = 0x3062 + IO_DRIVE_STR2 = 0x3063 + ANA_Register_14 = 0x3064 + OUTPUT_PIN_STATUS_CONTROL = 0x3065 + ANA_Register_17 = 0x3067 + PCLK_POLARITY = 0x3068 + + # Useful values of Himax registers + HIMAX_RESET = 0x01 + HIMAX_MODE_STANDBY = 0x00 + HIMAX_MODE_STREAMING = 0x01 # I2C triggered streaming enable + HIMAX_MODE_STREAMING_NFRAMES = 0x03 # Output N frames + HIMAX_MODE_STREAMING_TRIG = 0x05 # Hardware Trigger + # HIMAX_SET_HMIRROR (r, x) ((r & 0xFE) | 
((x & 1) << 0)) + # HIMAX_SET_VMIRROR (r, x) ((r & 0xFD) | ((x & 1) << 1)) + + PCLK_RISING_EDGE = 0x00 + PCLK_FALLING_EDGE = 0x01 + AE_CTRL_ENABLE = 0x00 + AE_CTRL_DISABLE = 0x01 + + HIMAX_BOOT_RETRY = 10 + HIMAX_LINE_LEN_PCK_FULL = 0x178 + HIMAX_FRAME_LENGTH_FULL = 0x109 + + HIMAX_LINE_LEN_PCK_QVGA = 0x178 + HIMAX_FRAME_LENGTH_QVGA = 0x104 + + HIMAX_LINE_LEN_PCK_QQVGA = 0x178 + HIMAX_FRAME_LENGTH_QQVGA = 0x084 + + INIT_COMMANDS = ( + (0x3044, 0x0A), # Increase CDS time for settling + (0x3045, 0x00), # Make symmetric for cds_tg and rst_tg + (0x3047, 0x0A), # Increase CDS time for settling + (0x3050, 0xC0), # Make negative offset up to 4x + (0x3051, 0x42), + (0x3052, 0x50), + (0x3053, 0x00), + (0x3054, 0x03), # tuning sf sig clamping as lowest + (0x3055, 0xF7), # tuning dsun + (0x3056, 0xF8), # increase adc nonoverlap clk + (0x3057, 0x29), # increase adc pwr for missing code + (0x3058, 0x1F), # turn on dsun + (0x3059, 0x1E), + (0x3064, 0x00), + (0x3065, 0x04), # pad pull 0 + (ANA_Register_17, 0x00), # Disable internal oscillator + + (0x1012, 0x00), # Sync. shift disable + + (AE_CTRL, 0x01), #Automatic Exposure + (AE_TARGET_MEAN, 0x80), #AE target mean [Def: 0x3C] + (AE_MIN_MEAN, 0x0A), #AE min target mean [Def: 0x0A] + (CONVERGE_IN_TH, 0x03), #Converge in threshold [Def: 0x03] + (CONVERGE_OUT_TH, 0x05), #Converge out threshold [Def: 0x05] + (MAX_INTG_H, (HIMAX_FRAME_LENGTH_QVGA - 2) >> 8), #Maximum INTG High Byte [Def: 0x01] + (MAX_INTG_L, (HIMAX_FRAME_LENGTH_QVGA - 2) & 0xFF), #Maximum INTG Low Byte [Def: 0x54] + (MAX_AGAIN_FULL, 0x04), #Maximum Analog gain in full frame mode [Def: 0x03] + (MAX_AGAIN_BIN2, 0x04), #Maximum Analog gain in bin2 mode [Def: 0x04] + (MAX_DGAIN, 0xC0), + + (INTEGRATION_H, 0x01), #Integration H [Def: 0x01] + (INTEGRATION_L, 0x08), #Integration L [Def: 0x08] + (ANALOG_GAIN, 0x00), #Analog Global Gain [Def: 0x00] + (DAMPING_FACTOR, 0x20), #Damping Factor [Def: 0x20] + (DIGITAL_GAIN_H, 0x01), #Digital Gain High [Def: 0x01] + (DIGITAL_GAIN_L, 0x00), #Digital Gain Low [Def: 0x00] + + (MD_CTRL, 0x00), + (FRAME_LEN_LINES_H, HIMAX_FRAME_LENGTH_QVGA >> 8), + (FRAME_LEN_LINES_L, HIMAX_FRAME_LENGTH_QVGA & 0xFF), + (LINE_LEN_PCK_H, HIMAX_LINE_LEN_PCK_QVGA >> 8), + (LINE_LEN_PCK_L, HIMAX_LINE_LEN_PCK_QVGA & 0xFF), + (QVGA_WIN_EN, 0x01), # Enable QVGA window readout + (0x3059, 0x22), # 1-bit mode + (OSC_CLK_DIV, 0x14), + (IMG_ORIENTATION, 0x00), # change the orientation + (0x0104, 0x01), + (MODE_SELECT, 0x01), # Streaming mode + ) + + def __init__( + self, + i2c, + i2c_address = 0x24, + num_data_pins = 1 + ): + super().__init__(i2c, i2c_address, (244, 324)) + + # for i in range(len(self.INIT_COMMANDS)): + # if self.INIT_COMMANDS[i][0] == 0x3059: + # if num_data_pins == 1: + # self.INIT_COMMANDS[i][1] = 0x22 + # elif num_data_pins == 4: + # self.INIT_COMMANDS[i][1] = 0x42 + # else: + # self.INIT_COMMANDS[i][1] = 0x02 + + self.soft_reset() + self.send_init(num_data_pins) + + def is_connected(self): + try: + # Try to read the chip ID + # If it throws an I/O error - the device isn't connected + id = self.getChipID() + + # Confirm the chip ID is correct + if id == 0x01B0: + return True + else: + return False + except: + return False + + def getChipID(self): + """ + Reads the chip ID from the HM01B0 sensor. + Returns: + int: The chip ID as a 16-bit integer. + """ + data = self.readRegister(self.MODEL_ID_H, 2) + return (data[0] << 8) | data[1] + + def soft_reset(self): + """ + Performs a software reset of the HM01B0 sensor. + This resets the sensor to its default state. 
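+
+        Note: in this driver, __init__ follows the reset with send_init(),
+        which re-sends INIT_COMMANDS to restore the configuration.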
+ """ + self.writeRegister(self.SW_RESET, self.HIMAX_RESET) + + def setMode(self, mode): + """ + Sets the operating mode of the HM01B0 sensor. + Args: + mode (int): The mode to set, e.g., MODE_STREAMING. + """ + self.writeRegister(self.MODE_SELECT, mode) + + def trigger(self): + self.writeRegister(self.MODE_SELECT, self.HIMAX_MODE_STREAMING_NFRAMES) + + def set_n_frames(self, n_frames): + self.writeRegister(self.PMU_AUTOSLEEP_FRAMECNT, n_frames) + + def send_init(self, num_data_pins): + """ + Initializes the HM01B0 sensor with default settings. + This includes setting up exposure, gain, and frame timing. + """ + for reg, value in self.INIT_COMMANDS: + if reg == 0x3059: + # Set the data pin mode based on the number of data pins + if num_data_pins == 1: + value = 0x22 + elif num_data_pins == 4: + value = 0x42 + else: + value = 0x02 + self.writeRegister(reg, value) + sleep_us(1000) + + def read(self, image = None): + """ + Reads a frame from the camera. + Returns: + tuple: (success, frame) + """ + return (True, cv2.cvtColor(self.buffer, cv2.COLOR_BayerRG2BGR, image)) diff --git a/cv2_drivers/cameras/hm01b0_pio.py b/cv2_drivers/cameras/hm01b0_pio.py index 26de674..2a30301 100644 --- a/cv2_drivers/cameras/hm01b0_pio.py +++ b/cv2_drivers/cameras/hm01b0_pio.py @@ -1,185 +1,7 @@ -import rp2 -from machine import Pin -from ulab import numpy as np -from time import sleep_us -import cv2 - -# Derived from: -# https://github.com/openmv/openmv/blob/5acf5baf92b4314a549bdd068138e5df6cc0bac7/drivers/sensors/hm01b0.c -class HM01B0_PIO(): - - # Read only registers - MODEL_ID_H = 0x0000 - MODEL_ID_L = 0x0001 - FRAME_COUNT = 0x0005 - PIXEL_ORDER = 0x0006 - # Sensor mode control - MODE_SELECT = 0x0100 - IMG_ORIENTATION = 0x0101 - SW_RESET = 0x0103 - GRP_PARAM_HOLD = 0x0104 - # Sensor exposure gain control - INTEGRATION_H = 0x0202 - INTEGRATION_L = 0x0203 - ANALOG_GAIN = 0x0205 - DIGITAL_GAIN_H = 0x020E - DIGITAL_GAIN_L = 0x020F - # Frame timing control - FRAME_LEN_LINES_H = 0x0340 - FRAME_LEN_LINES_L = 0x0341 - LINE_LEN_PCK_H = 0x0342 - LINE_LEN_PCK_L = 0x0343 - # Binning mode control - READOUT_X = 0x0383 - READOUT_Y = 0x0387 - BINNING_MODE = 0x0390 - # Test pattern control - TEST_PATTERN_MODE = 0x0601 - # Black level control - BLC_CFG = 0x1000 - BLC_TGT = 0x1003 - BLI_EN = 0x1006 - BLC2_TGT = 0x1007 - # Sensor reserved - DPC_CTRL = 0x1008 - SINGLE_THR_HOT = 0x100B - SINGLE_THR_COLD = 0x100C - # VSYNC,HSYNC and pixel shift register - VSYNC_HSYNC_PIXEL_SHIFT_EN = 0x1012 - # Automatic exposure gain control - AE_CTRL = 0x2100 - AE_TARGET_MEAN = 0x2101 - AE_MIN_MEAN = 0x2102 - CONVERGE_IN_TH = 0x2103 - CONVERGE_OUT_TH = 0x2104 - MAX_INTG_H = 0x2105 - MAX_INTG_L = 0x2106 - MIN_INTG = 0x2107 - MAX_AGAIN_FULL = 0x2108 - MAX_AGAIN_BIN2 = 0x2109 - MIN_AGAIN = 0x210A - MAX_DGAIN = 0x210B - MIN_DGAIN = 0x210C - DAMPING_FACTOR = 0x210D - FS_CTRL = 0x210E - FS_60HZ_H = 0x210F - FS_60HZ_L = 0x2110 - FS_50HZ_H = 0x2111 - FS_50HZ_L = 0x2112 - FS_HYST_TH = 0x2113 - # Motion detection control - MD_CTRL = 0x2150 - I2C_CLEAR = 0x2153 - WMEAN_DIFF_TH_H = 0x2155 - WMEAN_DIFF_TH_M = 0x2156 - WMEAN_DIFF_TH_L = 0x2157 - MD_THH = 0x2158 - MD_THM1 = 0x2159 - MD_THM2 = 0x215A - MD_THL = 0x215B - STATISTIC_CTRL = 0x2000 - MD_LROI_X_START_H = 0x2011 - MD_LROI_X_START_L = 0x2012 - MD_LROI_Y_START_H = 0x2013 - MD_LROI_Y_START_L = 0x2014 - MD_LROI_X_END_H = 0x2015 - MD_LROI_X_END_L = 0x2016 - MD_LROI_Y_END_H = 0x2017 - MD_LROI_Y_END_L = 0x2018 - MD_INTERRUPT = 0x2160 - # Sensor timing control - QVGA_WIN_EN = 0x3010 - SIX_BIT_MODE_EN = 
0x3011 - PMU_AUTOSLEEP_FRAMECNT = 0x3020 - ADVANCE_VSYNC = 0x3022 - ADVANCE_HSYNC = 0x3023 - EARLY_GAIN = 0x3035 - # IO and clock control - BIT_CONTROL = 0x3059 - OSC_CLK_DIV = 0x3060 - ANA_Register_11 = 0x3061 - IO_DRIVE_STR = 0x3062 - IO_DRIVE_STR2 = 0x3063 - ANA_Register_14 = 0x3064 - OUTPUT_PIN_STATUS_CONTROL = 0x3065 - ANA_Register_17 = 0x3067 - PCLK_POLARITY = 0x3068 - - # Useful values of Himax registers - HIMAX_RESET = 0x01 - HIMAX_MODE_STANDBY = 0x00 - HIMAX_MODE_STREAMING = 0x01 # I2C triggered streaming enable - HIMAX_MODE_STREAMING_NFRAMES = 0x03 # Output N frames - HIMAX_MODE_STREAMING_TRIG = 0x05 # Hardware Trigger - # HIMAX_SET_HMIRROR (r, x) ((r & 0xFE) | ((x & 1) << 0)) - # HIMAX_SET_VMIRROR (r, x) ((r & 0xFD) | ((x & 1) << 1)) - - PCLK_RISING_EDGE = 0x00 - PCLK_FALLING_EDGE = 0x01 - AE_CTRL_ENABLE = 0x00 - AE_CTRL_DISABLE = 0x01 - - HIMAX_BOOT_RETRY = 10 - HIMAX_LINE_LEN_PCK_FULL = 0x178 - HIMAX_FRAME_LENGTH_FULL = 0x109 - - HIMAX_LINE_LEN_PCK_QVGA = 0x178 - HIMAX_FRAME_LENGTH_QVGA = 0x104 - - HIMAX_LINE_LEN_PCK_QQVGA = 0x178 - HIMAX_FRAME_LENGTH_QQVGA = 0x084 - - INIT_COMMANDS = ( - (0x3044, 0x0A), # Increase CDS time for settling - (0x3045, 0x00), # Make symmetric for cds_tg and rst_tg - (0x3047, 0x0A), # Increase CDS time for settling - (0x3050, 0xC0), # Make negative offset up to 4x - (0x3051, 0x42), - (0x3052, 0x50), - (0x3053, 0x00), - (0x3054, 0x03), # tuning sf sig clamping as lowest - (0x3055, 0xF7), # tuning dsun - (0x3056, 0xF8), # increase adc nonoverlap clk - (0x3057, 0x29), # increase adc pwr for missing code - (0x3058, 0x1F), # turn on dsun - (0x3059, 0x1E), - (0x3064, 0x00), - (0x3065, 0x04), # pad pull 0 - (ANA_Register_17, 0x00), # Disable internal oscillator - - (0x1012, 0x00), # Sync. shift disable - - (AE_CTRL, 0x01), #Automatic Exposure - (AE_TARGET_MEAN, 0x80), #AE target mean [Def: 0x3C] - (AE_MIN_MEAN, 0x0A), #AE min target mean [Def: 0x0A] - (CONVERGE_IN_TH, 0x03), #Converge in threshold [Def: 0x03] - (CONVERGE_OUT_TH, 0x05), #Converge out threshold [Def: 0x05] - (MAX_INTG_H, (HIMAX_FRAME_LENGTH_QVGA - 2) >> 8), #Maximum INTG High Byte [Def: 0x01] - (MAX_INTG_L, (HIMAX_FRAME_LENGTH_QVGA - 2) & 0xFF), #Maximum INTG Low Byte [Def: 0x54] - (MAX_AGAIN_FULL, 0x04), #Maximum Analog gain in full frame mode [Def: 0x03] - (MAX_AGAIN_BIN2, 0x04), #Maximum Analog gain in bin2 mode [Def: 0x04] - (MAX_DGAIN, 0xC0), - - (INTEGRATION_H, 0x01), #Integration H [Def: 0x01] - (INTEGRATION_L, 0x08), #Integration L [Def: 0x08] - (ANALOG_GAIN, 0x00), #Analog Global Gain [Def: 0x00] - (DAMPING_FACTOR, 0x20), #Damping Factor [Def: 0x20] - (DIGITAL_GAIN_H, 0x01), #Digital Gain High [Def: 0x01] - (DIGITAL_GAIN_L, 0x00), #Digital Gain Low [Def: 0x00] - - (MD_CTRL, 0x00), - (FRAME_LEN_LINES_H, HIMAX_FRAME_LENGTH_QVGA >> 8), - (FRAME_LEN_LINES_L, HIMAX_FRAME_LENGTH_QVGA & 0xFF), - (LINE_LEN_PCK_H, HIMAX_LINE_LEN_PCK_QVGA >> 8), - (LINE_LEN_PCK_L, HIMAX_LINE_LEN_PCK_QVGA & 0xFF), - (QVGA_WIN_EN, 0x01), # Enable QVGA window readout - (0x3059, 0x22), # 1-bit mode - (OSC_CLK_DIV, 0x14), - (IMG_ORIENTATION, 0x00), # change the orientation - (0x0104, 0x01), - (MODE_SELECT, 0x01), # Streaming mode - ) +from .hm01b0 import HM01B0 +from .dvp_rp2_pio import DVP_RP2_PIO +class HM01B0_PIO(HM01B0, DVP_RP2_PIO): def __init__( self, i2c, @@ -187,172 +9,17 @@ def __init__( pin_vsync, pin_hsync, pin_pclk, + pin_xclk = None, sm_id = 0, + num_data_pins = 1, i2c_address = 0x24, ): - self.i2c = i2c - self.pin_d0 = pin_d0 - self.pin_vsync = pin_vsync - self.pin_hsync = pin_hsync - self.pin_pclk = 
pin_pclk - self.sm_id = sm_id - self.i2c_address = i2c_address - self.buffer = np.zeros((244, 324), dtype=np.uint8) - - Pin(pin_d0, Pin.IN) - Pin(pin_vsync, Pin.IN) - Pin(pin_hsync, Pin.IN) - Pin(pin_pclk, Pin.IN) - - self.soft_reset() - self.send_init() - self.start_pio_dma() - - def is_connected(self): - try: - # Try to read the chip ID - # If it throws an I/O error - the device isn't connected - id = self.getChipID() - - # Confirm the chip ID is correct - if id == 0x01B0: - return True - else: - return False - except: - return False - - def getChipID(self): - """ - Reads the chip ID from the HM01B0 sensor. - Returns: - int: The chip ID as a 16-bit integer. - """ - data = self.readRegister(self.MODEL_ID_H, 2) - return (data[0] << 8) | data[1] - - def soft_reset(self): - """ - Performs a software reset of the HM01B0 sensor. - This resets the sensor to its default state. - """ - self.writeRegister(self.SW_RESET, self.HIMAX_RESET) - - def setMode(self, mode): - """ - Sets the operating mode of the HM01B0 sensor. - Args: - mode (int): The mode to set, e.g., MODE_STREAMING. - """ - self.writeRegister(self.MODE_SELECT, mode) - - def trigger(self): - self.writeRegister(self.MODE_SELECT, self.HIMAX_MODE_STREAMING_NFRAMES) - - def set_n_frames(self, n_frames): - self.writeRegister(self.PMU_AUTOSLEEP_FRAMECNT, n_frames) - - def send_init(self): - """ - Initializes the HM01B0 sensor with default settings. - This includes setting up exposure, gain, and frame timing. - """ - for reg, value in self.INIT_COMMANDS: - self.writeRegister(reg, value) - sleep_us(1000) - - def readRegister(self, reg, nbytes=1): - self.i2c.writeto(self.i2c_address, bytes([reg >> 8, reg & 0xFF])) - return self.i2c.readfrom(self.i2c_address, nbytes) - - def writeRegister(self, reg, data): - if isinstance(data, int): - data = bytes([data]) - elif isinstance(data, (list, tuple)): - data = bytes(data) - self.i2c.writeto(self.i2c_address, bytes([reg >> 8, reg & 0xFF]) + data) - - def start_pio_dma(self): - program = self._pio_read_dvp - program[0][0] |= self.pin_hsync & 0x1F - program[0][1] |= self.pin_pclk & 0x1F - program[0][3] |= self.pin_pclk & 0x1F - self.sm = rp2.StateMachine( - self.sm_id, - program, - in_base = self.pin_d0 - ) - self.sm.active(1) - - self.dma = rp2.DMA() - req_num = ((self.sm_id // 4) << 3) + (self.sm_id % 4) + 4 - dma_ctrl = self.dma.pack_ctrl( - size = 2, # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit - inc_read = False, - treq_sel = req_num, - bswap = True - # irq_quiet = False - ) - self.dma.config( - read = self.sm, - count = 244 * 324 // 4, - ctrl = dma_ctrl - ) - - def active(self, active = None): - if active == None: - return self.sm.active() - - self.sm.active(active) - - if active: - Pin(self.pin_vsync).irq( - trigger = Pin.IRQ_FALLING, - handler = lambda pin: self._vsync_handler() - ) - else: - Pin(self.pin_vsync).irq( - handler = None - ) + # Call both parent constructors + HM01B0.__init__(self, i2c, i2c_address, num_data_pins) + DVP_RP2_PIO.__init__(self, pin_d0, pin_vsync, pin_hsync, pin_pclk, pin_xclk, sm_id, num_data_pins) def open(self): self.active(True) def release(self): self.active(False) - - def read(self, image = None): - """ - Reads a frame from the camera. 
- Returns: - tuple: (success, frame) - """ - return (True, cv2.cvtColor(self.buffer, cv2.COLOR_BayerRG2BGR, image)) - - def _vsync_handler(self): - # Disable DMA before reconfiguring it - self.dma.active(False) - - # Reset state machine to ensure ISR is cleared - self.sm.restart() - - # Ensure PIO RX FIFO is empty (it's not emptied by `sm.restart()`) - while self.sm.rx_fifo() > 0: - self.sm.get() - - # Reset the DMA write address - self.dma.write = self.buffer - - # Start the DMA - self.dma.active(True) - - @rp2.asm_pio( - in_shiftdir = rp2.PIO.SHIFT_LEFT, - push_thresh = 32, - autopush = True - ) - def _pio_read_dvp(): - wait(1, gpio, 0) # Mask in HSYNC pin - wait(1, gpio, 0) # Mask in PCLK pin - in_(pins, 1) # Mask in number of pins - wait(0, gpio, 0) # Mask in PCLK pin From 07fa6632559ef8d88aa5efe6c34d3f3d74df6fa1 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 26 Jun 2025 10:52:51 -0600 Subject: [PATCH 086/158] Add initial OV5640 PIO driver (broken) Not fully functioning, DMA seems to not be transferring fast enough??? --- cv2_drivers/cameras/__init__.py | 3 +- cv2_drivers/cameras/ov5640.py | 1124 +++++++++++++++++++++++++++++ cv2_drivers/cameras/ov5640_pio.py | 25 + 3 files changed, 1151 insertions(+), 1 deletion(-) create mode 100644 cv2_drivers/cameras/ov5640.py create mode 100644 cv2_drivers/cameras/ov5640_pio.py diff --git a/cv2_drivers/cameras/__init__.py b/cv2_drivers/cameras/__init__.py index 293266b..f52e57c 100644 --- a/cv2_drivers/cameras/__init__.py +++ b/cv2_drivers/cameras/__init__.py @@ -1 +1,2 @@ -from . import hm01b0_pio \ No newline at end of file +from . import hm01b0_pio +from . import ov5640_pio \ No newline at end of file diff --git a/cv2_drivers/cameras/ov5640.py b/cv2_drivers/cameras/ov5640.py new file mode 100644 index 0000000..5d5d828 --- /dev/null +++ b/cv2_drivers/cameras/ov5640.py @@ -0,0 +1,1124 @@ +from .dvp_camera import DVP_Camera +from time import sleep_us +import cv2 +# from micropython import const + +# Derived from: +# https://github.com/adafruit/Adafruit_CircuitPython_OV5640 +class OV5640(DVP_Camera): + + OV5640_COLOR_RGB = 0 + OV5640_COLOR_YUV = 1 + OV5640_COLOR_GRAYSCALE = 2 + OV5640_COLOR_JPEG = 3 + + # fmt: off + + _SYSTEM_RESET00 = 0x3000 # Reset for Individual Block + # (0: enable block; 1: reset block) + # Bit[7]: Reset BIST + # Bit[6]: Reset MCU program memory + # Bit[5]: Reset MCU + # Bit[4]: Reset OTP + # Bit[3]: Reset STB + # Bit[2]: Reset d5060 + # Bit[1]: Reset timing control + # Bit[0]: Reset array control + + _SYSTEM_RESET02 = 0x3002 # Reset for Individual Block + # (0: enable block; 1: reset block) + # Bit[7]: Reset VFIFO + # Bit[5]: Reset format + # Bit[4]: Reset JFIFO + # Bit[3]: Reset SFIFO + # Bit[2]: Reset JPG + # Bit[1]: Reset format MUX + # Bit[0]: Reset average + + _CLOCK_ENABLE02 = 0x3006 # Clock Enable Control + # (0: disable clock; 1: enable clock) + # Bit[7]: Enable PSRAM clock + # Bit[6]: Enable FMT clock + # Bit[5]: Enable JPEG 2x clock + # Bit[3]: Enable JPEG clock + # Bit[1]: Enable format MUX clock + # Bit[0]: Enable average clock + + _SYSTEM_CTROL0 = 0x3008 + # Bit[7]: Software reset + # Bit[6]: Software power down + # Bit[5]: Reserved + # Bit[4]: SRB clock SYNC enable + # Bit[3]: Isolation suspend select + # Bit[2:0]: Not used + + _CHIP_ID_HIGH = 0x300A + + _DRIVE_CAPABILITY = 0x302C + # Bit[7:6]: + # 00: 1x + # 01: 2x + # 10: 3x + # 11: 4x + + _SC_PLLS_CTRL0 = 0x303A + # Bit[7]: PLLS bypass + _SC_PLLS_CTRL1 = 0x303B + # Bit[4:0]: PLLS multiplier + _SC_PLLS_CTRL2 = 0x303C + # Bit[6:4]: PLLS charge pump 
control + # Bit[3:0]: PLLS system divider + _SC_PLLS_CTRL3 = 0x303D + # Bit[5:4]: PLLS pre-divider + # 00: 1 + # 01: 1.5 + # 10: 2 + # 11: 3 + # Bit[2]: PLLS root-divider - 1 + # Bit[1:0]: PLLS seld5 + # 00: 1 + # 01: 1 + # 10: 2 + # 11: 2.5 + + # AEC/AGC control functions + _AEC_PK_MANUAL = 0x3503 + # AEC Manual Mode Control + # Bit[7:6]: Reserved + # Bit[5]: Gain delay option + # Valid when 0x3503[4]=1’b0 + # 0: Delay one frame latch + # 1: One frame latch + # Bit[4:2]: Reserved + # Bit[1]: AGC manual + # 0: Auto enable + # 1: Manual enable + # Bit[0]: AEC manual + # 0: Auto enable + # 1: Manual enable + + # gain = {0x350A[1:0], 0x350B[7:0]} / 16 + + + _X_ADDR_ST_H = 0x3800 + # Bit[3:0]: X address start[11:8] + _X_ADDR_ST_L = 0x3801 + # Bit[7:0]: X address start[7:0] + _Y_ADDR_ST_H = 0x3802 + # Bit[2:0]: Y address start[10:8] + _Y_ADDR_ST_L = 0x3803 + # Bit[7:0]: Y address start[7:0] + _X_ADDR_END_H = 0x3804 + # Bit[3:0]: X address end[11:8] + _X_ADDR_END_L = 0x3805 + # Bit[7:0]: + _Y_ADDR_END_H = 0x3806 + # Bit[2:0]: Y address end[10:8] + _Y_ADDR_END_L = 0x3807 + # Bit[7:0]: + # Size after scaling + _X_OUTPUT_SIZE_H = 0x3808 + # Bit[3:0]: DVP output horizontal width[11:8] + _X_OUTPUT_SIZE_L = 0x3809 + # Bit[7:0]: + _Y_OUTPUT_SIZE_H = 0x380A + # Bit[2:0]: DVP output vertical height[10:8] + _Y_OUTPUT_SIZE_L = 0x380B + # Bit[7:0]: + _X_TOTAL_SIZE_H = 0x380C + # Bit[3:0]: Total horizontal size[11:8] + _X_TOTAL_SIZE_L = 0x380D + # Bit[7:0]: + _Y_TOTAL_SIZE_H = 0x380E + # Bit[7:0]: Total vertical size[15:8] + _Y_TOTAL_SIZE_L = 0x380F + # Bit[7:0]: + _X_OFFSET_H = 0x3810 + # Bit[3:0]: ISP horizontal offset[11:8] + _X_OFFSET_L = 0x3811 + # Bit[7:0]: + _Y_OFFSET_H = 0x3812 + # Bit[2:0]: ISP vertical offset[10:8] + _Y_OFFSET_L = 0x3813 + # Bit[7:0]: + _X_INCREMENT = 0x3814 + # Bit[7:4]: Horizontal odd subsample increment + # Bit[3:0]: Horizontal even subsample increment + _Y_INCREMENT = 0x3815 + # Bit[7:4]: Vertical odd subsample increment + # Bit[3:0]: Vertical even subsample increment + # Size before scaling + # X_INPUT_SIZE = (X_ADDR_END - X_ADDR_ST + 1 - (2 * X_OFFSET)) + # Y_INPUT_SIZE = (Y_ADDR_END - Y_ADDR_ST + 1 - (2 * Y_OFFSET)) + + # mirror and flip registers + _TIMING_TC_REG20 = 0x3820 + # Timing Control Register + # Bit[2:1]: Vertical flip enable + # 00: Normal + # 11: Vertical flip + # Bit[0]: Vertical binning enable + _TIMING_TC_REG21 = 0x3821 + # Timing Control Register + # Bit[5]: Compression Enable + # Bit[2:1]: Horizontal mirror enable + # 00: Normal + # 11: Horizontal mirror + # Bit[0]: Horizontal binning enable + + _PCLK_RATIO = 0x3824 + # Bit[4:0]: PCLK ratio manual + + # frame control registers + _FRAME_CTRL01 = 0x4201 + # Control Passed Frame Number When both ON and OFF number set to 0x00,frame + # control is in bypass mode + # Bit[7:4]: Not used + # Bit[3:0]: Frame ON number + _FRAME_CTRL02 = 0x4202 + # Control Masked Frame Number When both ON and OFF number set to 0x00,frame + # control is in bypass mode + # Bit[7:4]: Not used + # BIT[3:0]: Frame OFF number + + # format control registers + _FORMAT_CTRL00 = 0x4300 + + _CLOCK_POL_CONTROL = 0x4740 + # Bit[5]: PCLK polarity 0: active low + # 1: active high + # Bit[3]: Gate PCLK under VSYNC + # Bit[2]: Gate PCLK under HREF + # Bit[1]: HREF polarity + # 0: active low + # 1: active high + # Bit[0] VSYNC polarity + # 0: active low + # 1: active high + + _ISP_CONTROL_01 = 0x5001 + # Bit[5]: Scale enable + # 0: Disable + # 1: Enable + + # output format control registers + _FORMAT_CTRL = 0x501F + # Format select + # Bit[2:0]: + # 
000: YUV422 + # 001: RGB + # 010: Dither + # 011: RAW after DPC + # 101: RAW after CIP + + # ISP top control registers + _PRE_ISP_TEST_SETTING_1 = 0x503D + # Bit[7]: Test enable + # 0: Test disable + # 1: Color bar enable + # Bit[6]: Rolling + # Bit[5]: Transparent + # Bit[4]: Square black and white + # Bit[3:2]: Color bar style + # 00: Standard 8 color bar + # 01: Gradual change at vertical mode 1 + # 10: Gradual change at horizontal + # 11: Gradual change at vertical mode 2 + # Bit[1:0]: Test select + # 00: Color bar + # 01: Random data + # 10: Square data + # 11: Black image + + # exposure = {0x3500[3:0], 0x3501[7:0], 0x3502[7:0]} / 16 × tROW + + _SCALE_CTRL_1 = 0x5601 + # Bit[6:4]: HDIV RW + # DCW scale times + # 000: DCW 1 time + # 001: DCW 2 times + # 010: DCW 4 times + # 100: DCW 8 times + # 101: DCW 16 times + # Others: DCW 16 times + # Bit[2:0]: VDIV RW + # DCW scale times + # 000: DCW 1 time + # 001: DCW 2 times + # 010: DCW 4 times + # 100: DCW 8 times + # 101: DCW 16 times + # Others: DCW 16 times + + _SCALE_CTRL_2 = 0x5602 + # X_SCALE High Bits + _SCALE_CTRL_3 = 0x5603 + # X_SCALE Low Bits + _SCALE_CTRL_4 = 0x5604 + # Y_SCALE High Bits + _SCALE_CTRL_5 = 0x5605 + # Y_SCALE Low Bits + _SCALE_CTRL_6 = 0x5606 + # Bit[3:0]: V Offset + + _VFIFO_CTRL0C = 0x460C + # Bit[1]: PCLK manual enable + # 0: Auto + # 1: Manual by PCLK_RATIO + + _VFIFO_X_SIZE_H = 0x4602 + _VFIFO_X_SIZE_L = 0x4603 + _VFIFO_Y_SIZE_H = 0x4604 + _VFIFO_Y_SIZE_L = 0x4605 + + _COMPRESSION_CTRL00 = 0x4400 + _COMPRESSION_CTRL01 = 0x4401 + _COMPRESSION_CTRL02 = 0x4402 + _COMPRESSION_CTRL03 = 0x4403 + _COMPRESSION_CTRL04 = 0x4404 + _COMPRESSION_CTRL05 = 0x4405 + _COMPRESSION_CTRL06 = 0x4406 + _COMPRESSION_CTRL07 = 0x4407 + # Bit[5:0]: QS + _COMPRESSION_ISI_CTRL = 0x4408 + _COMPRESSION_CTRL09 = 0x4409 + _COMPRESSION_CTRL0A = 0x440A + _COMPRESSION_CTRL0B = 0x440B + _COMPRESSION_CTRL0C = 0x440C + _COMPRESSION_CTRL0D = 0x440D + _COMPRESSION_CTRL0E = 0x440E + + _TEST_COLOR_BAR = 0xC0 + # Enable Color Bar roling Test + + _AEC_PK_MANUAL_AGC_MANUALEN = 0x02 + # Enable AGC Manual enable + _AEC_PK_MANUAL_AEC_MANUALEN = 0x01 + # Enable AEC Manual enable + + _TIMING_TC_REG20_VFLIP = 0x06 + # Vertical flip enable + _TIMING_TC_REG21_HMIRROR = 0x06 + # Horizontal mirror enable + + OV5640_SIZE_96X96 = 0 # 96x96 + OV5640_SIZE_QQVGA = 1 # 160x120 + OV5640_SIZE_QCIF = 2 # 176x144 + OV5640_SIZE_HQVGA = 3 # 240x176 + OV5640_SIZE_240X240 = 4 # 240x240 + OV5640_SIZE_QVGA = 5 # 320x240 + OV5640_SIZE_CIF = 6 # 400x296 + OV5640_SIZE_HVGA = 7 # 480x320 + OV5640_SIZE_VGA = 8 # 640x480 + OV5640_SIZE_SVGA = 9 # 800x600 + OV5640_SIZE_XGA = 10 # 1024x768 + OV5640_SIZE_HD = 11 # 1280x720 + OV5640_SIZE_SXGA = 12 # 1280x1024 + OV5640_SIZE_UXGA = 13 # 1600x1200 + OV5640_SIZE_QHDA = 14 # 2560x1440 + OV5640_SIZE_WQXGA = 15 # 2560x1600 + OV5640_SIZE_PFHD = 16 # 1088x1920 + OV5640_SIZE_QSXGA = 17 # 2560x1920 + + _ASPECT_RATIO_4X3 = 0 + _ASPECT_RATIO_3X2 = 1 + _ASPECT_RATIO_16X10 = 2 + _ASPECT_RATIO_5X3 = 3 + _ASPECT_RATIO_16X9 = 4 + _ASPECT_RATIO_21X9 = 5 + _ASPECT_RATIO_5X4 = 6 + _ASPECT_RATIO_1X1 = 7 + _ASPECT_RATIO_9X16 = 8 + + _resolution_info = [ + [96, 96, _ASPECT_RATIO_1X1], # 96x96 + [160, 120, _ASPECT_RATIO_4X3], # QQVGA + [176, 144, _ASPECT_RATIO_5X4], # QCIF + [240, 176, _ASPECT_RATIO_4X3], # HQVGA + [240, 240, _ASPECT_RATIO_1X1], # 240x240 + [320, 240, _ASPECT_RATIO_4X3], # QVGA + [400, 296, _ASPECT_RATIO_4X3], # CIF + [480, 320, _ASPECT_RATIO_3X2], # HVGA + [640, 480, _ASPECT_RATIO_4X3], # VGA + [800, 600, _ASPECT_RATIO_4X3], # SVGA + [1024, 768, 
_ASPECT_RATIO_4X3], # XGA + [1280, 720, _ASPECT_RATIO_16X9], # HD + [1280, 1024, _ASPECT_RATIO_5X4], # SXGA + [1600, 1200, _ASPECT_RATIO_4X3], # UXGA + [2560, 1440, _ASPECT_RATIO_16X9], # QHD + [2560, 1600, _ASPECT_RATIO_16X10], # WQXGA + [1088, 1920, _ASPECT_RATIO_9X16], # Portrait FHD + [2560, 1920, _ASPECT_RATIO_4X3], # QSXGA + + ] + + + _ratio_table = [ + # mw, mh, sx, sy, ex, ey, ox, oy, tx, ty + [2560, 1920, 0, 0, 2623, 1951, 32, 16, 2844, 1968], # 4x3 + [2560, 1704, 0, 110, 2623, 1843, 32, 16, 2844, 1752], # 3x2 + [2560, 1600, 0, 160, 2623, 1791, 32, 16, 2844, 1648], # 16x10 + [2560, 1536, 0, 192, 2623, 1759, 32, 16, 2844, 1584], # 5x3 + [2560, 1440, 0, 240, 2623, 1711, 32, 16, 2844, 1488], # 16x9 + [2560, 1080, 0, 420, 2623, 1531, 32, 16, 2844, 1128], # 21x9 + [2400, 1920, 80, 0, 2543, 1951, 32, 16, 2684, 1968], # 5x4 + [1920, 1920, 320, 0, 2543, 1951, 32, 16, 2684, 1968], # 1x1 + [1088, 1920, 736, 0, 1887, 1951, 32, 16, 1884, 1968], # 9x16 + ] + + _pll_pre_div2x_factors = [1, 1, 2, 3, 4, 1.5, 6, 2.5, 8] + _pll_pclk_root_div_factors = [1,2,4,8] + + _REG_DLY = 0xFFFF + _REGLIST_TAIL = 0x0000 + + _OV5640_STAT_FIRMWAREBAD = 0x7F + _OV5640_STAT_STARTUP = 0x7E + _OV5640_STAT_IDLE = 0x70 + _OV5640_STAT_FOCUSING = 0x00 + _OV5640_STAT_FOCUSED = 0x10 + + _OV5640_CMD_TRIGGER_AUTOFOCUS = 0x03 + _OV5640_CMD_AUTO_AUTOFOCUS = 0x04 + _OV5640_CMD_RELEASE_FOCUS = 0x08 + _OV5640_CMD_AF_SET_VCM_STEP = 0x1A + _OV5640_CMD_AF_GET_VCM_STEP = 0x1B + + _OV5640_CMD_MAIN = 0x3022 + _OV5640_CMD_ACK = 0x3023 + _OV5640_CMD_PARA0 = 0x3024 + _OV5640_CMD_PARA1 = 0x3025 + _OV5640_CMD_PARA2 = 0x3026 + _OV5640_CMD_PARA3 = 0x3027 + _OV5640_CMD_PARA4 = 0x3028 + _OV5640_CMD_FW_STATUS = 0x3029 + + + _sensor_default_regs = [ + _SYSTEM_CTROL0, 0x82, # software reset + _REG_DLY, 10, # delay 10ms + _SYSTEM_CTROL0, 0x42, # power down + # enable pll + 0x3103, 0x13, + # io direction + 0x3017, 0xFF, + 0x3018, 0xFF, + _DRIVE_CAPABILITY, 0xC3, + _CLOCK_POL_CONTROL, 0x21, + 0x4713, 0x02, # jpg mode select + _ISP_CONTROL_01, 0x83, # turn color matrix, awb and SDE + # sys reset + _SYSTEM_RESET00, 0x00, # enable all blocks + _SYSTEM_RESET02, 0x1C, # reset jfifo, sfifo, jpg, fmux, avg + # clock enable + 0x3004, 0xFF, + _CLOCK_ENABLE02, 0xC3, + # isp control + 0x5000, 0xA7, + _ISP_CONTROL_01, 0xA3, # +scaling? 
+ 0x5003, 0x08, # special_effect + # unknown + 0x370C, 0x02, #!!IMPORTANT + 0x3634, 0x40, #!!IMPORTANT + # AEC/AGC + 0x3A02, 0x03, + 0x3A03, 0xD8, + 0x3A08, 0x01, + 0x3A09, 0x27, + 0x3A0A, 0x00, + 0x3A0B, 0xF6, + 0x3A0D, 0x04, + 0x3A0E, 0x03, + 0x3A0F, 0x30, # ae_level + 0x3A10, 0x28, # ae_level + 0x3A11, 0x60, # ae_level + 0x3A13, 0x43, + 0x3A14, 0x03, + 0x3A15, 0xD8, + 0x3A18, 0x00, # gainceiling + 0x3A19, 0xF8, # gainceiling + 0x3A1B, 0x30, # ae_level + 0x3A1E, 0x26, # ae_level + 0x3A1F, 0x14, # ae_level + # vcm debug + 0x3600, 0x08, + 0x3601, 0x33, + # 50/60Hz + 0x3C01, 0xA4, + 0x3C04, 0x28, + 0x3C05, 0x98, + 0x3C06, 0x00, + 0x3C07, 0x08, + 0x3C08, 0x00, + 0x3C09, 0x1C, + 0x3C0A, 0x9C, + 0x3C0B, 0x40, + 0x460C, 0x22, # disable jpeg footer + # BLC + 0x4001, 0x02, + 0x4004, 0x02, + # AWB + 0x5180, 0xFF, + 0x5181, 0xF2, + 0x5182, 0x00, + 0x5183, 0x14, + 0x5184, 0x25, + 0x5185, 0x24, + 0x5186, 0x09, + 0x5187, 0x09, + 0x5188, 0x09, + 0x5189, 0x75, + 0x518A, 0x54, + 0x518B, 0xE0, + 0x518C, 0xB2, + 0x518D, 0x42, + 0x518E, 0x3D, + 0x518F, 0x56, + 0x5190, 0x46, + 0x5191, 0xF8, + 0x5192, 0x04, + 0x5193, 0x70, + 0x5194, 0xF0, + 0x5195, 0xF0, + 0x5196, 0x03, + 0x5197, 0x01, + 0x5198, 0x04, + 0x5199, 0x12, + 0x519A, 0x04, + 0x519B, 0x00, + 0x519C, 0x06, + 0x519D, 0x82, + 0x519E, 0x38, + # color matrix (Saturation) + 0x5381, 0x1E, + 0x5382, 0x5B, + 0x5383, 0x08, + 0x5384, 0x0A, + 0x5385, 0x7E, + 0x5386, 0x88, + 0x5387, 0x7C, + 0x5388, 0x6C, + 0x5389, 0x10, + 0x538A, 0x01, + 0x538B, 0x98, + # CIP control (Sharpness) + 0x5300, 0x10, # sharpness + 0x5301, 0x10, # sharpness + 0x5302, 0x18, # sharpness + 0x5303, 0x19, # sharpness + 0x5304, 0x10, + 0x5305, 0x10, + 0x5306, 0x08, # denoise + 0x5307, 0x16, + 0x5308, 0x40, + 0x5309, 0x10, # sharpness + 0x530A, 0x10, # sharpness + 0x530B, 0x04, # sharpness + 0x530C, 0x06, # sharpness + # GAMMA + 0x5480, 0x01, + 0x5481, 0x00, + 0x5482, 0x1E, + 0x5483, 0x3B, + 0x5484, 0x58, + 0x5485, 0x66, + 0x5486, 0x71, + 0x5487, 0x7D, + 0x5488, 0x83, + 0x5489, 0x8F, + 0x548A, 0x98, + 0x548B, 0xA6, + 0x548C, 0xB8, + 0x548D, 0xCA, + 0x548E, 0xD7, + 0x548F, 0xE3, + 0x5490, 0x1D, + # Special Digital Effects (SDE) (UV adjust) + 0x5580, 0x06, # enable brightness and contrast + 0x5583, 0x40, # special_effect + 0x5584, 0x10, # special_effect + 0x5586, 0x20, # contrast + 0x5587, 0x00, # brightness + 0x5588, 0x00, # brightness + 0x5589, 0x10, + 0x558A, 0x00, + 0x558B, 0xF8, + 0x501D, 0x40, # enable manual offset of contrast + # power on + 0x3008, 0x02, + # 50Hz + 0x3C00, 0x04, + #_REG_DLY, 300, + ] + + + + _reset_awb = [ + _ISP_CONTROL_01, 0x83, # turn color matrix, awb and SDE + # sys reset + _SYSTEM_RESET00, 0x00, # enable all blocks + _SYSTEM_RESET02, 0x1C, # reset jfifo, sfifo, jpg, fmux, avg + # clock enable + #0x3004, 0xFF, + #_CLOCK_ENABLE02, 0xC3, + # isp control + 0x5000, 0xA7, + _ISP_CONTROL_01, 0xA3, # +scaling? 
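+        # NOTE: the entries below duplicate the tuning block from
+        # _sensor_default_regs (minus the reset/power/clock setup), presumably
+        # so the AWB and SDE settings can be re-applied on their own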
+ 0x5003, 0x08, # special_effect + # unknown + 0x370C, 0x02, #!!IMPORTANT + 0x3634, 0x40, #!!IMPORTANT + # AEC/AGC + 0x3A02, 0x03, + 0x3A03, 0xD8, + 0x3A08, 0x01, + 0x3A09, 0x27, + 0x3A0A, 0x00, + 0x3A0B, 0xF6, + 0x3A0D, 0x04, + 0x3A0E, 0x03, + 0x3A0F, 0x30, # ae_level + 0x3A10, 0x28, # ae_level + 0x3A11, 0x60, # ae_level + 0x3A13, 0x43, + 0x3A14, 0x03, + 0x3A15, 0xD8, + 0x3A18, 0x00, # gainceiling + 0x3A19, 0xF8, # gainceiling + 0x3A1B, 0x30, # ae_level + 0x3A1E, 0x26, # ae_level + 0x3A1F, 0x14, # ae_level + # vcm debug + 0x3600, 0x08, + 0x3601, 0x33, + # 50/60Hz + 0x3C01, 0xA4, + 0x3C04, 0x28, + 0x3C05, 0x98, + 0x3C06, 0x00, + 0x3C07, 0x08, + 0x3C08, 0x00, + 0x3C09, 0x1C, + 0x3C0A, 0x9C, + 0x3C0B, 0x40, + 0x460C, 0x22, # disable jpeg footer + # BLC + 0x4001, 0x02, + 0x4004, 0x02, + # AWB + 0x5180, 0xFF, + 0x5181, 0xF2, + 0x5182, 0x00, + 0x5183, 0x14, + 0x5184, 0x25, + 0x5185, 0x24, + 0x5186, 0x09, + 0x5187, 0x09, + 0x5188, 0x09, + 0x5189, 0x75, + 0x518A, 0x54, + 0x518B, 0xE0, + 0x518C, 0xB2, + 0x518D, 0x42, + 0x518E, 0x3D, + 0x518F, 0x56, + 0x5190, 0x46, + 0x5191, 0xF8, + 0x5192, 0x04, + 0x5193, 0x70, + 0x5194, 0xF0, + 0x5195, 0xF0, + 0x5196, 0x03, + 0x5197, 0x01, + 0x5198, 0x04, + 0x5199, 0x12, + 0x519A, 0x04, + 0x519B, 0x00, + 0x519C, 0x06, + 0x519D, 0x82, + 0x519E, 0x38, + # color matrix (Saturation) + 0x5381, 0x1E, + 0x5382, 0x5B, + 0x5383, 0x08, + 0x5384, 0x0A, + 0x5385, 0x7E, + 0x5386, 0x88, + 0x5387, 0x7C, + 0x5388, 0x6C, + 0x5389, 0x10, + 0x538A, 0x01, + 0x538B, 0x98, + # CIP control (Sharpness) + 0x5300, 0x10, # sharpness + 0x5301, 0x10, # sharpness + 0x5302, 0x18, # sharpness + 0x5303, 0x19, # sharpness + 0x5304, 0x10, + 0x5305, 0x10, + 0x5306, 0x08, # denoise + 0x5307, 0x16, + 0x5308, 0x40, + 0x5309, 0x10, # sharpness + 0x530A, 0x10, # sharpness + 0x530B, 0x04, # sharpness + 0x530C, 0x06, # sharpness + # GAMMA + 0x5480, 0x01, + 0x5481, 0x00, + 0x5482, 0x1E, + 0x5483, 0x3B, + 0x5484, 0x58, + 0x5485, 0x66, + 0x5486, 0x71, + 0x5487, 0x7D, + 0x5488, 0x83, + 0x5489, 0x8F, + 0x548A, 0x98, + 0x548B, 0xA6, + 0x548C, 0xB8, + 0x548D, 0xCA, + 0x548E, 0xD7, + 0x548F, 0xE3, + 0x5490, 0x1D, + # Special Digital Effects (SDE) (UV adjust) + 0x5580, 0x06, # enable brightness and contrast + 0x5583, 0x40, # special_effect + 0x5584, 0x10, # special_effect + 0x5586, 0x20, # contrast + 0x5587, 0x00, # brightness + 0x5588, 0x00, # brightness + 0x5589, 0x10, + 0x558A, 0x00, + 0x558B, 0xF8, + 0x501D, 0x40, # enable manual offset of contrast + ] + _sensor_format_jpeg = [ + _FORMAT_CTRL, 0x00, # YUV422 + _FORMAT_CTRL00, 0x30, # YUYV + _SYSTEM_RESET02, 0x00, # enable everything + _CLOCK_ENABLE02, 0xFF, # enable all clocks + 0x471C, 0x50, # 0xd0 to 0x50 !!! 
+ ] + + _sensor_format_raw = [ + _FORMAT_CTRL, 0x03, # RAW (DPC) + _FORMAT_CTRL00, 0x00, # RAW + ] + + _sensor_format_grayscale = [ + _FORMAT_CTRL, 0x00, # YUV422 + _FORMAT_CTRL00, 0x10, # Y8 + ] + + _sensor_format_yuv422 = [ + _FORMAT_CTRL, 0x00, # YUV422 + _FORMAT_CTRL00, 0x30, # YUYV + ] + + _sensor_format_rgb565 = [ + _FORMAT_CTRL, 0x01, # RGB + _FORMAT_CTRL00, 0x61, # RGB565 (BGR) + _SYSTEM_RESET02, 0x1C, # reset jfifo, sfifo, jpg, fmux, avg + _CLOCK_ENABLE02, 0xC3, # reset to how it was before (no jpg clock) + + ] + + _ov5640_color_settings = { + OV5640_COLOR_RGB: _sensor_format_rgb565, + OV5640_COLOR_YUV: _sensor_format_yuv422, + OV5640_COLOR_GRAYSCALE: _sensor_format_grayscale, + OV5640_COLOR_JPEG: _sensor_format_jpeg, + } + + _contrast_settings = [ + [0x20, 0x00], # 0 + [0x24, 0x10], # +1 + [0x28, 0x18], # +2 + [0x2c, 0x1c], # +3 + [0x14, 0x14], # -3 + [0x18, 0x18], # -2 + [0x1c, 0x1c], # -1 + ] + + _sensor_saturation_levels = [ + [0x1D, 0x60, 0x03, 0x0C, 0x78, 0x84, 0x7D, 0x6B, 0x12, 0x01, 0x98], # 0 + [0x1D, 0x60, 0x03, 0x0D, 0x84, 0x91, 0x8A, 0x76, 0x14, 0x01, 0x98], # +1 + [0x1D, 0x60, 0x03, 0x0E, 0x90, 0x9E, 0x96, 0x80, 0x16, 0x01, 0x98], # +2 + [0x1D, 0x60, 0x03, 0x10, 0x9C, 0xAC, 0xA2, 0x8B, 0x17, 0x01, 0x98], # +3 + [0x1D, 0x60, 0x03, 0x11, 0xA8, 0xB9, 0xAF, 0x96, 0x19, 0x01, 0x98], # +4 + [0x1D, 0x60, 0x03, 0x07, 0x48, 0x4F, 0x4B, 0x40, 0x0B, 0x01, 0x98], # -4 + [0x1D, 0x60, 0x03, 0x08, 0x54, 0x5C, 0x58, 0x4B, 0x0D, 0x01, 0x98], # -3 + [0x1D, 0x60, 0x03, 0x0A, 0x60, 0x6A, 0x64, 0x56, 0x0E, 0x01, 0x98], # -2 + [0x1D, 0x60, 0x03, 0x0B, 0x6C, 0x77, 0x70, 0x60, 0x10, 0x01, 0x98], # -1 + ] + + _sensor_ev_levels = [ + [0x38, 0x30, 0x61, 0x38, 0x30, 0x10], # 0 + [0x40, 0x38, 0x71, 0x40, 0x38, 0x10], # +1 + [0x50, 0x48, 0x90, 0x50, 0x48, 0x20], # +2 + [0x60, 0x58, 0xa0, 0x60, 0x58, 0x20], # +3 + [0x10, 0x08, 0x10, 0x08, 0x20, 0x10], # -3 + [0x20, 0x18, 0x41, 0x20, 0x18, 0x10], # -2 + [0x30, 0x28, 0x61, 0x30, 0x28, 0x10], # -1 + ] + + OV5640_WHITE_BALANCE_AUTO = 0 + OV5640_WHITE_BALANCE_SUNNY = 1 + OV5640_WHITE_BALANCE_FLUORESCENT = 2 + OV5640_WHITE_BALANCE_CLOUDY = 3 + OV5640_WHITE_BALANCE_INCANDESCENT = 4 + + _light_registers = [0x3406, 0x3400, 0x3401, 0x3402, 0x3403, 0x3404, 0x3405] + _light_modes = [ + [0x00, 0x04, 0x00, 0x04, 0x00, 0x04, 0x00], # auto + [0x01, 0x06, 0x1c, 0x04, 0x00, 0x04, 0xf3], # sunny + [0x01, 0x05, 0x48, 0x04, 0x00, 0x07, 0xcf], # office / fluorescent + [0x01, 0x06, 0x48, 0x04, 0x00, 0x04, 0xd3], # cloudy + [0x01, 0x04, 0x10, 0x04, 0x00, 0x08, 0x40], # home / incandescent + + ] + + OV5640_SPECIAL_EFFECT_NONE = 0 + OV5640_SPECIAL_EFFECT_NEGATIVE = 1 + OV5640_SPECIAL_EFFECT_GRAYSCALE = 2 + OV5640_SPECIAL_EFFECT_RED_TINT = 3 + OV5640_SPECIAL_EFFECT_GREEN_TINT = 4 + OV5640_SPECIAL_EFFECT_BLUE_TINT = 5 + OV5640_SPECIAL_EFFECT_SEPIA = 6 + + _sensor_special_effects = [ + [0x06, 0x40, 0x10, 0x08], # Normal + [0x46, 0x40, 0x28, 0x08], # Negative + [0x1E, 0x80, 0x80, 0x08], # Grayscale + [0x1E, 0x80, 0xC0, 0x08], # Red Tint + [0x1E, 0x60, 0x60, 0x08], # Green Tint + [0x1E, 0xA0, 0x40, 0x08], # Blue Tint + [0x1E, 0x40, 0xA0, 0x08], # Sepia + ] + + _sensor_regs_gamma0 = [ + 0x5480, 0x01, + 0x5481, 0x08, + 0x5482, 0x14, + 0x5483, 0x28, + 0x5484, 0x51, + 0x5485, 0x65, + 0x5486, 0x71, + 0x5487, 0x7D, + 0x5488, 0x87, + 0x5489, 0x91, + 0x548A, 0x9A, + 0x548B, 0xAA, + 0x548C, 0xB8, + 0x548D, 0xCD, + 0x548E, 0xDD, + 0x548F, 0xEA, + 0x5490, 0x1D, + ] + + sensor_regs_gamma1 = [ + 0x5480, 0x1, + 0x5481, 0x0, + 0x5482, 0x1E, + 0x5483, 0x3B, + 0x5484, 0x58, + 0x5485, 0x66, + 
0x5486, 0x71,
+        0x5487, 0x7D,
+        0x5488, 0x83,
+        0x5489, 0x8F,
+        0x548A, 0x98,
+        0x548B, 0xA6,
+        0x548C, 0xB8,
+        0x548D, 0xCA,
+        0x548E, 0xD7,
+        0x548F, 0xE3,
+        0x5490, 0x1D,
+    ]
+
+    sensor_regs_awb0 = [
+        0x5180, 0xFF,
+        0x5181, 0xF2,
+        0x5182, 0x00,
+        0x5183, 0x14,
+        0x5184, 0x25,
+        0x5185, 0x24,
+        0x5186, 0x09,
+        0x5187, 0x09,
+        0x5188, 0x09,
+        0x5189, 0x75,
+        0x518A, 0x54,
+        0x518B, 0xE0,
+        0x518C, 0xB2,
+        0x518D, 0x42,
+        0x518E, 0x3D,
+        0x518F, 0x56,
+        0x5190, 0x46,
+        0x5191, 0xF8,
+        0x5192, 0x04,
+        0x5193, 0x70,
+        0x5194, 0xF0,
+        0x5195, 0xF0,
+        0x5196, 0x03,
+        0x5197, 0x01,
+        0x5198, 0x04,
+        0x5199, 0x12,
+        0x519A, 0x04,
+        0x519B, 0x00,
+        0x519C, 0x06,
+        0x519D, 0x82,
+        0x519E, 0x38,
+    ]
+    # fmt: on
+
+    def __init__(
+        self,
+        i2c,
+        i2c_address = 0x3C,
+        num_data_pins = 1
+    ):
+        super().__init__(i2c, i2c_address, (240, 320, 2))
+
+        # self.soft_reset()
+        # sleep_us(1_000_000)
+        self.write_list(self._sensor_default_regs)
+
+        self._colorspace = self.OV5640_COLOR_RGB
+        self._flip_x = False
+        self._flip_y = False
+        self._w = None
+        self._h = None
+        self._size = self.OV5640_SIZE_QVGA
+        self._test_pattern = False
+        self._binning = False
+        self._scale = False
+        self._ev = 0
+        self._white_balance = 0
+
+        self._set_size_and_colorspace()
+
+    def is_connected(self):
+        try:
+            # Try to read the chip ID
+            # If it throws an I/O error - the device isn't connected
+            id = self.getChipID()
+
+            # Confirm the chip ID is correct
+            if id == 0x5640:
+                return True
+            else:
+                return False
+        except:
+            return False
+
+    def getChipID(self):
+        """
+        Reads the chip ID from the OV5640 sensor.
+        Returns:
+            int: The chip ID as a 16-bit integer.
+        """
+        data = self.readRegister(self._CHIP_ID_HIGH, 2)
+        return (data[0] << 8) | data[1]
+
+    def soft_reset(self):
+        """
+        Performs a software reset of the OV5640 sensor.
+        This resets the sensor to its default state.
+        """
+        self.writeRegister(self._SYSTEM_CTROL0, 0x82)
+
+    # def setMode(self, mode):
+    #     """
+    #     Sets the operating mode of the sensor.
+    #     Args:
+    #         mode (int): The mode to set, e.g., MODE_STREAMING.
+    #     """
+    #     self.writeRegister(self.MODE_SELECT, mode)
+
+    # def trigger(self):
+    #     self.writeRegister(self.MODE_SELECT, self.HIMAX_MODE_STREAMING_NFRAMES)
+
+    # def set_n_frames(self, n_frames):
+    #     self.writeRegister(self.PMU_AUTOSLEEP_FRAMECNT, n_frames)
+
+    def write_list(self, data):
+        """
+        Writes a flat list of register, value pairs to the sensor, pausing
+        where the _REG_DLY sentinel appears and briefly after each write.
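+
+        A minimal sketch of the expected layout, using values that appear in
+        _sensor_default_regs:
+
+            self.write_list([
+                0x3008, 0x02,       # register, value
+                self._REG_DLY, 10,  # sentinel: value is passed to sleep_us()
+            ])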
+ """ + for i in range(len(data) // 2): + reg = data[i * 2] + value = data[i * 2 + 1] + print(i, reg, value) + if reg == self._REG_DLY: + sleep_us(value) + else: + self.writeRegister(reg, value) + sleep_us(1000) + + def _set_size_and_colorspace(self) -> None: + size = self._size + width, height, ratio = self._resolution_info[size] + self._w = width + self._h = height + ( + max_width, + max_height, + start_x, + start_y, + end_x, + end_y, + offset_x, + offset_y, + total_x, + total_y, + ) = self._ratio_table[ratio] + + self._binning = (width <= max_width // 2) and (height <= max_height // 2) + self._scale = not ( + (width == max_width and height == max_height) + or (width == max_width // 2 and height == max_height // 2) + ) + + self._write_addr_reg(self._X_ADDR_ST_H, start_x, start_y) + self._write_addr_reg(self._X_ADDR_END_H, end_x, end_y) + self._write_addr_reg(self._X_OUTPUT_SIZE_H, width, height) + + if not self._binning: + self._write_addr_reg(self._X_TOTAL_SIZE_H, total_x, total_y) + self._write_addr_reg(self._X_OFFSET_H, offset_x, offset_y) + else: + if width > 920: + self._write_addr_reg(self._X_TOTAL_SIZE_H, total_x - 200, total_y // 2) + else: + self._write_addr_reg(self._X_TOTAL_SIZE_H, 2060, total_y // 2) + self._write_addr_reg(self._X_OFFSET_H, offset_x // 2, offset_y // 2) + + self._write_reg_bits(self._ISP_CONTROL_01, 0x20, self._scale) + + self._set_image_options() + + # if self._colorspace == self.OV5640_COLOR_JPEG: + # sys_mul = 200 + # if size < self.OV5640_SIZE_QVGA: + # sys_mul = 160 + # if size < self.OV5640_SIZE_XGA: + # sys_mul = 180 + # self._set_pll(False, sys_mul, 4, 2, False, 2, True, 4) + # else: + # self._set_pll(False, 32, 1, 1, False, 1, True, 4) + self._set_pll(False, 32, 1, 1, False, 1, True, 4) + + self._set_colorspace() + + def _set_pll( + self, + bypass: bool, + multiplier: int, + sys_div: int, + pre_div: int, + root_2x: bool, + pclk_root_div: int, + pclk_manual: bool, + pclk_div: int, + ) -> None: + if ( + multiplier > 252 + or multiplier < 4 + or sys_div > 15 + or pre_div > 8 + or pclk_div > 31 + or pclk_root_div > 3 + ): + raise ValueError("Invalid argument to internal function") + + self.writeRegister(0x3039, 0x80 if bypass else 0) + self.writeRegister(0x3034, 0x1A) + self.writeRegister(0x3035, 1 | ((sys_div & 0xF) << 4)) + self.writeRegister(0x3036, multiplier & 0xFF) + self.writeRegister(0x3037, (pre_div & 0xF) | (0x10 if root_2x else 0)) + self.writeRegister(0x3108, (pclk_root_div & 3) << 4 | 0x06) + self.writeRegister(0x3824, pclk_div & 0x1F) + self.writeRegister(0x460C, 0x22 if pclk_manual else 0x22) + self.writeRegister(0x3103, 0x13) + + def _set_colorspace(self) -> None: + colorspace = self._colorspace + settings = self._ov5640_color_settings[colorspace] + + self.write_list(settings) + + def _set_image_options(self) -> None: + reg20 = reg21 = reg4514 = reg4514_test = 0 + if self._colorspace == self.OV5640_COLOR_JPEG: + reg21 |= 0x20 + + if self._binning: + reg20 |= 1 + reg21 |= 1 + reg4514_test |= 4 + else: + reg20 |= 0x40 + + if self._flip_y: + reg20 |= 0x06 + reg4514_test |= 1 + + if self._flip_x: + reg21 |= 0x06 + reg4514_test |= 2 + + if reg4514_test == 0: + reg4514 = 0x88 + elif reg4514_test == 1: + reg4514 = 0x00 + elif reg4514_test == 2: + reg4514 = 0xBB + elif reg4514_test == 3: + reg4514 = 0x00 + elif reg4514_test == 4: + reg4514 = 0xAA + elif reg4514_test == 5: + reg4514 = 0xBB + elif reg4514_test == 6: + reg4514 = 0xBB + elif reg4514_test == 7: + reg4514 = 0xAA + + self.writeRegister(self._TIMING_TC_REG20, reg20) + 
self.writeRegister(self._TIMING_TC_REG21, reg21) + self.writeRegister(0x4514, reg4514) + + if self._binning: + self.writeRegister(0x4520, 0x0B) + self.writeRegister(self._X_INCREMENT, 0x31) + self.writeRegister(self._Y_INCREMENT, 0x31) + else: + self.writeRegister(0x4520, 0x10) + self.writeRegister(self._X_INCREMENT, 0x11) + self.writeRegister(self._Y_INCREMENT, 0x11) + + def _write_addr_reg(self, reg: int, x_value: int, y_value: int) -> None: + self.writeRegister(reg, [ + (x_value >> 8) & 0xFF, + x_value & 0xFF, + (y_value >> 8) & 0xFF, + y_value & 0xFF, + ]) + + def _write_reg_bits(self, reg: int, mask: int, enable: bool) -> None: + val = self.readRegister(reg)[0] + if enable: + val |= mask + else: + val &= ~mask + self.writeRegister(reg, val) + + def read(self, image = None): + """ + Reads a frame from the camera. + Returns: + tuple: (success, frame) + """ + return (True, cv2.cvtColor(self.buffer, cv2.COLOR_BayerRG2BGR, image)) diff --git a/cv2_drivers/cameras/ov5640_pio.py b/cv2_drivers/cameras/ov5640_pio.py new file mode 100644 index 0000000..97af01b --- /dev/null +++ b/cv2_drivers/cameras/ov5640_pio.py @@ -0,0 +1,25 @@ +from .ov5640 import OV5640 +from .dvp_rp2_pio import DVP_RP2_PIO + +class OV5640_PIO(OV5640, DVP_RP2_PIO): + def __init__( + self, + i2c, + pin_d0, + pin_vsync, + pin_hsync, + pin_pclk, + pin_xclk = None, + sm_id = 0, + num_data_pins = 8, + i2c_address = 0x3c, + ): + # Call both parent constructors + DVP_RP2_PIO.__init__(self, pin_d0, pin_vsync, pin_hsync, pin_pclk, pin_xclk, sm_id, num_data_pins) + OV5640.__init__(self, i2c, i2c_address, num_data_pins) + + def open(self): + self.active(True) + + def release(self): + self.active(False) From bdb9fee56f37a2684a6919f1130687ef3ad6389e Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 26 Jun 2025 10:54:09 -0600 Subject: [PATCH 087/158] Remove base classes from display driver __init__.py --- cv2_drivers/displays/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cv2_drivers/displays/__init__.py b/cv2_drivers/displays/__init__.py index 8710847..8028cdc 100644 --- a/cv2_drivers/displays/__init__.py +++ b/cv2_drivers/displays/__init__.py @@ -1,4 +1,2 @@ -from . import cv2_display -from . import st7789 from . import st7789_spi from . import st7789_pio \ No newline at end of file From 184df9793d5d26c8031cb19c3d813256c0ab4a14 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 26 Jun 2025 11:02:27 -0600 Subject: [PATCH 088/158] Update boot.py with camera driver refactor --- examples/boot.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/examples/boot.py b/examples/boot.py index c570f74..6abf1cc 100644 --- a/examples/boot.py +++ b/examples/boot.py @@ -98,15 +98,30 @@ # a popular camera module for embedded systems. This example uses a PIO # driver, which is a peripheral interface only available on Raspberry Pi RP2 # processors - from cv2_drivers.cameras import hm01b0_pio + import cv2_drivers.cameras as cameras # Create a camera object. 
This will depend on the camera driver you are # using, and you may need to adjust the parameters based on your specific # camera and board configuration - camera = hm01b0_pio.HM01B0_PIO(i2c, - pin_d0=12, - pin_vsync=13, - pin_hsync=14, - pin_pclk=15) + camera = cameras.hm01b0_pio.HM01B0_PIO( + i2c, + pin_d0=12, + pin_vsync=13, + pin_hsync=14, + pin_pclk=15, + pin_xclk=None, # Optional xclock pin, specify if needed + num_data_pins=1 # Number of data pins used by the camera (1, 4, or 8) + ) + + # camera = cameras.ov5640_pio.OV5640_PIO( + # i2c, + # pin_d0=8, + # pin_vsync=22, + # pin_hsync=21, + # pin_pclk=20, + # pin_xclk=None # Optional xclock pin, specify if needed + # ) except ImportError: print("boot.py - Camera driver module not found, skipping camera initialization.") +except OSError: + print("boot.py - Camera initialization failed, skipping camera initialization.") From ade4731de48e54f038b3f68b246247e480aa03bf Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 26 Jun 2025 11:19:14 -0600 Subject: [PATCH 089/158] Fix RP2 PIO DVP interface Had some hacks for OV5640 testing --- cv2_drivers/cameras/dvp_rp2_pio.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/cv2_drivers/cameras/dvp_rp2_pio.py b/cv2_drivers/cameras/dvp_rp2_pio.py index 517673c..0581234 100644 --- a/cv2_drivers/cameras/dvp_rp2_pio.py +++ b/cv2_drivers/cameras/dvp_rp2_pio.py @@ -27,8 +27,8 @@ def __init__( if self.pin_xclk is not None: self.xclk = PWM(Pin(pin_xclk)) - # self.xclk.freq(25_000_000) - self.xclk.freq(15_000_000) + self.xclk.freq(25_000_000) + # self.xclk.freq(15_000_000) # Test for OV5640 self.xclk.duty_u16(32768) self.start_pio_dma(num_data_pins) @@ -54,17 +54,16 @@ def start_pio_dma(self, num_data_pins): self.dma = rp2.DMA() req_num = ((self.sm_id // 4) << 3) + (self.sm_id % 4) + 4 dma_ctrl = self.dma.pack_ctrl( - # size = 2, # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit size = 2, # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit inc_read = False, treq_sel = req_num, - bswap = False - # irq_quiet = False + bswap = True + # bswap = False # Test for OV5640 ) self.dma.config( read = self.sm, - # count = 244 * 324 // 4, - count = 240 * 320 * 2 // 4, + count = 244 * 324 // 4, + # count = 240 * 320 * 2 // 4, # Test for OV5640 ctrl = dma_ctrl ) @@ -85,7 +84,6 @@ def active(self, active = None): ) def _vsync_handler(self): - # print("VSYNC") # Disable DMA before reconfiguring it self.dma.active(False) From 81f7cb1eef102d74c3df525ae080cd28f90bed52 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 2 Jul 2025 15:38:05 -0600 Subject: [PATCH 090/158] Clean up camera drivers a bit Now switching between HM01B0 and OV5640 is as simple as changing only boot.py! RP2350 has issues capturing images correctly from OV5640 (see #22). 
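For reference, switching cameras is now just a different constructor call in
boot.py (a sketch using the example pin assignments from boot.py; adjust for
your board):

    import cv2_drivers.cameras as cameras
    camera = cameras.hm01b0_pio.HM01B0_PIO(
        i2c, pin_d0=12, pin_vsync=13, pin_hsync=14, pin_pclk=15)
    # camera = cameras.ov5640_pio.OV5640_PIO(
    #     i2c, pin_d0=8, pin_vsync=22, pin_hsync=21, pin_pclk=20)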
Example 3 is funky even with 5MHz XCLK, since other things use the DMA at the same time --- cv2_drivers/cameras/cv2_camera.py | 8 ++--- cv2_drivers/cameras/dvp_camera.py | 5 ++- cv2_drivers/cameras/dvp_rp2_pio.py | 39 +++++++++++++-------- cv2_drivers/cameras/hm01b0.py | 11 +----- cv2_drivers/cameras/hm01b0_pio.py | 26 ++++++++++++-- cv2_drivers/cameras/ov5640.py | 54 ++++++++++++++++-------------- cv2_drivers/cameras/ov5640_pio.py | 28 +++++++++++++--- 7 files changed, 108 insertions(+), 63 deletions(-) diff --git a/cv2_drivers/cameras/cv2_camera.py b/cv2_drivers/cameras/cv2_camera.py index 77a7720..990cef5 100644 --- a/cv2_drivers/cameras/cv2_camera.py +++ b/cv2_drivers/cameras/cv2_camera.py @@ -1,5 +1,5 @@ -from ulab import numpy as np - class CV2_Camera(): - def __init__(self, buffer_size): - self.buffer = np.zeros(buffer_size, dtype=np.uint8) + def __init__(self): + pass + + # TODO: Implement common methods for all cameras diff --git a/cv2_drivers/cameras/dvp_camera.py b/cv2_drivers/cameras/dvp_camera.py index 9105c70..a04a734 100644 --- a/cv2_drivers/cameras/dvp_camera.py +++ b/cv2_drivers/cameras/dvp_camera.py @@ -6,10 +6,9 @@ class DVP_Camera(CV2_Camera): def __init__( self, i2c, - i2c_address, - buffer_size + i2c_address ): - super().__init__(buffer_size) + super().__init__() self.i2c = i2c self.i2c_address = i2c_address diff --git a/cv2_drivers/cameras/dvp_rp2_pio.py b/cv2_drivers/cameras/dvp_rp2_pio.py index 0581234..7f6e87d 100644 --- a/cv2_drivers/cameras/dvp_rp2_pio.py +++ b/cv2_drivers/cameras/dvp_rp2_pio.py @@ -9,8 +9,11 @@ def __init__( pin_hsync, pin_pclk, pin_xclk, + xclk_freq, sm_id, - num_data_pins + num_data_pins, + bytes_per_frame, + byte_swap ): self.pin_d0 = pin_d0 self.pin_vsync = pin_vsync @@ -19,22 +22,22 @@ def __init__( self.pin_xclk = pin_xclk self.sm_id = sm_id + # Initialize DVP pins as inputs for i in range(num_data_pins): Pin(pin_d0+i, Pin.IN) Pin(pin_vsync, Pin.IN) Pin(pin_hsync, Pin.IN) Pin(pin_pclk, Pin.IN) + # Set up XCLK pin if provided if self.pin_xclk is not None: self.xclk = PWM(Pin(pin_xclk)) - self.xclk.freq(25_000_000) - # self.xclk.freq(15_000_000) # Test for OV5640 - self.xclk.duty_u16(32768) + self.xclk.freq(xclk_freq) + self.xclk.duty_u16(32768) # 50% duty cycle - self.start_pio_dma(num_data_pins) - - def start_pio_dma(self, num_data_pins): + # Copy the PIO program program = self._pio_read_dvp + # Mask in the GPIO pins program[0][0] |= self.pin_hsync & 0x1F program[0][1] |= self.pin_pclk & 0x1F @@ -44,40 +47,45 @@ def start_pio_dma(self, num_data_pins): program[0][2] &= 0xFFFFFFE0 program[0][2] |= num_data_pins + # Create PIO state machine to capture DVP data self.sm = rp2.StateMachine( self.sm_id, program, - in_base = self.pin_d0 + in_base = pin_d0 ) - self.sm.active(1) + # Create DMA controller to transfer data from PIO to buffer self.dma = rp2.DMA() req_num = ((self.sm_id // 4) << 3) + (self.sm_id % 4) + 4 + bytes_per_transfer = 4 dma_ctrl = self.dma.pack_ctrl( - size = 2, # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit + # 0 = 1 byte, 1 = 2 bytes, 2 = 4 bytes + size = {1:0, 2:1, 4:2}[bytes_per_transfer], inc_read = False, treq_sel = req_num, - bswap = True - # bswap = False # Test for OV5640 + bswap = byte_swap ) self.dma.config( read = self.sm, - count = 244 * 324 // 4, - # count = 240 * 320 * 2 // 4, # Test for OV5640 + count = bytes_per_frame // bytes_per_transfer, ctrl = dma_ctrl ) def active(self, active = None): + # If no argument is provided, return the current active state if active == None: return self.sm.active() + # Set the active state 
of the state machine self.sm.active(active) + # If active, set up the VSYNC interrupt handler if active: Pin(self.pin_vsync).irq( trigger = Pin.IRQ_FALLING, handler = lambda pin: self._vsync_handler() ) + # If not active, disable the VSYNC interrupt handler else: Pin(self.pin_vsync).irq( handler = None @@ -100,6 +108,9 @@ def _vsync_handler(self): # Start the DMA self.dma.active(True) + # Here is the PIO program, which is configurable to mask in the GPIO pins + # and the number of data pins. It must be configured before the state + # machine is created @rp2.asm_pio( in_shiftdir = rp2.PIO.SHIFT_LEFT, push_thresh = 32, diff --git a/cv2_drivers/cameras/hm01b0.py b/cv2_drivers/cameras/hm01b0.py index 262d770..d07581e 100644 --- a/cv2_drivers/cameras/hm01b0.py +++ b/cv2_drivers/cameras/hm01b0.py @@ -184,16 +184,7 @@ def __init__( i2c_address = 0x24, num_data_pins = 1 ): - super().__init__(i2c, i2c_address, (244, 324)) - - # for i in range(len(self.INIT_COMMANDS)): - # if self.INIT_COMMANDS[i][0] == 0x3059: - # if num_data_pins == 1: - # self.INIT_COMMANDS[i][1] = 0x22 - # elif num_data_pins == 4: - # self.INIT_COMMANDS[i][1] = 0x42 - # else: - # self.INIT_COMMANDS[i][1] = 0x02 + super().__init__(i2c, i2c_address) self.soft_reset() self.send_init(num_data_pins) diff --git a/cv2_drivers/cameras/hm01b0_pio.py b/cv2_drivers/cameras/hm01b0_pio.py index 2a30301..3e3c69c 100644 --- a/cv2_drivers/cameras/hm01b0_pio.py +++ b/cv2_drivers/cameras/hm01b0_pio.py @@ -1,5 +1,6 @@ from .hm01b0 import HM01B0 from .dvp_rp2_pio import DVP_RP2_PIO +from ulab import numpy as np class HM01B0_PIO(HM01B0, DVP_RP2_PIO): def __init__( @@ -10,13 +11,34 @@ def __init__( pin_hsync, pin_pclk, pin_xclk = None, + xclk_freq = 25_000_000, sm_id = 0, num_data_pins = 1, i2c_address = 0x24, ): + # Create the frame buffer + self.buffer = np.zeros((244, 324), dtype=np.uint8) + # Call both parent constructors - HM01B0.__init__(self, i2c, i2c_address, num_data_pins) - DVP_RP2_PIO.__init__(self, pin_d0, pin_vsync, pin_hsync, pin_pclk, pin_xclk, sm_id, num_data_pins) + DVP_RP2_PIO.__init__( + self, + pin_d0, + pin_vsync, + pin_hsync, + pin_pclk, + pin_xclk, + xclk_freq, + sm_id, + num_data_pins, + bytes_per_frame = self.buffer.size, + byte_swap = True + ) + HM01B0.__init__( + self, + i2c, + i2c_address, + num_data_pins + ) def open(self): self.active(True) diff --git a/cv2_drivers/cameras/ov5640.py b/cv2_drivers/cameras/ov5640.py index 5d5d828..140ddda 100644 --- a/cv2_drivers/cameras/ov5640.py +++ b/cv2_drivers/cameras/ov5640.py @@ -355,15 +355,15 @@ class OV5640(DVP_Camera): _ratio_table = [ # mw, mh, sx, sy, ex, ey, ox, oy, tx, ty - [2560, 1920, 0, 0, 2623, 1951, 32, 16, 2844, 1968], # 4x3 - [2560, 1704, 0, 110, 2623, 1843, 32, 16, 2844, 1752], # 3x2 - [2560, 1600, 0, 160, 2623, 1791, 32, 16, 2844, 1648], # 16x10 - [2560, 1536, 0, 192, 2623, 1759, 32, 16, 2844, 1584], # 5x3 - [2560, 1440, 0, 240, 2623, 1711, 32, 16, 2844, 1488], # 16x9 - [2560, 1080, 0, 420, 2623, 1531, 32, 16, 2844, 1128], # 21x9 - [2400, 1920, 80, 0, 2543, 1951, 32, 16, 2684, 1968], # 5x4 - [1920, 1920, 320, 0, 2543, 1951, 32, 16, 2684, 1968], # 1x1 - [1088, 1920, 736, 0, 1887, 1951, 32, 16, 1884, 1968], # 9x16 + [2560, 1920, 0, 0, 2623, 1951, 32, 16, 2844, 1968], # 4x3 + [2560, 1704, 0, 110, 2623, 1843, 32, 16, 2844, 1752], # 3x2 + [2560, 1600, 0, 160, 2623, 1791, 32, 16, 2844, 1648], # 16x10 + [2560, 1536, 0, 192, 2623, 1759, 32, 16, 2844, 1584], # 5x3 + [2560, 1440, 0, 240, 2623, 1711, 32, 16, 2844, 1488], # 16x9 + [2560, 1080, 0, 420, 2623, 1531, 32, 16, 2844, 
diff --git a/cv2_drivers/cameras/ov5640.py b/cv2_drivers/cameras/ov5640.py
index 5d5d828..140ddda 100644
--- a/cv2_drivers/cameras/ov5640.py
+++ b/cv2_drivers/cameras/ov5640.py
@@ -355,15 +355,15 @@ class OV5640(DVP_Camera):
 
     _ratio_table = [
         # mw,   mh,  sx,  sy,   ex,   ey, ox, oy,   tx,   ty
-        [2560, 1920, 0, 0, 2623, 1951, 32, 16, 2844, 1968], # 4x3
-        [2560, 1704, 0, 110, 2623, 1843, 32, 16, 2844, 1752], # 3x2
-        [2560, 1600, 0, 160, 2623, 1791, 32, 16, 2844, 1648], # 16x10
-        [2560, 1536, 0, 192, 2623, 1759, 32, 16, 2844, 1584], # 5x3
-        [2560, 1440, 0, 240, 2623, 1711, 32, 16, 2844, 1488], # 16x9
-        [2560, 1080, 0, 420, 2623, 1531, 32, 16, 2844, 1128], # 21x9
-        [2400, 1920, 80, 0, 2543, 1951, 32, 16, 2684, 1968], # 5x4
-        [1920, 1920, 320, 0, 2543, 1951, 32, 16, 2684, 1968], # 1x1
-        [1088, 1920, 736, 0, 1887, 1951, 32, 16, 1884, 1968], # 9x16
+        [2560, 1920,   0,   0, 2623, 1951, 32, 16, 2844, 1968], # 4x3
+        [2560, 1704,   0, 110, 2623, 1843, 32, 16, 2844, 1752], # 3x2
+        [2560, 1600,   0, 160, 2623, 1791, 32, 16, 2844, 1648], # 16x10
+        [2560, 1536,   0, 192, 2623, 1759, 32, 16, 2844, 1584], # 5x3
+        [2560, 1440,   0, 240, 2623, 1711, 32, 16, 2844, 1488], # 16x9
+        [2560, 1080,   0, 420, 2623, 1531, 32, 16, 2844, 1128], # 21x9
+        [2400, 1920,  80,   0, 2543, 1951, 32, 16, 2684, 1968], # 5x4
+        [1920, 1920, 320,   0, 2543, 1951, 32, 16, 2684, 1968], # 1x1
+        [1088, 1920, 736,   0, 1887, 1951, 32, 16, 1884, 1968], # 9x16
     ]
 
     _pll_pre_div2x_factors = [1, 1, 2, 3, 4, 1.5, 6, 2.5, 8]
@@ -875,13 +875,10 @@ class OV5640(DVP_Camera):
     def __init__(
         self,
         i2c,
-        i2c_address = 0x3C,
-        num_data_pins = 1
+        i2c_address = 0x3C
     ):
-        super().__init__(i2c, i2c_address, (240, 320, 2))
+        super().__init__(i2c, i2c_address)
 
-        # self.soft_reset()
-        # sleep_us(1_000_000)
         self.write_list(self._sensor_default_regs)
 
         self._colorspace = self.OV5640_COLOR_RGB
@@ -950,7 +947,6 @@ def write_list(self, data):
         for i in range(len(data) // 2):
             reg = data[i * 2]
             value = data[i * 2 + 1]
-            print(i, reg, value)
             if reg == self._REG_DLY:
                 sleep_us(value)
             else:
@@ -999,16 +995,15 @@ def _set_size_and_colorspace(self) -> None:
 
         self._set_image_options()
 
-        # if self._colorspace == self.OV5640_COLOR_JPEG:
-        #     sys_mul = 200
-        #     if size < self.OV5640_SIZE_QVGA:
-        #         sys_mul = 160
-        #     if size < self.OV5640_SIZE_XGA:
-        #         sys_mul = 180
-        #     self._set_pll(False, sys_mul, 4, 2, False, 2, True, 4)
-        # else:
-        #     self._set_pll(False, 32, 1, 1, False, 1, True, 4)
-        self._set_pll(False, 32, 1, 1, False, 1, True, 4)
+        if self._colorspace == self.OV5640_COLOR_JPEG:
+            sys_mul = 200
+            if size < self.OV5640_SIZE_QVGA:
+                sys_mul = 160
+            if size < self.OV5640_SIZE_XGA:
+                sys_mul = 180
+            self._set_pll(False, sys_mul, 4, 2, False, 2, True, 4)
+        else:
+            self._set_pll(False, 32, 1, 1, False, 1, True, 4)
 
         self._set_colorspace()
 
@@ -1121,4 +1116,11 @@ def read(self, image = None):
         Returns:
             tuple: (success, frame)
         """
-        return (True, cv2.cvtColor(self.buffer, cv2.COLOR_BayerRG2BGR, image))
+        if self._colorspace == self.OV5640_COLOR_RGB:
+            return (True, cv2.cvtColor(self.buffer, cv2.COLOR_BGR5652BGR, image))
+        elif self._colorspace == self.OV5640_COLOR_GRAYSCALE:
+            return (True, cv2.cvtColor(self.buffer, cv2.COLOR_GRAY2BGR, image))
+        else:
+            raise NotImplementedError(
+                f"OV5640: Reading images in colorspace {self._colorspace} is not yet implemented."
+            )
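With the colorspace dispatch above, read() hands back a BGR frame for the supported colorspaces and raises for the rest. A minimal, hypothetical capture sketch (construction of cam, an OV5640_PIO instance, is board-specific and omitted):

    cam.open()              # activates the PIO state machine and VSYNC interrupt
    ok, frame = cam.read()  # BGR image converted from the BGR565 frame buffer
    if ok:
        print(frame.shape)  # (240, 320, 3) for the default QVGA buffer
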
diff --git a/cv2_drivers/cameras/ov5640_pio.py b/cv2_drivers/cameras/ov5640_pio.py
index 97af01b..b6021bd 100644
--- a/cv2_drivers/cameras/ov5640_pio.py
+++ b/cv2_drivers/cameras/ov5640_pio.py
@@ -1,5 +1,6 @@
 from .ov5640 import OV5640
 from .dvp_rp2_pio import DVP_RP2_PIO
+from ulab import numpy as np
 
 class OV5640_PIO(OV5640, DVP_RP2_PIO):
     def __init__(
@@ -10,13 +11,32 @@ def __init__(
         pin_hsync,
         pin_pclk,
         pin_xclk = None,
+        xclk_freq = 5_000_000,
         sm_id = 0,
-        num_data_pins = 8,
-        i2c_address = 0x3c,
+        i2c_address = 0x3c
     ):
+        # Create the frame buffer
+        self.buffer = np.zeros((240, 320, 2), dtype=np.uint8)
+
         # Call both parent constructors
-        DVP_RP2_PIO.__init__(self, pin_d0, pin_vsync, pin_hsync, pin_pclk, pin_xclk, sm_id, num_data_pins)
-        OV5640.__init__(self, i2c, i2c_address, num_data_pins)
+        DVP_RP2_PIO.__init__(
+            self,
+            pin_d0,
+            pin_vsync,
+            pin_hsync,
+            pin_pclk,
+            pin_xclk,
+            xclk_freq,
+            sm_id,
+            num_data_pins = 8,
+            bytes_per_frame = self.buffer.size,
+            byte_swap = False
+        )
+        OV5640.__init__(
+            self,
+            i2c,
+            i2c_address
+        )
 
     def open(self):
         self.active(True)

From be7d810d134dfbbcf5c09e03799fe8248fbe51f4 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 2 Jul 2025 15:40:23 -0600
Subject: [PATCH 091/158] Add comment about copying buffers being slow

See https://github.com/v923z/micropython-ulab/issues/726
---
 cv2_drivers/displays/cv2_display.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/cv2_drivers/displays/cv2_display.py b/cv2_drivers/displays/cv2_display.py
index 3467002..f95213b 100644
--- a/cv2_drivers/displays/cv2_display.py
+++ b/cv2_drivers/displays/cv2_display.py
@@ -82,6 +82,8 @@ def _write_image_to_buffer_bgr565(self, image_roi, buffer_roi):
         if ch == 1: # Grayscale
             buffer_roi = cv2.cvtColor(image_roi, cv2.COLOR_GRAY2BGR565, buffer_roi)
         elif ch == 2: # Already in BGR565 format
+            # For some reason, this is relatively slow and creates a new buffer:
+            # https://github.com/v923z/micropython-ulab/issues/726
            buffer_roi[:] = image_roi
         elif ch == 3: # BGR
             buffer_roi = cv2.cvtColor(image_roi, cv2.COLOR_BGR2BGR565, buffer_roi)
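The assignment being annotated is easy to reproduce in isolation; a standalone ulab sketch of the same copy (shapes match the driver's BGR565 buffers):

    from ulab import numpy as np

    src = np.zeros((240, 320, 2), dtype=np.uint8)
    dst = np.zeros((240, 320, 2), dtype=np.uint8)

    # Element-wise slice assignment, as in the ch == 2 branch above. Per the
    # linked ulab issue, this currently creates a new buffer rather than
    # copying in place, which is why it is relatively slow.
    dst[:] = src
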
From 83376cf7944e55fc7475155ef224e4a12748d88e Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Thu, 3 Jul 2025 16:12:08 -0600
Subject: [PATCH 092/158] Add findContours(), drawContours(), and moments()

Have to force floats because of
https://github.com/v923z/micropython-ulab/issues/719
---
 src/convert.cpp |  46 +++++++++++++++
 src/convert.h   |   3 +
 src/imgproc.cpp | 150 ++++++++++++++++++++++++++++++++++++++++++++++++
 src/imgproc.h   |  24 ++++++++
 4 files changed, 223 insertions(+)

diff --git a/src/convert.cpp b/src/convert.cpp
index 89ecb75..d777e2f 100644
--- a/src/convert.cpp
+++ b/src/convert.cpp
@@ -310,3 +310,49 @@ Scalar mp_obj_to_scalar(mp_obj_t obj)
 
     return scalar;
 }
+
+std::vector<std::vector<Point>> mp_obj_to_contours(mp_obj_t obj)
+{
+    // Check for None object
+    if(obj == mp_const_none)
+    {
+        // Create an empty contours object
+        return std::vector<std::vector<Point>>();
+    }
+
+    // Create a vector to hold the contours
+    std::vector<std::vector<Point>> contours;
+
+    // Ideally, we could just use ndarray_from_mp_obj(), but it has a bug with
+    // 4D arrays, so we need to do this a bit manually.
+    // https://github.com/v923z/micropython-ulab/issues/727
+
+    // Assume the object is iterable. Will raise an exception if not
+    mp_obj_iter_buf_t iter_buf;
+    mp_obj_t iterable = mp_getiter(obj, &iter_buf);
+    mp_obj_t item;
+
+    // Iterate through all items in the iterable
+    while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION)
+    {
+        // Create a vector to hold the points of this contour
+        std::vector<Point> contour;
+
+        // Convert the item to a Mat object (should be a 3D ndarray of points)
+        Mat contour_mat = mp_obj_to_mat(item);
+
+        // Iterate through the rows of the Mat object and extract the points
+        for (int j = 0; j < contour_mat.rows; j++)
+        {
+            contour.push_back(Point(
+                contour_mat.at<float>(j, 0),
+                contour_mat.at<float>(j, 1)
+            ));
+        }
+
+        // Add the contour to the list of contours
+        contours.push_back(contour);
+    }
+
+    return contours;
+}
diff --git a/src/convert.h b/src/convert.h
index 4a81c58..bdcca3a 100644
--- a/src/convert.h
+++ b/src/convert.h
@@ -30,3 +30,6 @@ Point mp_obj_to_point(mp_obj_t obj);
 
 // Conversion functions between Scalar and mp_obj_t
 Scalar mp_obj_to_scalar(mp_obj_t obj);
+
+// Conversion functions between contours (vector of vector of Point) and mp_obj_t
+std::vector<std::vector<Point>> mp_obj_to_contours(mp_obj_t obj);
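In other words, mp_obj_to_contours() accepts any iterable of per-contour point arrays in the same (N, 1, 2) float layout that findContours() below produces. A hedged MicroPython sketch that hand-builds one contour, draws it, and takes its moments (all names and values are illustrative):

    import cv2
    from ulab import numpy as np

    # One triangular contour in findContours() layout: (N, 1, 2), float
    # (floats are forced because of the ulab issue cited in this commit)
    tri = np.array([[[10, 10]], [[60, 10]], [[35, 50]]], dtype=np.float)

    canvas = np.zeros((64, 96), dtype=np.uint8)
    cv2.drawContours(canvas, (tri,), -1, 255)  # assumes a scalar color is accepted

    m = cv2.moments(canvas, binary = True)     # moments() returns a plain dict
    if m["m00"] != 0:
        cx, cy = m["m10"] / m["m00"], m["m01"] / m["m00"]  # centroid
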
diff --git a/src/imgproc.cpp b/src/imgproc.cpp
index 6ec5772..6a796fa 100644
--- a/src/imgproc.cpp
+++ b/src/imgproc.cpp
@@ -423,6 +423,47 @@ mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
     return mat_to_mp_obj(dst);
 }
 
+mp_obj_t cv2_imgproc_drawContours(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_image, ARG_contours, ARG_contourIdx, ARG_color, ARG_thickness, ARG_lineType, ARG_hierarchy, ARG_maxLevel, ARG_offset };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_contours, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_contourIdx, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = -1 } },
+        { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } },
+        { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } },
+        { MP_QSTR_hierarchy, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_maxLevel, MP_ARG_INT, { .u_int = INT_MAX } },
+        { MP_QSTR_offset, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+    std::vector<std::vector<Point>> contours = mp_obj_to_contours(args[ARG_contours].u_obj);
+    int contourIdx = args[ARG_contourIdx].u_int;
+    Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj);
+    int thickness = args[ARG_thickness].u_int;
+    int lineType = args[ARG_lineType].u_int;
+    Mat hierarchy = args[ARG_hierarchy].u_obj != mp_const_none ? mp_obj_to_mat(args[ARG_hierarchy].u_obj) : Mat();
+    int maxLevel = args[ARG_maxLevel].u_int;
+    Point offset = args[ARG_offset].u_obj != mp_const_none ? mp_obj_to_point(args[ARG_offset].u_obj) : Point();
+
+    // Call the corresponding OpenCV function
+    try {
+        drawContours(image, contours, contourIdx, color, thickness, lineType, hierarchy, maxLevel, offset);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Return the result
+    return mat_to_mp_obj(image);
+}
+
 mp_obj_t cv2_imgproc_drawMarker(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
     // Define the arguments
     enum { ARG_img, ARG_position, ARG_color, ARG_markerType, ARG_markerSize, ARG_thickness, ARG_line_type };
@@ -671,6 +712,60 @@ mp_obj_t cv2_imgproc_filter2D(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
     return mat_to_mp_obj(dst);
 }
 
+mp_obj_t cv2_imgproc_findContours(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_image, ARG_mode, ARG_method, ARG_contours, ARG_hierarchy, ARG_offset };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_mode, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_method, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } },
+        { MP_QSTR_contours, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_hierarchy, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_offset, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat image = mp_obj_to_mat(args[ARG_image].u_obj);
+    int mode = args[ARG_mode].u_int;
+    int method = args[ARG_method].u_int;
+    std::vector<std::vector<Point>> contours; // TODO: Allow user input
+    std::vector<Vec4i> hierarchy; // TODO: Allow user input
+    Point offset = args[ARG_offset].u_obj == mp_const_none ?
Point() : mp_obj_to_point(args[ARG_offset].u_obj); + + // Call the corresponding OpenCV function + try { + findContours(image, contours, hierarchy, mode, method, offset); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Convert contours to a tuple of ndarray objects + mp_obj_t contours_obj = mp_obj_new_tuple(contours.size(), NULL); + mp_obj_tuple_t *contours_tuple = (mp_obj_tuple_t*) MP_OBJ_TO_PTR(contours_obj); + + for(size_t i = 0; i < contours.size(); i++) { + Mat mat_contour(contours[i]); + Mat mat_f32; + mat_contour.convertTo(mat_f32, CV_32F); + contours_tuple->items[i] = mat_to_mp_obj(mat_f32); + } + + // Convert hierarchy to a Mat object + Mat mat_hierarchy(hierarchy); + + // Return the result + mp_obj_t result_tuple[2]; + result_tuple[0] = contours_tuple; + Mat mat_16s; + mat_hierarchy.convertTo(mat_16s, CV_16S); + result_tuple[1] = mat_to_mp_obj(mat_16s); + return mp_obj_new_tuple(2, result_tuple); +} + mp_obj_t cv2_imgproc_GaussianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_ksize, ARG_sigmaX, ARG_dst, ARG_sigmaY, ARG_borderType, ARG_hint }; @@ -1214,6 +1309,61 @@ mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_ma return mat_to_mp_obj(dst); } +mp_obj_t cv2_imgproc_moments(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_binary }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_binary, MP_ARG_BOOL, { .u_bool = false } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + bool binary = args[ARG_binary].u_bool; + Moments moments; + + // Call the corresponding OpenCV function + try { + moments = cv::moments(src, binary); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Create a dictionary to hold the moments + mp_obj_t moments_dict = mp_obj_new_dict(0); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m00), mp_obj_new_float(moments.m00)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m10), mp_obj_new_float(moments.m10)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m01), mp_obj_new_float(moments.m01)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m20), mp_obj_new_float(moments.m20)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m11), mp_obj_new_float(moments.m11)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m02), mp_obj_new_float(moments.m02)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m30), mp_obj_new_float(moments.m30)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m21), mp_obj_new_float(moments.m21)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m12), mp_obj_new_float(moments.m12)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_m03), mp_obj_new_float(moments.m03)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_mu20), mp_obj_new_float(moments.mu20)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_mu11), mp_obj_new_float(moments.mu11)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_mu02), mp_obj_new_float(moments.mu02)); + mp_obj_dict_store(moments_dict, 
MP_OBJ_NEW_QSTR(MP_QSTR_mu30), mp_obj_new_float(moments.mu30)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_mu21), mp_obj_new_float(moments.mu21)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_mu12), mp_obj_new_float(moments.mu12)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_mu03), mp_obj_new_float(moments.mu03)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu20), mp_obj_new_float(moments.nu20)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu11), mp_obj_new_float(moments.nu11)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu02), mp_obj_new_float(moments.nu02)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu30), mp_obj_new_float(moments.nu30)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu21), mp_obj_new_float(moments.nu21)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu12), mp_obj_new_float(moments.nu12)); + mp_obj_dict_store(moments_dict, MP_OBJ_NEW_QSTR(MP_QSTR_nu03), mp_obj_new_float(moments.nu03)); + + // Return the moments dictionary + return moments_dict; +} + mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_img, ARG_text, ARG_org, ARG_fontFace, ARG_fontScale, ARG_color, ARG_thickness, ARG_lineType, ARG_bottomLeftOrigin }; diff --git a/src/imgproc.h b/src/imgproc.h index 4a28175..590387e 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -13,12 +13,14 @@ extern mp_obj_t cv2_imgproc_connectedComponents(size_t n_args, const mp_obj_t *p extern mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_drawContours(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_drawMarker(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_ellipse(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_erode(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_fillConvexPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_fillPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_filter2D(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_findContours(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_GaussianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_HoughCircles(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -31,6 +33,7 @@ extern mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map extern mp_obj_t cv2_imgproc_matchTemplate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_medianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_moments(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, 
mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_rectangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_Scharr(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -50,12 +53,14 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponents_obj, 1, cv2_im static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_circle_obj, 4, cv2_imgproc_circle); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_cvtColor_obj, 2, cv2_imgproc_cvtColor); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_dilate_obj, 2, cv2_imgproc_dilate); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_drawContours_obj, 3, cv2_imgproc_drawContours); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_drawMarker_obj, 3, cv2_imgproc_drawMarker); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_ellipse_obj, 7, cv2_imgproc_ellipse); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_erode_obj, 2, cv2_imgproc_erode); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillConvexPoly_obj, 3, cv2_imgproc_fillConvexPoly); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillPoly_obj, 3, cv2_imgproc_fillPoly); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_filter2D_obj, 3, cv2_imgproc_filter2D); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_findContours_obj, 3, cv2_imgproc_findContours); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_GaussianBlur_obj, 3, cv2_imgproc_GaussianBlur); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_getStructuringElement_obj, 2, cv2_imgproc_getStructuringElement); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCircles_obj, 4, cv2_imgproc_HoughCircles); @@ -68,6 +73,7 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_line_obj, 4, cv2_imgproc_line); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_matchTemplate_obj, 3, cv2_imgproc_matchTemplate); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_medianBlur_obj, 2, cv2_imgproc_medianBlur); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_moments_obj, 1, cv2_imgproc_moments); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_putText_obj, 6, cv2_imgproc_putText); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_rectangle_obj, 4, cv2_imgproc_rectangle); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Scharr_obj, 4, cv2_imgproc_Scharr); @@ -89,12 +95,14 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_thre { MP_ROM_QSTR(MP_QSTR_circle), MP_ROM_PTR(&cv2_imgproc_circle_obj) }, \ { MP_ROM_QSTR(MP_QSTR_cvtColor), MP_ROM_PTR(&cv2_imgproc_cvtColor_obj) }, \ { MP_ROM_QSTR(MP_QSTR_dilate), MP_ROM_PTR(&cv2_imgproc_dilate_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_drawContours), MP_ROM_PTR(&cv2_imgproc_drawContours_obj) }, \ { MP_ROM_QSTR(MP_QSTR_drawMarker), MP_ROM_PTR(&cv2_imgproc_drawMarker_obj) }, \ { MP_ROM_QSTR(MP_QSTR_ellipse), MP_ROM_PTR(&cv2_imgproc_ellipse_obj) }, \ { MP_ROM_QSTR(MP_QSTR_erode), MP_ROM_PTR(&cv2_imgproc_erode_obj) }, \ { MP_ROM_QSTR(MP_QSTR_fillConvexPoly), MP_ROM_PTR(&cv2_imgproc_fillConvexPoly_obj) }, \ { MP_ROM_QSTR(MP_QSTR_fillPoly), MP_ROM_PTR(&cv2_imgproc_fillPoly_obj) }, \ { MP_ROM_QSTR(MP_QSTR_filter2D), MP_ROM_PTR(&cv2_imgproc_filter2D_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_findContours), MP_ROM_PTR(&cv2_imgproc_findContours_obj) }, \ { MP_ROM_QSTR(MP_QSTR_GaussianBlur), MP_ROM_PTR(&cv2_imgproc_GaussianBlur_obj) }, \ { MP_ROM_QSTR(MP_QSTR_getStructuringElement), MP_ROM_PTR(&cv2_imgproc_getStructuringElement_obj) }, \ { MP_ROM_QSTR(MP_QSTR_HoughCircles), MP_ROM_PTR(&cv2_imgproc_HoughCircles_obj) }, \ @@ -107,6 +115,7 @@ static 
MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_thre { MP_ROM_QSTR(MP_QSTR_matchTemplate), MP_ROM_PTR(&cv2_imgproc_matchTemplate_obj) }, \ { MP_ROM_QSTR(MP_QSTR_medianBlur), MP_ROM_PTR(&cv2_imgproc_medianBlur_obj) }, \ { MP_ROM_QSTR(MP_QSTR_morphologyEx), MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_moments), MP_ROM_PTR(&cv2_imgproc_moments_obj) }, \ { MP_ROM_QSTR(MP_QSTR_putText), MP_ROM_PTR(&cv2_imgproc_putText_obj) }, \ { MP_ROM_QSTR(MP_QSTR_rectangle), MP_ROM_PTR(&cv2_imgproc_rectangle_obj) }, \ { MP_ROM_QSTR(MP_QSTR_Scharr), MP_ROM_PTR(&cv2_imgproc_Scharr_obj) }, \ @@ -143,6 +152,21 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_thre { MP_ROM_QSTR(MP_QSTR_ADAPTIVE_THRESH_MEAN_C), MP_ROM_INT(0) }, \ { MP_ROM_QSTR(MP_QSTR_ADAPTIVE_THRESH_GAUSSIAN_C), MP_ROM_INT(1) }, \ \ + /* Retrieval modes, from opencv2/imgproc.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_RETR_EXTERNAL), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_RETR_LIST), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_RETR_CCOMP), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_RETR_TREE), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_RETR_FLOODFILL), MP_ROM_INT(4) }, \ + \ + /* Contour approximation methods, from opencv2/imgproc.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_CHAIN_CODE), MP_ROM_INT(0) }, \ + { MP_ROM_QSTR(MP_QSTR_CHAIN_APPROX_NONE), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_CHAIN_APPROX_SIMPLE), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_CHAIN_APPROX_TC89_L1), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_CHAIN_APPROX_TC89_KCOS), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_LINK_RUNS), MP_ROM_INT(5) }, \ + \ /* Hough modes, from opencv2/imgproc.hpp */ \ { MP_ROM_QSTR(MP_QSTR_HOUGH_STANDARD), MP_ROM_INT(0) }, \ { MP_ROM_QSTR(MP_QSTR_HOUGH_PROBABILISTIC), MP_ROM_INT(1) }, \ From 527a8a50095d538035f971b56a9e11f392077878 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 8 Jul 2025 14:51:26 -0600 Subject: [PATCH 093/158] Add more contour functions --- src/convert.cpp | 110 ++++++++++ src/convert.h | 2 + src/core.cpp | 44 ++++ src/core.h | 3 + src/imgproc.cpp | 531 ++++++++++++++++++++++++++++++++++++++++++++++++ src/imgproc.h | 63 ++++++ 6 files changed, 753 insertions(+) diff --git a/src/convert.cpp b/src/convert.cpp index d777e2f..957cb44 100644 --- a/src/convert.cpp +++ b/src/convert.cpp @@ -201,6 +201,61 @@ Size mp_obj_to_size(mp_obj_t obj) return size; } +Size2f mp_obj_to_size2f(mp_obj_t obj) +{ + // Check for None object + if(obj == mp_const_none) + { + // Create an empty Size2f object + return Size2f(); + } + + // Assume the object is a ndarray, or can be converted to one. 
Will raise an + // exception if not + ndarray_obj_t *ndarray = ndarray_from_mp_obj(obj, 0); + + // Validate the length of the ndarray + if(ndarray->len != 2) + { + mp_raise_TypeError(MP_ERROR_TEXT("Size2f must be length 2")); + } + + // Compute the size, checking the type of the ndarray + Size2f size; + switch(ndarray->dtype) + { + case NDARRAY_UINT8: + size.width = ((uint8_t*) ndarray->array)[0]; + size.height = ((uint8_t*) ndarray->array)[1]; + break; + case NDARRAY_INT8: + size.width = ((int8_t*) ndarray->array)[0]; + size.height = ((int8_t*) ndarray->array)[1]; + break; + case NDARRAY_UINT16: + size.width = ((uint16_t*) ndarray->array)[0]; + size.height = ((uint16_t*) ndarray->array)[1]; + break; + case NDARRAY_INT16: + size.width = ((int16_t*) ndarray->array)[0]; + size.height = ((int16_t*) ndarray->array)[1]; + break; + case NDARRAY_FLOAT: + size.width = ((float*) ndarray->array)[0]; + size.height = ((float*) ndarray->array)[1]; + break; + case NDARRAY_BOOL: + size.width = ((bool*) ndarray->array)[0]; + size.height = ((bool*) ndarray->array)[1]; + break; + default: + mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type")); + break; + } + + return size; +} + Point mp_obj_to_point(mp_obj_t obj) { // Check for None object @@ -256,6 +311,61 @@ Point mp_obj_to_point(mp_obj_t obj) return point; } +Point2f mp_obj_to_point2f(mp_obj_t obj) +{ + // Check for None object + if(obj == mp_const_none) + { + // Create an empty Point2f object + return Point2f(); + } + + // Assume the object is a ndarray, or can be converted to one. Will raise an + // exception if not + ndarray_obj_t *ndarray = ndarray_from_mp_obj(obj, 0); + + // Validate the length of the ndarray + if(ndarray->len != 2) + { + mp_raise_TypeError(MP_ERROR_TEXT("Point2f must be length 2")); + } + + // Compute the point, checking the type of the ndarray + Point2f point; + switch(ndarray->dtype) + { + case NDARRAY_UINT8: + point.x = ((uint8_t*) ndarray->array)[0]; + point.y = ((uint8_t*) ndarray->array)[1]; + break; + case NDARRAY_INT8: + point.x = ((int8_t*) ndarray->array)[0]; + point.y = ((int8_t*) ndarray->array)[1]; + break; + case NDARRAY_UINT16: + point.x = ((uint16_t*) ndarray->array)[0]; + point.y = ((uint16_t*) ndarray->array)[1]; + break; + case NDARRAY_INT16: + point.x = ((int16_t*) ndarray->array)[0]; + point.y = ((int16_t*) ndarray->array)[1]; + break; + case NDARRAY_FLOAT: + point.x = ((float*) ndarray->array)[0]; + point.y = ((float*) ndarray->array)[1]; + break; + case NDARRAY_BOOL: + point.x = ((bool*) ndarray->array)[0]; + point.y = ((bool*) ndarray->array)[1]; + break; + default: + mp_raise_TypeError(MP_ERROR_TEXT("Unsupported ndarray type")); + break; + } + + return point; +} + Scalar mp_obj_to_scalar(mp_obj_t obj) { // Check for None object diff --git a/src/convert.h b/src/convert.h index bdcca3a..0458b43 100644 --- a/src/convert.h +++ b/src/convert.h @@ -24,9 +24,11 @@ Mat mp_obj_to_mat(mp_obj_t obj); // Conversion functions between Size and mp_obj_t Size mp_obj_to_size(mp_obj_t obj); +Size2f mp_obj_to_size2f(mp_obj_t obj); // Conversion functions between Point and mp_obj_t Point mp_obj_to_point(mp_obj_t obj); +Point2f mp_obj_to_point2f(mp_obj_t obj); // Conversion functions between Scalar and mp_obj_t Scalar mp_obj_to_scalar(mp_obj_t obj); diff --git a/src/core.cpp b/src/core.cpp index 75f2a06..2160273 100644 --- a/src/core.cpp +++ b/src/core.cpp @@ -122,3 +122,47 @@ mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_ // Return the result return mat_to_mp_obj(dst); } + +mp_obj_t 
cv2_core_minMaxLoc(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_mask }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_mask, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + Mat mask = mp_obj_to_mat(args[ARG_mask].u_obj); + + double minVal, maxVal; + Point minLoc, maxLoc; + + // Call the corresponding OpenCV function + try { + minMaxLoc(src, &minVal, &maxVal, &minLoc, &maxLoc, mask); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + mp_obj_t min_loc_tuple[2] = { + mp_obj_new_float(minLoc.x), + mp_obj_new_float(minLoc.y) + }; + mp_obj_t max_loc_tuple[2] = { + mp_obj_new_float(maxLoc.x), + mp_obj_new_float(maxLoc.y) + }; + mp_obj_t result_tuple[4] = { + mp_obj_new_float(minVal), + mp_obj_new_float(maxVal), + mp_obj_new_tuple(2, min_loc_tuple), + mp_obj_new_tuple(2, max_loc_tuple) + }; + return mp_obj_new_tuple(4, result_tuple); +} diff --git a/src/core.h b/src/core.h index dc289c6..9aabf89 100644 --- a/src/core.h +++ b/src/core.h @@ -4,16 +4,19 @@ // Function declarations extern mp_obj_t cv2_core_convertScaleAbs(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_core_minMaxLoc(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); // Python references to the functions static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_convertScaleAbs_obj, 1, cv2_core_convertScaleAbs); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_minMaxLoc_obj, 1, cv2_core_minMaxLoc); // Global definitions for functions and constants #define OPENCV_CORE_GLOBALS \ /* Functions */ \ { MP_ROM_QSTR(MP_QSTR_convertScaleAbs), MP_ROM_PTR(&cv2_core_convertScaleAbs_obj) }, \ { MP_ROM_QSTR(MP_QSTR_inRange), MP_ROM_PTR(&cv2_core_inRange_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_minMaxLoc), MP_ROM_PTR(&cv2_core_minMaxLoc_obj) }, \ \ /* OpenCV data types, from opencv2/core/hal/interface.h */ \ /* Other types are currently not supported by ulab */ \ diff --git a/src/imgproc.cpp b/src/imgproc.cpp index 6a796fa..d8e6894 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -49,6 +49,35 @@ mp_obj_t cv2_imgproc_adaptiveThreshold(size_t n_args, const mp_obj_t *pos_args, return mat_to_mp_obj(dst); } +mp_obj_t cv2_imgproc_arcLength(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_curve, ARG_closed }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_curve, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_closed, MP_ARG_REQUIRED | MP_ARG_BOOL, { .u_bool = false } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat curve = mp_obj_to_mat(args[ARG_curve].u_obj); + bool closed = args[ARG_closed].u_bool; + + mp_float_t retval; + + // Call the corresponding OpenCV function + try { + retval = arcLength(curve, closed); + } catch(Exception& e) { 
+ mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mp_obj_new_float(retval); +} + mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_img, ARG_pt1, ARG_pt2, ARG_color, ARG_thickness, ARG_line_type, ARG_shift, ARG_tipLength }; @@ -92,6 +121,70 @@ mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map return mat_to_mp_obj(img); } +mp_obj_t cv2_imgproc_approxPolyDP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_curve, ARG_epsilon, ARG_closed, ARG_approxCurve }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_curve, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_epsilon, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_closed, MP_ARG_BOOL, { .u_bool = false } }, + { MP_QSTR_approxCurve, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat curve = mp_obj_to_mat(args[ARG_curve].u_obj); + double epsilon = mp_obj_get_float(args[ARG_epsilon].u_obj); + bool closed = args[ARG_closed].u_bool; + Mat approxCurve = mp_obj_to_mat(args[ARG_approxCurve].u_obj); + + // Call the corresponding OpenCV function + try { + approxPolyDP(curve, approxCurve, epsilon, closed); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(approxCurve); +} + +mp_obj_t cv2_imgproc_approxPolyN(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_curve, ARG_nsides, ARG_approxCurve, ARG_epsilon_percentage, ARG_ensure_convex }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_curve, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_nsides, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_approxCurve, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_epsilon_percentage, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_ensure_convex, MP_ARG_BOOL, { .u_bool = true } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat curve = mp_obj_to_mat(args[ARG_curve].u_obj); + int nsides = args[ARG_nsides].u_int; + Mat approxCurve = mp_obj_to_mat(args[ARG_approxCurve].u_obj); + mp_float_t epsilon_percentage = args[ARG_epsilon_percentage].u_obj == mp_const_none ? 
-1.0 : mp_obj_get_float(args[ARG_epsilon_percentage].u_obj); + bool ensure_convex = args[ARG_ensure_convex].u_bool; + + // Call the corresponding OpenCV function + try { + approxPolyN(curve, approxCurve, nsides, epsilon_percentage, ensure_convex); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(approxCurve); +} + mp_obj_t cv2_imgproc_bilateralFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_d, ARG_sigmaColor, ARG_sigmaSpace, ARG_dst, ARG_borderType }; @@ -160,6 +253,39 @@ mp_obj_t cv2_imgproc_blur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_ return mat_to_mp_obj(dst); } +mp_obj_t cv2_imgproc_boundingRect(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_array }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_array, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat array = mp_obj_to_mat(args[ARG_array].u_obj); + + Rect retval; + + // Call the corresponding OpenCV function + try { + retval = boundingRect(array); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result as a tuple + mp_obj_t retval_tuple[4]; + retval_tuple[0] = mp_obj_new_int(retval.x); + retval_tuple[1] = mp_obj_new_int(retval.y); + retval_tuple[2] = mp_obj_new_int(retval.width); + retval_tuple[3] = mp_obj_new_int(retval.height); + mp_obj_t result = mp_obj_new_tuple(4, retval_tuple); + return result; +} + mp_obj_t cv2_imgproc_boxFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_ddepth, ARG_ksize, ARG_dst, ARG_anchor, ARG_normalize, ARG_borderType }; @@ -197,6 +323,37 @@ mp_obj_t cv2_imgproc_boxFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t return mat_to_mp_obj(dst); } +mp_obj_t cv2_imgproc_boxPoints(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_box, ARG_points }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_box, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_points, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + mp_obj_tuple_t *box_tuple = (mp_obj_tuple_t*) args[ARG_box].u_obj; + RotatedRect box; + box.center = mp_obj_to_point2f(box_tuple->items[0]); + box.size = mp_obj_to_size2f(box_tuple->items[1]); + box.angle = mp_obj_get_float(box_tuple->items[2]); + Mat points = mp_obj_to_mat(args[ARG_points].u_obj); + + // Call the corresponding OpenCV function + try { + boxPoints(box, points); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(points); +} + mp_obj_t cv2_imgproc_Canny(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_image, ARG_threshold1, ARG_threshold2, ARG_edges, ARG_apertureSize, ARG_L2gradient }; @@ -312,6 +469,115 @@ mp_obj_t cv2_imgproc_connectedComponents(size_t n_args, const mp_obj_t *pos_args // 
return mp_obj_new_tuple(4, result); // } +mp_obj_t cv2_imgproc_contourArea(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_contour, ARG_oriented }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_contour, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_oriented, MP_ARG_BOOL, { .u_bool = false } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat contour = mp_obj_to_mat(args[ARG_contour].u_obj); + bool oriented = args[ARG_oriented].u_bool; + + mp_float_t retval; + + // Call the corresponding OpenCV function + try { + retval = contourArea(contour, oriented); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mp_obj_new_float(retval); +} + +mp_obj_t cv2_imgproc_convexHull(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_points, ARG_hull, ARG_clockwise, ARG_returnPoints }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_hull, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_clockwise, MP_ARG_BOOL, { .u_bool = false } }, + { MP_QSTR_returnPoints, MP_ARG_BOOL, { .u_bool = true } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat points = mp_obj_to_mat(args[ARG_points].u_obj); + Mat hull; // TODO: Allow user input + bool clockwise = args[ARG_clockwise].u_bool; + bool returnPoints = args[ARG_returnPoints].u_bool; + + // Call the corresponding OpenCV function + try { + convexHull(points, hull, clockwise, returnPoints); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // If hull is 32S, convert it to float + if (hull.type() == CV_32S) { + Mat hullFloat; + hull.convertTo(hullFloat, CV_32F); + hull = hullFloat; + } + + // Return the result + return mat_to_mp_obj(hull); +} + +mp_obj_t cv2_imgproc_convexityDefects(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_contour, ARG_convexhull, ARG_convexityDefects }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_contour, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_convexhull, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_convexityDefects, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat contour = mp_obj_to_mat(args[ARG_contour].u_obj); + Mat convexhull = mp_obj_to_mat(args[ARG_convexhull].u_obj); + Mat convexityDefects32S; // TODO: Allow user input + + // contour must be of type CV_32S + Mat contour32S; + contour.convertTo(contour32S, CV_32S); + + // convexhull must be of type CV_32S + Mat convexhull32S; + convexhull.convertTo(convexhull32S, CV_32S); + + // Call the corresponding OpenCV function + try { + cv::convexityDefects(contour32S, convexhull32S, convexityDefects32S); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, 
MP_ERROR_TEXT(e.what())); + } + + // Convert the convexityDefects32S to float + Mat convexityDefects; + convexityDefects.allocator = &GetNumpyAllocator(); + convexityDefects32S.convertTo(convexityDefects, CV_32F); + + // Return the result + return mat_to_mp_obj(convexityDefects); +} + mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_img, ARG_center, ARG_radius, ARG_color, ARG_thickness, ARG_lineType, ARG_shift }; @@ -766,6 +1032,78 @@ mp_obj_t cv2_imgproc_findContours(size_t n_args, const mp_obj_t *pos_args, mp_ma return mp_obj_new_tuple(2, result_tuple); } +mp_obj_t cv2_imgproc_fitEllipse(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_points }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat points = mp_obj_to_mat(args[ARG_points].u_obj); + + RotatedRect ellipse; + + // Call the corresponding OpenCV function + try { + ellipse = fitEllipse(points); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Convert the result to a tuple + mp_obj_t center[2]; + center[0] = mp_obj_new_float(ellipse.center.x); + center[1] = mp_obj_new_float(ellipse.center.y); + mp_obj_t size[2]; + size[0] = mp_obj_new_float(ellipse.size.width); + size[1] = mp_obj_new_float(ellipse.size.height); + mp_obj_t result[3]; + result[0] = mp_obj_new_tuple(2, center); + result[1] = mp_obj_new_tuple(2, size); + result[2] = mp_obj_new_float(ellipse.angle); + return mp_obj_new_tuple(3, result); +} + +mp_obj_t cv2_imgproc_fitLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_points, ARG_distType, ARG_param, ARG_reps, ARG_aeps, ARG_line }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_distType, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_param, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_reps, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_aeps, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_line, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat points = mp_obj_to_mat(args[ARG_points].u_obj); + int distType = args[ARG_distType].u_int; + mp_float_t param = mp_obj_get_float(args[ARG_param].u_obj); + mp_float_t reps = mp_obj_get_float(args[ARG_reps].u_obj); + mp_float_t aeps = mp_obj_get_float(args[ARG_aeps].u_obj); + Mat line = mp_obj_to_mat(args[ARG_line].u_obj); + + // Call the corresponding OpenCV function + try { + fitLine(points, line, distType, param, reps, aeps); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(line); +} + mp_obj_t cv2_imgproc_GaussianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_ksize, ARG_sigmaX, ARG_dst, ARG_sigmaY, ARG_borderType, ARG_hint 
}; @@ -1126,6 +1464,32 @@ mp_obj_t cv2_imgproc_HoughLinesWithAccumulator(size_t n_args, const mp_obj_t *po return mat_to_mp_obj(lines); } +mp_obj_t cv2_imgproc_isContourConvex(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_contour }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_contour, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat contour = mp_obj_to_mat(args[ARG_contour].u_obj); + + // Call the corresponding OpenCV function + bool isConvex; + try { + isConvex = isContourConvex(contour); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mp_obj_new_bool(isConvex); +} + mp_obj_t cv2_imgproc_Laplacian(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_ddepth, ARG_dst, ARG_ksize, ARG_scale, ARG_delta, ARG_borderType }; @@ -1200,6 +1564,39 @@ mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_ return mat_to_mp_obj(img); } +mp_obj_t cv2_imgproc_matchShapes(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_contour1, ARG_contour2, ARG_method, ARG_parameter }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_contour1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_contour2, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_method, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_parameter, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat contour1 = mp_obj_to_mat(args[ARG_contour1].u_obj); + Mat contour2 = mp_obj_to_mat(args[ARG_contour2].u_obj); + int method = args[ARG_method].u_int; + mp_float_t parameter = mp_obj_get_float(args[ARG_parameter].u_obj); + + mp_float_t retval; + + // Call the corresponding OpenCV function + try { + retval = matchShapes(contour1, contour2, method, parameter); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mp_obj_new_float(retval); +} + mp_obj_t cv2_imgproc_matchTemplate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_img, ARG_templ, ARG_method, ARG_result, ARG_mask }; @@ -1262,6 +1659,109 @@ mp_obj_t cv2_imgproc_medianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_ return mat_to_mp_obj(dst); } +mp_obj_t cv2_imgproc_minAreaRect(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_points }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat points = mp_obj_to_mat(args[ARG_points].u_obj); + + RotatedRect retval; + + // Call the corresponding OpenCV function + try { + retval = minAreaRect(points); + } 
catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result as a tuple + mp_obj_t center_tuple[2]; + center_tuple[0] = mp_obj_new_float(retval.center.x); + center_tuple[1] = mp_obj_new_float(retval.center.y); + mp_obj_t size_tuple[2]; + size_tuple[0] = mp_obj_new_float(retval.size.width); + size_tuple[1] = mp_obj_new_float(retval.size.height); + mp_obj_t result_tuple[3]; + result_tuple[0] = mp_obj_new_tuple(2, center_tuple); + result_tuple[1] = mp_obj_new_tuple(2, size_tuple); + result_tuple[2] = mp_obj_new_float(retval.angle); + return mp_obj_new_tuple(3, result_tuple); +} + +mp_obj_t cv2_imgproc_minEnclosingCircle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_points }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat points = mp_obj_to_mat(args[ARG_points].u_obj); + + Point2f center; + float radius; + + // Call the corresponding OpenCV function + try { + minEnclosingCircle(points, center, radius); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result as a tuple + mp_obj_t center_tuple[2]; + center_tuple[0] = mp_obj_new_float(center.x); + center_tuple[1] = mp_obj_new_float(center.y); + mp_obj_t result_tuple[3]; + result_tuple[0] = mp_obj_new_tuple(2, center_tuple); + result_tuple[1] = mp_obj_new_float(radius); + return mp_obj_new_tuple(2, result_tuple); +} + +mp_obj_t cv2_imgproc_minEnclosingTriangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_points, ARG_triangle }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_points, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_triangle, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat points = mp_obj_to_mat(args[ARG_points].u_obj); + Mat triangle = mp_obj_to_mat(args[ARG_triangle].u_obj); + + mp_float_t retval; + + // Call the corresponding OpenCV function + try { + retval = minEnclosingTriangle(points, triangle); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result as a tuple + mp_obj_t result_tuple[2]; + result_tuple[0] = mp_obj_new_float(retval); + result_tuple[1] = mat_to_mp_obj(triangle); + return mp_obj_new_tuple(2, result_tuple); +} + mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_op, ARG_kernel, ARG_dst, ARG_anchor, ARG_iterations, ARG_borderType, ARG_borderValue }; @@ -1364,6 +1864,37 @@ mp_obj_t cv2_imgproc_moments(size_t n_args, const mp_obj_t *pos_args, mp_map_t * return moments_dict; } +mp_obj_t cv2_imgproc_pointPolygonTest(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_contour, ARG_pt, ARG_measureDist }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_contour, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_pt, MP_ARG_REQUIRED | 
MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_measureDist, MP_ARG_REQUIRED | MP_ARG_BOOL, { .u_bool = false } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat contour = mp_obj_to_mat(args[ARG_contour].u_obj); + Point pt = mp_obj_to_point(args[ARG_pt].u_obj); + bool measureDist = args[ARG_measureDist].u_bool; + + mp_float_t retval; + + // Call the corresponding OpenCV function + try { + retval = pointPolygonTest(contour, pt, measureDist); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mp_obj_new_float(retval); +} + mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_img, ARG_text, ARG_org, ARG_fontFace, ARG_fontScale, ARG_color, ARG_thickness, ARG_lineType, ARG_bottomLeftOrigin }; diff --git a/src/imgproc.h b/src/imgproc.h index 590387e..7f04e5c 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -3,13 +3,21 @@ // Function declarations extern mp_obj_t cv2_imgproc_adaptiveThreshold(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_arcLength(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_approxPolyDP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_approxPolyN(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_bilateralFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_blur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_boundingRect(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_boxFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_boxPoints(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_Canny(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_connectedComponents(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); // extern mp_obj_t cv2_imgproc_connectedComponentsWithStats(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_contourArea(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_convexHull(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_convexityDefects(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -21,6 +29,8 @@ extern mp_obj_t cv2_imgproc_fillConvexPoly(size_t n_args, const mp_obj_t *pos_ar extern mp_obj_t cv2_imgproc_fillPoly(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_filter2D(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_findContours(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t 
cv2_imgproc_fitEllipse(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_fitLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_GaussianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_HoughCircles(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -28,12 +38,18 @@ extern mp_obj_t cv2_imgproc_HoughCirclesWithAccumulator(size_t n_args, const mp_ extern mp_obj_t cv2_imgproc_HoughLines(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); // extern mp_obj_t cv2_imgproc_HoughLinesP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_HoughLinesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_isContourConvex(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_Laplacian(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_line(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_matchShapes(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_matchTemplate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_medianBlur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_minAreaRect(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_minEnclosingCircle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_minEnclosingTriangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_moments(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_pointPolygonTest(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_rectangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_Scharr(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -43,13 +59,21 @@ extern mp_obj_t cv2_imgproc_threshold(size_t n_args, const mp_obj_t *pos_args, m // Python references to the functions static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_adaptiveThreshold_obj, 6, cv2_imgproc_adaptiveThreshold); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arcLength_obj, 2, cv2_imgproc_arcLength); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_approxPolyDP_obj, 3, cv2_imgproc_approxPolyDP); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_approxPolyN_obj, 2, cv2_imgproc_approxPolyN); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_bilateralFilter_obj, 4, cv2_imgproc_bilateralFilter); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_blur_obj, 2, cv2_imgproc_blur); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boundingRect_obj, 1, cv2_imgproc_boundingRect); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boxFilter_obj, 3, cv2_imgproc_boxFilter); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boxPoints_obj, 1, cv2_imgproc_boxPoints); static 
MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Canny_obj, 3, cv2_imgproc_Canny); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponents_obj, 1, cv2_imgproc_connectedComponents); // static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponentsWithStats_obj, 1, cv2_imgproc_connectedComponentsWithStats); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_contourArea_obj, 1, cv2_imgproc_contourArea); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_convexHull_obj, 1, cv2_imgproc_convexHull); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_convexityDefects_obj, 1, cv2_imgproc_convexityDefects); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_circle_obj, 4, cv2_imgproc_circle); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_cvtColor_obj, 2, cv2_imgproc_cvtColor); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_dilate_obj, 2, cv2_imgproc_dilate); @@ -61,6 +85,8 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillConvexPoly_obj, 3, cv2_imgproc static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fillPoly_obj, 3, cv2_imgproc_fillPoly); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_filter2D_obj, 3, cv2_imgproc_filter2D); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_findContours_obj, 3, cv2_imgproc_findContours); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fitEllipse_obj, 1, cv2_imgproc_fitEllipse); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_fitLine_obj, 5, cv2_imgproc_fitLine); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_GaussianBlur_obj, 3, cv2_imgproc_GaussianBlur); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_getStructuringElement_obj, 2, cv2_imgproc_getStructuringElement); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCircles_obj, 4, cv2_imgproc_HoughCircles); @@ -68,12 +94,18 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCirclesWithAccumulator_obj, 4 static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLines_obj, 4, cv2_imgproc_HoughLines); // static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesP_obj, 4, cv2_imgproc_HoughLinesP); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesWithAccumulator_obj, 4, cv2_imgproc_HoughLinesWithAccumulator); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_isContourConvex_obj, 1, cv2_imgproc_isContourConvex); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Laplacian_obj, 2, cv2_imgproc_Laplacian); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_line_obj, 4, cv2_imgproc_line); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_matchShapes_obj, 3, cv2_imgproc_matchShapes); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_matchTemplate_obj, 3, cv2_imgproc_matchTemplate); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_medianBlur_obj, 2, cv2_imgproc_medianBlur); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_minAreaRect_obj, 1, cv2_imgproc_minAreaRect); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_minEnclosingCircle_obj, 1, cv2_imgproc_minEnclosingCircle); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_minEnclosingTriangle_obj, 1, cv2_imgproc_minEnclosingTriangle); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_moments_obj, 1, cv2_imgproc_moments); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_pointPolygonTest_obj, 3, cv2_imgproc_pointPolygonTest); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_putText_obj, 6, cv2_imgproc_putText); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_rectangle_obj, 4, cv2_imgproc_rectangle); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Scharr_obj, 4, cv2_imgproc_Scharr); @@ -85,13 +117,21 @@ static 
MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_thre #define OPENCV_IMGPROC_GLOBALS \ /* Functions */ \ { MP_ROM_QSTR(MP_QSTR_adaptiveThreshold), MP_ROM_PTR(&cv2_imgproc_adaptiveThreshold_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_arcLength), MP_ROM_PTR(&cv2_imgproc_arcLength_obj) }, \ { MP_ROM_QSTR(MP_QSTR_arrowedLine), MP_ROM_PTR(&cv2_imgproc_arrowedLine_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_approxPolyDP), MP_ROM_PTR(&cv2_imgproc_approxPolyDP_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_approxPolyN), MP_ROM_PTR(&cv2_imgproc_approxPolyN_obj) }, \ { MP_ROM_QSTR(MP_QSTR_bilateralFilter), MP_ROM_PTR(&cv2_imgproc_bilateralFilter_obj) }, \ { MP_ROM_QSTR(MP_QSTR_blur), MP_ROM_PTR(&cv2_imgproc_blur_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_boundingRect), MP_ROM_PTR(&cv2_imgproc_boundingRect_obj) }, \ { MP_ROM_QSTR(MP_QSTR_boxFilter), MP_ROM_PTR(&cv2_imgproc_boxFilter_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_boxPoints), MP_ROM_PTR(&cv2_imgproc_boxPoints_obj) }, \ { MP_ROM_QSTR(MP_QSTR_Canny), MP_ROM_PTR(&cv2_imgproc_Canny_obj) }, \ { MP_ROM_QSTR(MP_QSTR_connectedComponents), MP_ROM_PTR(&cv2_imgproc_connectedComponents_obj) }, \ /* { MP_ROM_QSTR(MP_QSTR_connectedComponentsWithStats), MP_ROM_PTR(&cv2_imgproc_connectedComponentsWithStats_obj) }, */ \ + { MP_ROM_QSTR(MP_QSTR_contourArea), MP_ROM_PTR(&cv2_imgproc_contourArea_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_convexHull), MP_ROM_PTR(&cv2_imgproc_convexHull_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_convexityDefects), MP_ROM_PTR(&cv2_imgproc_convexityDefects_obj) }, \ { MP_ROM_QSTR(MP_QSTR_circle), MP_ROM_PTR(&cv2_imgproc_circle_obj) }, \ { MP_ROM_QSTR(MP_QSTR_cvtColor), MP_ROM_PTR(&cv2_imgproc_cvtColor_obj) }, \ { MP_ROM_QSTR(MP_QSTR_dilate), MP_ROM_PTR(&cv2_imgproc_dilate_obj) }, \ @@ -103,6 +143,8 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_thre { MP_ROM_QSTR(MP_QSTR_fillPoly), MP_ROM_PTR(&cv2_imgproc_fillPoly_obj) }, \ { MP_ROM_QSTR(MP_QSTR_filter2D), MP_ROM_PTR(&cv2_imgproc_filter2D_obj) }, \ { MP_ROM_QSTR(MP_QSTR_findContours), MP_ROM_PTR(&cv2_imgproc_findContours_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_fitEllipse), MP_ROM_PTR(&cv2_imgproc_fitEllipse_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_fitLine), MP_ROM_PTR(&cv2_imgproc_fitLine_obj) }, \ { MP_ROM_QSTR(MP_QSTR_GaussianBlur), MP_ROM_PTR(&cv2_imgproc_GaussianBlur_obj) }, \ { MP_ROM_QSTR(MP_QSTR_getStructuringElement), MP_ROM_PTR(&cv2_imgproc_getStructuringElement_obj) }, \ { MP_ROM_QSTR(MP_QSTR_HoughCircles), MP_ROM_PTR(&cv2_imgproc_HoughCircles_obj) }, \ @@ -110,12 +152,18 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_thre { MP_ROM_QSTR(MP_QSTR_HoughLines), MP_ROM_PTR(&cv2_imgproc_HoughLines_obj) }, \ /* { MP_ROM_QSTR(MP_QSTR_HoughLinesP), MP_ROM_PTR(&cv2_imgproc_HoughLinesP_obj) }, */ \ { MP_ROM_QSTR(MP_QSTR_HoughLinesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughLinesWithAccumulator_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_isContourConvex), MP_ROM_PTR(&cv2_imgproc_isContourConvex_obj) }, \ { MP_ROM_QSTR(MP_QSTR_Laplacian), MP_ROM_PTR(&cv2_imgproc_Laplacian_obj) }, \ { MP_ROM_QSTR(MP_QSTR_line), MP_ROM_PTR(&cv2_imgproc_line_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_matchShapes), MP_ROM_PTR(&cv2_imgproc_matchShapes_obj) }, \ { MP_ROM_QSTR(MP_QSTR_matchTemplate), MP_ROM_PTR(&cv2_imgproc_matchTemplate_obj) }, \ { MP_ROM_QSTR(MP_QSTR_medianBlur), MP_ROM_PTR(&cv2_imgproc_medianBlur_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_minAreaRect), MP_ROM_PTR(&cv2_imgproc_minAreaRect_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_minEnclosingCircle), MP_ROM_PTR(&cv2_imgproc_minEnclosingCircle_obj) }, \ + { 
MP_ROM_QSTR(MP_QSTR_minEnclosingTriangle), MP_ROM_PTR(&cv2_imgproc_minEnclosingTriangle_obj) }, \ { MP_ROM_QSTR(MP_QSTR_morphologyEx), MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) }, \ { MP_ROM_QSTR(MP_QSTR_moments), MP_ROM_PTR(&cv2_imgproc_moments_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_pointPolygonTest), MP_ROM_PTR(&cv2_imgproc_pointPolygonTest_obj) }, \ { MP_ROM_QSTR(MP_QSTR_putText), MP_ROM_PTR(&cv2_imgproc_putText_obj) }, \ { MP_ROM_QSTR(MP_QSTR_rectangle), MP_ROM_PTR(&cv2_imgproc_rectangle_obj) }, \ { MP_ROM_QSTR(MP_QSTR_Scharr), MP_ROM_PTR(&cv2_imgproc_Scharr_obj) }, \ @@ -138,6 +186,16 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_thre { MP_ROM_QSTR(MP_QSTR_MORPH_CROSS), MP_ROM_INT(1) }, \ { MP_ROM_QSTR(MP_QSTR_MORPH_ELLIPSE), MP_ROM_INT(2) }, \ \ + /* Distance types, from opencv2/imgproc.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_DIST_USER), MP_ROM_INT(-1) }, \ + { MP_ROM_QSTR(MP_QSTR_DIST_L1), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_DIST_L2), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_DIST_C), MP_ROM_INT(3) }, \ + { MP_ROM_QSTR(MP_QSTR_DIST_L12), MP_ROM_INT(4) }, \ + { MP_ROM_QSTR(MP_QSTR_DIST_FAIR), MP_ROM_INT(5) }, \ + { MP_ROM_QSTR(MP_QSTR_DIST_WELSCH), MP_ROM_INT(6) }, \ + { MP_ROM_QSTR(MP_QSTR_DIST_HUBER), MP_ROM_INT(7) }, \ + \ /* Threshold types, from opencv2/imgproc.hpp */ \ { MP_ROM_QSTR(MP_QSTR_THRESH_BINARY), MP_ROM_INT(0) }, \ { MP_ROM_QSTR(MP_QSTR_THRESH_BINARY_INV), MP_ROM_INT(1) }, \ @@ -167,6 +225,11 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_thre { MP_ROM_QSTR(MP_QSTR_CHAIN_APPROX_TC89_KCOS), MP_ROM_INT(4) }, \ { MP_ROM_QSTR(MP_QSTR_LINK_RUNS), MP_ROM_INT(5) }, \ \ + /* Shape match modes, from opencv2/imgproc.hpp */ \ + { MP_ROM_QSTR(MP_QSTR_CONTOURS_MATCH_I1), MP_ROM_INT(1) }, \ + { MP_ROM_QSTR(MP_QSTR_CONTOURS_MATCH_I2), MP_ROM_INT(2) }, \ + { MP_ROM_QSTR(MP_QSTR_CONTOURS_MATCH_I3), MP_ROM_INT(3) }, \ + \ /* Hough modes, from opencv2/imgproc.hpp */ \ { MP_ROM_QSTR(MP_QSTR_HOUGH_STANDARD), MP_ROM_INT(0) }, \ { MP_ROM_QSTR(MP_QSTR_HOUGH_PROBABILISTIC), MP_ROM_INT(1) }, \ From 3acc441c9911728b3edba4156df4978a6d6c2490 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 8 Jul 2025 14:58:31 -0600 Subject: [PATCH 094/158] Implement HoughLinesP() Fixes #3 Just converts to float --- src/imgproc.cpp | 85 ++++++++++++++++++++++++++----------------------- src/imgproc.h | 8 ++--- 2 files changed, 49 insertions(+), 44 deletions(-) diff --git a/src/imgproc.cpp b/src/imgproc.cpp index d8e6894..0b986bc 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -1348,50 +1348,55 @@ mp_obj_t cv2_imgproc_HoughLines(size_t n_args, const mp_obj_t *pos_args, mp_map_ return mat_to_mp_obj(lines); } -// mp_obj_t cv2_imgproc_HoughLinesP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { -// // Define the arguments -// enum { ARG_image, ARG_rho, ARG_theta, ARG_threshold, ARG_lines, ARG_minLineLength, ARG_maxLineGap }; -// static const mp_arg_t allowed_args[] = { -// { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, -// { MP_QSTR_rho, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, -// { MP_QSTR_theta, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, -// { MP_QSTR_threshold, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 100 } }, -// { MP_QSTR_lines, MP_ARG_OBJ, { .u_obj = mp_const_none } }, -// { MP_QSTR_minLineLength, MP_ARG_OBJ, { .u_obj = mp_const_none } }, -// { MP_QSTR_maxLineGap, MP_ARG_OBJ, { .u_obj = mp_const_none } }, -// }; +mp_obj_t cv2_imgproc_HoughLinesP(size_t n_args, const 
mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_image, ARG_rho, ARG_theta, ARG_threshold, ARG_lines, ARG_minLineLength, ARG_maxLineGap }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_rho, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_theta, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_threshold, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 100 } }, + { MP_QSTR_lines, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_minLineLength, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_maxLineGap, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; -// // Parse the arguments -// mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; -// mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); -// // Convert arguments to required types -// Mat image = mp_obj_to_mat(args[ARG_image].u_obj); -// mp_float_t rho = mp_obj_get_float(args[ARG_rho].u_obj); -// mp_float_t theta = mp_obj_get_float(args[ARG_theta].u_obj); -// int threshold = args[ARG_threshold].u_int; -// Mat lines = mp_obj_to_mat(args[ARG_lines].u_obj); -// mp_float_t minLineLength; -// if(args[ARG_minLineLength].u_obj == mp_const_none) -// minLineLength = 0; // Default value -// else -// minLineLength = mp_obj_get_float(args[ARG_minLineLength].u_obj); -// mp_float_t maxLineGap; -// if(args[ARG_maxLineGap].u_obj == mp_const_none) -// maxLineGap = 0; // Default value -// else -// maxLineGap = mp_obj_get_float(args[ARG_maxLineGap].u_obj); + // Convert arguments to required types + Mat image = mp_obj_to_mat(args[ARG_image].u_obj); + mp_float_t rho = mp_obj_get_float(args[ARG_rho].u_obj); + mp_float_t theta = mp_obj_get_float(args[ARG_theta].u_obj); + int threshold = args[ARG_threshold].u_int; + Mat lines32S; // TODO: Allow user input + mp_float_t minLineLength; + if(args[ARG_minLineLength].u_obj == mp_const_none) + minLineLength = 0; // Default value + else + minLineLength = mp_obj_get_float(args[ARG_minLineLength].u_obj); + mp_float_t maxLineGap; + if(args[ARG_maxLineGap].u_obj == mp_const_none) + maxLineGap = 0; // Default value + else + maxLineGap = mp_obj_get_float(args[ARG_maxLineGap].u_obj); -// // Call the corresponding OpenCV function -// try { -// HoughLinesP(image, lines, rho, theta, threshold, minLineLength, maxLineGap); -// } catch(Exception& e) { -// mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); -// } + // Call the corresponding OpenCV function + try { + HoughLinesP(image, lines32S, rho, theta, threshold, minLineLength, maxLineGap); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } -// // Return the result -// return mat_to_mp_obj(lines); -// } + // Convert lines to float + Mat lines; + lines.allocator = &GetNumpyAllocator(); + lines32S.convertTo(lines, CV_32F); + + // Return the result + return mat_to_mp_obj(lines); +} mp_obj_t cv2_imgproc_HoughLinesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments diff --git a/src/imgproc.h b/src/imgproc.h index 7f04e5c..323bcb5 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -36,7 +36,7 @@ extern mp_obj_t cv2_imgproc_getStructuringElement(size_t n_args, const mp_obj_t extern mp_obj_t cv2_imgproc_HoughCircles(size_t n_args, 
const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_HoughCirclesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_HoughLines(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); -// extern mp_obj_t cv2_imgproc_HoughLinesP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_HoughLinesP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_HoughLinesWithAccumulator(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_isContourConvex(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_Laplacian(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -92,7 +92,7 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_getStructuringElement_obj, 2, cv2_ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCircles_obj, 4, cv2_imgproc_HoughCircles); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughCirclesWithAccumulator_obj, 4, cv2_imgproc_HoughCirclesWithAccumulator); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLines_obj, 4, cv2_imgproc_HoughLines); -// static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesP_obj, 4, cv2_imgproc_HoughLinesP); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesP_obj, 4, cv2_imgproc_HoughLinesP); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_HoughLinesWithAccumulator_obj, 4, cv2_imgproc_HoughLinesWithAccumulator); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_isContourConvex_obj, 1, cv2_imgproc_isContourConvex); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Laplacian_obj, 2, cv2_imgproc_Laplacian); @@ -149,8 +149,8 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_thre { MP_ROM_QSTR(MP_QSTR_getStructuringElement), MP_ROM_PTR(&cv2_imgproc_getStructuringElement_obj) }, \ { MP_ROM_QSTR(MP_QSTR_HoughCircles), MP_ROM_PTR(&cv2_imgproc_HoughCircles_obj) }, \ { MP_ROM_QSTR(MP_QSTR_HoughCirclesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughCirclesWithAccumulator_obj) }, \ - { MP_ROM_QSTR(MP_QSTR_HoughLines), MP_ROM_PTR(&cv2_imgproc_HoughLines_obj) }, \ - /* { MP_ROM_QSTR(MP_QSTR_HoughLinesP), MP_ROM_PTR(&cv2_imgproc_HoughLinesP_obj) }, */ \ + { MP_ROM_QSTR(MP_QSTR_HoughLines), MP_ROM_PTR(&cv2_imgproc_HoughLines_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_HoughLinesP), MP_ROM_PTR(&cv2_imgproc_HoughLinesP_obj) }, \ { MP_ROM_QSTR(MP_QSTR_HoughLinesWithAccumulator), MP_ROM_PTR(&cv2_imgproc_HoughLinesWithAccumulator_obj) }, \ { MP_ROM_QSTR(MP_QSTR_isContourConvex), MP_ROM_PTR(&cv2_imgproc_isContourConvex_obj) }, \ { MP_ROM_QSTR(MP_QSTR_Laplacian), MP_ROM_PTR(&cv2_imgproc_Laplacian_obj) }, \ From 2486762b683954312016ed3a28919816b6bd6f17 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 8 Jul 2025 16:11:57 -0600 Subject: [PATCH 095/158] Add connectedComponentsWithStats() Fixes #6 Just convert output matrices to floats --- src/imgproc.cpp | 93 +++++++++++++++++++++++++++---------------------- src/imgproc.h | 6 ++-- 2 files changed, 54 insertions(+), 45 deletions(-) diff --git a/src/imgproc.cpp b/src/imgproc.cpp index 0b986bc..0a2dd0f 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -426,48 +426,57 @@ mp_obj_t cv2_imgproc_connectedComponents(size_t n_args, const mp_obj_t *pos_args return mp_obj_new_tuple(2, result); } -// mp_obj_t cv2_imgproc_connectedComponentsWithStats(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { -// // Define the arguments -// enum { ARG_image, ARG_labels, ARG_stats, 
ARG_centroids, ARG_connectivity, ARG_ltype }; -// static const mp_arg_t allowed_args[] = { -// { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, -// { MP_QSTR_labels, MP_ARG_OBJ, { .u_obj = mp_const_none } }, -// { MP_QSTR_stats, MP_ARG_OBJ, { .u_obj = mp_const_none } }, -// { MP_QSTR_centroids, MP_ARG_OBJ, { .u_obj = mp_const_none } }, -// { MP_QSTR_connectivity, MP_ARG_INT, { .u_int = 8 } }, -// { MP_QSTR_ltype, MP_ARG_INT, { .u_int = CV_16U } }, // Normally CV_32S, but ulab doesn't support 32-bit integers -// }; - -// // Parse the arguments -// mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; -// mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); - -// // Convert arguments to required types -// Mat image = mp_obj_to_mat(args[ARG_image].u_obj); -// Mat labels = mp_obj_to_mat(args[ARG_labels].u_obj); -// Mat stats = mp_obj_to_mat(args[ARG_stats].u_obj); -// Mat centroids = mp_obj_to_mat(args[ARG_centroids].u_obj); -// int connectivity = args[ARG_connectivity].u_int; -// int ltype = args[ARG_ltype].u_int; - -// // Return value -// int retval = 0; - -// // Call the corresponding OpenCV function -// try { -// retval = connectedComponentsWithStats(image, labels, stats, centroids, connectivity, ltype); -// } catch(Exception& e) { -// mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); -// } - -// // Return the result -// mp_obj_t result[4]; -// result[0] = mp_obj_new_int(retval); -// result[1] = mat_to_mp_obj(labels); -// result[2] = mat_to_mp_obj(stats); -// result[3] = mat_to_mp_obj(centroids); -// return mp_obj_new_tuple(4, result); -// } +mp_obj_t cv2_imgproc_connectedComponentsWithStats(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_image, ARG_labels, ARG_stats, ARG_centroids, ARG_connectivity, ARG_ltype }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_image, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_labels, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_stats, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_centroids, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_connectivity, MP_ARG_INT, { .u_int = 8 } }, + { MP_QSTR_ltype, MP_ARG_INT, { .u_int = CV_16U } }, // Normally CV_32S, but ulab doesn't support 32-bit integers + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat image = mp_obj_to_mat(args[ARG_image].u_obj); + Mat labels32S; // TODO: Allow user input + Mat stats32S; // TODO: Allow user input + Mat centroids64F; // TODO: Allow user input + int connectivity = args[ARG_connectivity].u_int; + int ltype = args[ARG_ltype].u_int; + + // Return value + int retval = 0; + + // Call the corresponding OpenCV function + try { + retval = connectedComponentsWithStats(image, labels32S, stats32S, centroids64F, connectivity, ltype); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Convert output matrices to float + Mat labels, stats, centroids; + labels.allocator = &GetNumpyAllocator(); + stats.allocator = &GetNumpyAllocator(); + centroids.allocator = &GetNumpyAllocator(); + labels32S.convertTo(labels, CV_32F); + stats32S.convertTo(stats, CV_32F); + centroids64F.convertTo(centroids, CV_32F); + + // Return the result + mp_obj_t result[4]; + result[0] = mp_obj_new_int(retval); + result[1] = 
mat_to_mp_obj(labels); + result[2] = mat_to_mp_obj(stats); + result[3] = mat_to_mp_obj(centroids); + return mp_obj_new_tuple(4, result); +} mp_obj_t cv2_imgproc_contourArea(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments diff --git a/src/imgproc.h b/src/imgproc.h index 323bcb5..f3a3fd2 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -14,7 +14,7 @@ extern mp_obj_t cv2_imgproc_boxFilter(size_t n_args, const mp_obj_t *pos_args, m extern mp_obj_t cv2_imgproc_boxPoints(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_Canny(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_connectedComponents(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); -// extern mp_obj_t cv2_imgproc_connectedComponentsWithStats(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_connectedComponentsWithStats(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_contourArea(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_convexHull(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_convexityDefects(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -70,7 +70,7 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boxFilter_obj, 3, cv2_imgproc_boxF static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boxPoints_obj, 1, cv2_imgproc_boxPoints); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Canny_obj, 3, cv2_imgproc_Canny); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponents_obj, 1, cv2_imgproc_connectedComponents); -// static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponentsWithStats_obj, 1, cv2_imgproc_connectedComponentsWithStats); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponentsWithStats_obj, 1, cv2_imgproc_connectedComponentsWithStats); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_contourArea_obj, 1, cv2_imgproc_contourArea); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_convexHull_obj, 1, cv2_imgproc_convexHull); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_convexityDefects_obj, 1, cv2_imgproc_convexityDefects); @@ -128,7 +128,7 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_thre { MP_ROM_QSTR(MP_QSTR_boxPoints), MP_ROM_PTR(&cv2_imgproc_boxPoints_obj) }, \ { MP_ROM_QSTR(MP_QSTR_Canny), MP_ROM_PTR(&cv2_imgproc_Canny_obj) }, \ { MP_ROM_QSTR(MP_QSTR_connectedComponents), MP_ROM_PTR(&cv2_imgproc_connectedComponents_obj) }, \ - /* { MP_ROM_QSTR(MP_QSTR_connectedComponentsWithStats), MP_ROM_PTR(&cv2_imgproc_connectedComponentsWithStats_obj) }, */ \ + { MP_ROM_QSTR(MP_QSTR_connectedComponentsWithStats), MP_ROM_PTR(&cv2_imgproc_connectedComponentsWithStats_obj) }, \ { MP_ROM_QSTR(MP_QSTR_contourArea), MP_ROM_PTR(&cv2_imgproc_contourArea_obj) }, \ { MP_ROM_QSTR(MP_QSTR_convexHull), MP_ROM_PTR(&cv2_imgproc_convexHull_obj) }, \ { MP_ROM_QSTR(MP_QSTR_convexityDefects), MP_ROM_PTR(&cv2_imgproc_convexityDefects_obj) }, \ From 3e57d35c0442b9102db4adfc6217cb95f5acd758 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 9 Jul 2025 11:58:05 -0600 Subject: [PATCH 096/158] Fix HM01B0 reset Fixes #21 Proper reset sequence with retries, HM01B0 seems to need it Also add back all original register initialization settings --- cv2_drivers/cameras/hm01b0.py | 52 +++++++++++++++++++++++++++++++++-- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/cv2_drivers/cameras/hm01b0.py 
b/cv2_drivers/cameras/hm01b0.py index d07581e..e42713b 100644 --- a/cv2_drivers/cameras/hm01b0.py +++ b/cv2_drivers/cameras/hm01b0.py @@ -3,7 +3,7 @@ import cv2 # Derived from: -# https://github.com/openmv/openmv/blob/5acf5baf92b4314a549bdd068138e5df6cc0bac7/drivers/sensors/hm01b0.c +# https://github.com/openmv/openmv/blob/5acf5baf92b4314a549bdd068138e5df6cc0bac7/drivers/sensors/hm01b0.c class HM01B0(DVP_Camera): # Read only registers @@ -128,6 +128,8 @@ class HM01B0(DVP_Camera): HIMAX_FRAME_LENGTH_QQVGA = 0x084 INIT_COMMANDS = ( + (BLC_TGT, 0x08), # BLC target :8 at 8 bit mode + (BLC2_TGT, 0x08), # BLI target :8 at 8 bit mode (0x3044, 0x0A), # Increase CDS time for settling (0x3045, 0x00), # Make symmetric for cds_tg and rst_tg (0x3047, 0x0A), # Increase CDS time for settling @@ -145,10 +147,36 @@ class HM01B0(DVP_Camera): (0x3065, 0x04), # pad pull 0 (ANA_Register_17, 0x00), # Disable internal oscillator + (BLC_CFG, 0x43), # BLC_on, IIR + + (0x1001, 0x43), # BLC dithering en + (0x1002, 0x43), # blc_darkpixel_thd + (0x0350, 0x7F), # Dgain Control + (BLI_EN, 0x01), # BLI enable + (0x1003, 0x00), # BLI Target [Def: 0x20] + + (DPC_CTRL, 0x01), # DPC option 0: DPC off 1 : mono 3 : bayer1 5 : bayer2 + (0x1009, 0xA0), # cluster hot pixel th + (0x100A, 0x60), # cluster cold pixel th + (SINGLE_THR_HOT, 0x90), # single hot pixel th + (SINGLE_THR_COLD, 0x40), # single cold pixel th (0x1012, 0x00), # Sync. shift disable + (STATISTIC_CTRL, 0x07), # AE stat en | MD LROI stat en | magic + (0x2003, 0x00), + (0x2004, 0x1C), + (0x2007, 0x00), + (0x2008, 0x58), + (0x200B, 0x00), + (0x200C, 0x7A), + (0x200F, 0x00), + (0x2010, 0xB8), + (0x2013, 0x00), + (0x2014, 0x58), + (0x2017, 0x00), + (0x2018, 0x9B), (AE_CTRL, 0x01), #Automatic Exposure - (AE_TARGET_MEAN, 0x80), #AE target mean [Def: 0x3C] + (AE_TARGET_MEAN, 0x64), #AE target mean [Def: 0x3C] (AE_MIN_MEAN, 0x0A), #AE min target mean [Def: 0x0A] (CONVERGE_IN_TH, 0x03), #Converge in threshold [Def: 0x03] (CONVERGE_OUT_TH, 0x05), #Converge out threshold [Def: 0x05] @@ -165,12 +193,23 @@ class HM01B0(DVP_Camera): (DIGITAL_GAIN_H, 0x01), #Digital Gain High [Def: 0x01] (DIGITAL_GAIN_L, 0x00), #Digital Gain Low [Def: 0x00] + (FS_CTRL, 0x00), #Flicker Control + + (FS_60HZ_H, 0x00), + (FS_60HZ_L, 0x3C), + (FS_50HZ_H, 0x00), + (FS_50HZ_L, 0x32), + (MD_CTRL, 0x00), (FRAME_LEN_LINES_H, HIMAX_FRAME_LENGTH_QVGA >> 8), (FRAME_LEN_LINES_L, HIMAX_FRAME_LENGTH_QVGA & 0xFF), (LINE_LEN_PCK_H, HIMAX_LINE_LEN_PCK_QVGA >> 8), (LINE_LEN_PCK_L, HIMAX_LINE_LEN_PCK_QVGA & 0xFF), (QVGA_WIN_EN, 0x01), # Enable QVGA window readout + (0x0383, 0x01), + (0x0387, 0x01), + (0x0390, 0x00), + (0x3011, 0x70), (0x3059, 0x22), # 1-bit mode (OSC_CLK_DIV, 0x14), (IMG_ORIENTATION, 0x00), # change the orientation @@ -217,7 +256,14 @@ def soft_reset(self): Performs a software reset of the HM01B0 sensor. This resets the sensor to its default state.
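        A minimal caller-side sketch (the name `cam` is an assumption; any
        constructed HM01B0 instance works, and after this patch the call
        retries internally until MODE_SELECT reports standby):

            cam.soft_reset()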
""" - self.writeRegister(self.SW_RESET, self.HIMAX_RESET) + # HM01B0 can require multiple attempts to reset properly + for i in range(self.HIMAX_BOOT_RETRY): + self.writeRegister(self.SW_RESET, self.HIMAX_RESET) + sleep_us(1000) + mode = self.readRegister(self.MODE_SELECT) + if mode[0] == self.HIMAX_MODE_STANDBY: + break + sleep_us(10000) def setMode(self, mode): """ From 307ea7c7fff00b1568b8b827662a829088499d11 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 9 Jul 2025 13:05:38 -0600 Subject: [PATCH 097/158] RP2 DVP PIO driver: Disable DMA in active() Fixes #28 --- cv2_drivers/cameras/dvp_rp2_pio.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cv2_drivers/cameras/dvp_rp2_pio.py b/cv2_drivers/cameras/dvp_rp2_pio.py index 7f6e87d..9a1879b 100644 --- a/cv2_drivers/cameras/dvp_rp2_pio.py +++ b/cv2_drivers/cameras/dvp_rp2_pio.py @@ -76,6 +76,9 @@ def active(self, active = None): if active == None: return self.sm.active() + # Disable the DMA, the VSYNC handler will re-enable it when needed + self.dma.active(False) + # Set the active state of the state machine self.sm.active(active) From ca68e8c81f6f42aecc3f0c7cdd2b1750a15cbe9c Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 9 Jul 2025 15:30:13 -0600 Subject: [PATCH 098/158] Initial CST816 touch screen driver Resolves #23 --- cv2_drivers/__init__.py | 3 +- cv2_drivers/touch_screens/__init__.py | 1 + cv2_drivers/touch_screens/cst816.py | 104 ++++++++++++++++++ cv2_drivers/touch_screens/cv2_touch_screen.py | 5 + 4 files changed, 112 insertions(+), 1 deletion(-) create mode 100644 cv2_drivers/touch_screens/__init__.py create mode 100644 cv2_drivers/touch_screens/cst816.py create mode 100644 cv2_drivers/touch_screens/cv2_touch_screen.py diff --git a/cv2_drivers/__init__.py b/cv2_drivers/__init__.py index b7341bf..7de84a0 100644 --- a/cv2_drivers/__init__.py +++ b/cv2_drivers/__init__.py @@ -1,2 +1,3 @@ from . import displays -from . import cameras \ No newline at end of file +from . import cameras +from . import touch_screens \ No newline at end of file diff --git a/cv2_drivers/touch_screens/__init__.py b/cv2_drivers/touch_screens/__init__.py new file mode 100644 index 0000000..780188a --- /dev/null +++ b/cv2_drivers/touch_screens/__init__.py @@ -0,0 +1 @@ +from . 
import cst816 \ No newline at end of file diff --git a/cv2_drivers/touch_screens/cst816.py b/cv2_drivers/touch_screens/cst816.py new file mode 100644 index 0000000..f854b6e --- /dev/null +++ b/cv2_drivers/touch_screens/cst816.py @@ -0,0 +1,104 @@ +from .cv2_touch_screen import CV2_Touch_Screen + +# Derived from: +# https://github.com/fbiego/CST816S +class CST816(CV2_Touch_Screen): + _I2C_ADDRESS = 0x15 + _CHIP_ID = 0xB6 + + # Registers + _REG_GESTURE_ID = 0x01 + _REG_FINGER_NUM = 0x02 + _REG_X_POS_H = 0x03 + _REG_X_POS_L = 0x04 + _REG_Y_POS_H = 0x05 + _REG_Y_POS_L = 0x06 + _REG_BPC0H = 0xB0 + _REG_BPC0L = 0xB1 + _REG_BPC1H = 0xB2 + _REG_BPC1L = 0xB3 + _REG_CHIP_ID = 0xA7 + _REG_PROJ_ID = 0xA8 + _REG_FW_VERSION = 0xA9 + _REG_MOTION_MASK = 0xEC + _REG_IRQ_PULSE_WIDTH = 0xED + _REG_NOR_SCAN_PER = 0xEE + _REG_MOTION_SL_ANGLE = 0xEF + _REG_LP_SCAN_RAW_1H = 0xF0 + _REG_LP_SCAN_RAW_1L = 0xF1 + _REG_LP_SCAN_RAW_2H = 0xF2 + _REG_LP_SCAN_RAW_2L = 0xF3 + _REG_LP_AUTO_WAKE_TIME = 0xF4 + _REG_LP_SCAN_TH = 0xF5 + _REG_LP_SCAN_WIN = 0xF6 + _REG_LP_SCAN_FREQ = 0xF7 + _REG_LP_SCAN_IDAC = 0xF8 + _REG_AUTO_SLEEP_TIME = 0xF9 + _REG_IRQ_CTL = 0xFA + _REG_AUTO_RESET = 0xFB + _REG_LONG_PRESS_TIME = 0xFC + _REG_IO_CTL = 0xFD + _REG_DIS_AUTO_SLEEP = 0xFE + + def __init__(self, i2c, address=_I2C_ADDRESS, width=240, height=320, rotation=1): + self.i2c = i2c + self.address = address + self.width = width + self.height = height + self.rotation = rotation + + def is_connected(self): + """ + Check if the CST816 touch screen is connected by reading the chip ID. + + Returns: + bool: True if connected, False otherwise + """ + try: + # Try to read the chip ID + # If it throws an I/O error - the device isn't connected + chip_id = self.read_register_value(self._REG_CHIP_ID) + + # Confirm the chip ID is correct + if chip_id == self._CHIP_ID: + return True + else: + return False + except: + return False + + def getChipID(self): + return self.read_register_value(self._REG_CHIP_ID) + + def get_touch(self): + x = self.read_register_value(self._REG_X_POS_H, 2) & 0x0FFF + y = self.read_register_value(self._REG_Y_POS_H, 2) & 0x0FFF + touch_num = self.read_register_value(self._REG_FINGER_NUM) + + # Adjust for the rotation + if self.rotation == 0: + x,y = x, y + elif self.rotation == 1: + x,y = y, self.width - x + elif self.rotation == 2: + x,y = self.height - x, self.width - y + elif self.rotation == 3: + x,y = self.height - y, x + + return (x, y, touch_num) + + def read_register_value(self, reg, num_bytes=1): + """ + Read `num_bytes` bytes starting at the specified register and combine them into a single big-endian value.
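+
+        For example, a two-byte read combines as follows (this mirrors the
+        loop below; `data` is the raw buffer returned by `readfrom_mem`):
+
+            value = (data[0] << 8) | data[1]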
+ + Args: + reg (int): Register address to read from + + Returns: + int: Value read from the register + """ + data = self.i2c.readfrom_mem(self.address, reg, num_bytes) + value = 0 + for i in range(num_bytes): + value = (value << 8) | data[i] + return value diff --git a/cv2_drivers/touch_screens/cv2_touch_screen.py b/cv2_drivers/touch_screens/cv2_touch_screen.py new file mode 100644 index 0000000..3940dd3 --- /dev/null +++ b/cv2_drivers/touch_screens/cv2_touch_screen.py @@ -0,0 +1,5 @@ +class CV2_Touch_Screen(): + def __init__(self): + pass + + # TODO: Implement common methods for all touch screens From 3ac97b822599b28348086962f13212013d82e5f6 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 10 Jul 2025 10:08:19 -0600 Subject: [PATCH 099/158] Move manifest.py out of cv2_drivers Fixes #30 --- Makefile | 2 +- cv2_drivers/manifest.py => manifest.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename cv2_drivers/manifest.py => manifest.py (85%) diff --git a/Makefile b/Makefile index e722085..43fc78f 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ CURRENT_DIR = $(shell pwd) MAKE_ARGS = USER_C_MODULES="$(CURRENT_DIR)/src/opencv_upy.cmake" # Use the OpenCV driver manifest -MAKE_ARGS += FROZEN_MANIFEST="$(CURRENT_DIR)/cv2_drivers/manifest.py" +MAKE_ARGS += FROZEN_MANIFEST="$(CURRENT_DIR)/manifest.py" # Build MicroPython with the OpenCV module all: diff --git a/cv2_drivers/manifest.py b/manifest.py similarity index 85% rename from cv2_drivers/manifest.py rename to manifest.py index c556dbf..21966ed 100644 --- a/cv2_drivers/manifest.py +++ b/manifest.py @@ -3,7 +3,7 @@ include("$(BOARD_DIR)/manifest.py") # Include this directory as one package -package("cv2_drivers", base_path="..") +package("cv2_drivers") # Include the SD card module require("sdcard") From 90f87dca1a17965b4b9600e0995b54831bcdb4e8 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 10 Jul 2025 10:09:46 -0600 Subject: [PATCH 100/158] Remove TODO comment for ULAB_MAX_DIMS 4 dimensions is required for some OpenCV things --- src/opencv_upy.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/opencv_upy.cmake b/src/opencv_upy.cmake index c06537c..ee74b6c 100644 --- a/src/opencv_upy.cmake +++ b/src/opencv_upy.cmake @@ -31,8 +31,8 @@ set(MICROPY_C_HEAP_SIZE 65536) # functions set(MICROPY_TRACKED_ALLOC 1) -# Set ULAB max number of dimensions to 4 (default is 2). TODO: See if 4 is -# actually needed, or if we can get away with 2. 
+# Set ULAB max number of dimensions to 4 (default is 2), which is needed for +# some OpenCV functions target_compile_definitions(usermod INTERFACE ULAB_MAX_DIMS=4) # Include ULAB From a70c0471b02d97c649b67838424e6274599d386e Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 10 Jul 2025 11:25:51 -0600 Subject: [PATCH 101/158] Remove dependency on OpenCV fork Fixes #31 --- .gitmodules | 2 +- src/opencv/opencv | 2 +- src/opencv/platforms/include/zephyr_stdint.h | 79 ++++++++++++++++++++ src/opencv/platforms/rp2350.toolchain.cmake | 9 +++ src/opencv_upy.cmake | 5 ++ 5 files changed, 95 insertions(+), 2 deletions(-) create mode 100644 src/opencv/platforms/include/zephyr_stdint.h diff --git a/.gitmodules b/.gitmodules index 59bc0ec..860523c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,6 @@ [submodule "src/opencv/opencv"] path = src/opencv/opencv - url = https://github.com/sfe-SparkFro/opencv.git + url = https://github.com/opencv/opencv.git [submodule "src/ulab"] path = src/ulab url = https://github.com/v923z/micropython-ulab.git diff --git a/src/opencv/opencv b/src/opencv/opencv index 3dc189e..31b0eee 160000 --- a/src/opencv/opencv +++ b/src/opencv/opencv @@ -1 +1 @@ -Subproject commit 3dc189e3990d03a66335aa60d11f2e7df7ea01d1 +Subproject commit 31b0eeea0b44b370fd0712312df4214d4ae1b158 diff --git a/src/opencv/platforms/include/zephyr_stdint.h b/src/opencv/platforms/include/zephyr_stdint.h new file mode 100644 index 0000000..750db64 --- /dev/null +++ b/src/opencv/platforms/include/zephyr_stdint.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2019 BayLibre SAS + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_TOOLCHAIN_STDINT_H_ +#define ZEPHYR_INCLUDE_TOOLCHAIN_STDINT_H_ + +/* + * Some gcc versions and/or configurations as found in the Zephyr SDK + * (questionably) define __INT32_TYPE__ and derivatives as a long int + * which makes the printf format checker to complain about long vs int + * mismatch when %u is given a uint32_t argument, and uint32_t pointers not + * being compatible with int pointers. Let's redefine them to follow + * common expectations and usage. + */ + +#if __SIZEOF_INT__ != 4 +#error "unexpected int width" +#endif + +#undef __INT32_TYPE__ +#undef __UINT32_TYPE__ +#undef __INT_FAST32_TYPE__ +#undef __UINT_FAST32_TYPE__ +#undef __INT_LEAST32_TYPE__ +#undef __UINT_LEAST32_TYPE__ +#undef __INT64_TYPE__ +#undef __UINT64_TYPE__ +#undef __INT_FAST64_TYPE__ +#undef __UINT_FAST64_TYPE__ +#undef __INT_LEAST64_TYPE__ +#undef __UINT_LEAST64_TYPE__ + +#define __INT32_TYPE__ int +#define __UINT32_TYPE__ unsigned int +#define __INT_FAST32_TYPE__ __INT32_TYPE__ +#define __UINT_FAST32_TYPE__ __UINT32_TYPE__ +#define __INT_LEAST32_TYPE__ __INT32_TYPE__ +#define __UINT_LEAST32_TYPE__ __UINT32_TYPE__ +#define __INT64_TYPE__ long long int +#define __UINT64_TYPE__ unsigned long long int +#define __INT_FAST64_TYPE__ __INT64_TYPE__ +#define __UINT_FAST64_TYPE__ __UINT64_TYPE__ +#define __INT_LEAST64_TYPE__ __INT64_TYPE__ +#define __UINT_LEAST64_TYPE__ __UINT64_TYPE__ + +/* + * The confusion also exists with __INTPTR_TYPE__ which is either an int + * (even when __INT32_TYPE__ is a long int) or a long int. Let's redefine + * it to a long int to get some uniformity. Doing so also makes it compatible + * with LP64 (64-bit) targets where a long is always 64-bit wide. 
+ */ + +#if __SIZEOF_POINTER__ != __SIZEOF_LONG__ +#error "unexpected size difference between pointers and long ints" +#endif + +#undef __INTPTR_TYPE__ +#undef __UINTPTR_TYPE__ +#define __INTPTR_TYPE__ long int +#define __UINTPTR_TYPE__ long unsigned int + +/* + * Re-define the INTN_C(value) integer constant expression macros to match the + * integer types re-defined above. + */ + +#undef __INT32_C +#undef __UINT32_C +#undef __INT64_C +#undef __UINT64_C +#define __INT32_C(c) c +#define __UINT32_C(c) c ## U +#define __INT64_C(c) c ## LL +#define __UINT64_C(c) c ## ULL + +#endif /* ZEPHYR_INCLUDE_TOOLCHAIN_STDINT_H_ */ \ No newline at end of file diff --git a/src/opencv/platforms/rp2350.toolchain.cmake b/src/opencv/platforms/rp2350.toolchain.cmake index 1c420b4..853aa36 100644 --- a/src/opencv/platforms/rp2350.toolchain.cmake +++ b/src/opencv/platforms/rp2350.toolchain.cmake @@ -12,6 +12,15 @@ include("${CMAKE_CURRENT_LIST_DIR}/common.cmake") # Set RP2350 specific settings set(OPENCV_DISABLE_THREAD_SUPPORT ON) +# Add compiler flag -D_M_CEE +# set(CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS_INIT} -D_M_CEE") +# set(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} -D_M_CEE") + # Fix for https://github.com/raspberrypi/pico-sdk/issues/2505 set(CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS_INIT} -DOPENCV_INCLUDE_PORT_FILE=\\\"${CMAKE_CURRENT_LIST_DIR}/include/rp2350_unsafe_cv_xadd.h\\\"") set(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} -DOPENCV_INCLUDE_PORT_FILE=\\\"${CMAKE_CURRENT_LIST_DIR}/include/rp2350_unsafe_cv_xadd.h\\\"") + +# Fix for https://github.com/sparkfun/micropython-opencv/issues/31 +# Source: https://docs.zephyrproject.org/4.0.0/doxygen/html/zephyr__stdint_8h_source.html +set(CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS_INIT} -imacros ${CMAKE_CURRENT_LIST_DIR}/include/zephyr_stdint.h") +set(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} -imacros ${CMAKE_CURRENT_LIST_DIR}/include/zephyr_stdint.h") diff --git a/src/opencv_upy.cmake b/src/opencv_upy.cmake index ee74b6c..3f39c54 100644 --- a/src/opencv_upy.cmake +++ b/src/opencv_upy.cmake @@ -50,3 +50,8 @@ target_link_libraries(usermod INTERFACE "-Wl,--wrap,malloc") target_link_libraries(usermod INTERFACE "-Wl,--wrap,free") target_link_libraries(usermod INTERFACE "-Wl,--wrap,calloc") target_link_libraries(usermod INTERFACE "-Wl,--wrap,realloc") + +# __NEWLIB__ is not defined for some reason, which causes a conflicting +# definition of uint here: +# https://github.com/opencv/opencv/blob/9cdd525bc59b34a3db8f6db905216c5398ca93d6/modules/core/include/opencv2/core/hal/interface.h#L35-L39 +target_compile_definitions(usermod INTERFACE -D__NEWLIB__) From 4f1acc6db7f6f735d85a96b6eadb90f3f6292f0a Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 10 Jul 2025 11:50:39 -0600 Subject: [PATCH 102/158] Remove commented lines from cmake file --- src/opencv/platforms/rp2350.toolchain.cmake | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/opencv/platforms/rp2350.toolchain.cmake b/src/opencv/platforms/rp2350.toolchain.cmake index 853aa36..56b0b13 100644 --- a/src/opencv/platforms/rp2350.toolchain.cmake +++ b/src/opencv/platforms/rp2350.toolchain.cmake @@ -12,10 +12,6 @@ include("${CMAKE_CURRENT_LIST_DIR}/common.cmake") # Set RP2350 specific settings set(OPENCV_DISABLE_THREAD_SUPPORT ON) -# Add compiler flag -D_M_CEE -# set(CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS_INIT} -D_M_CEE") -# set(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} -D_M_CEE") - # Fix for https://github.com/raspberrypi/pico-sdk/issues/2505 set(CMAKE_C_FLAGS_INIT "${CMAKE_C_FLAGS_INIT} 
-DOPENCV_INCLUDE_PORT_FILE=\\\"${CMAKE_CURRENT_LIST_DIR}/include/rp2350_unsafe_cv_xadd.h\\\"") set(CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT} -DOPENCV_INCLUDE_PORT_FILE=\\\"${CMAKE_CURRENT_LIST_DIR}/include/rp2350_unsafe_cv_xadd.h\\\"") From d9501a71af2065747617d6b58ac7b23f14bc79dc Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 10 Jul 2025 13:13:45 -0600 Subject: [PATCH 103/158] Update to use large binary variant Progress towards #29, should rename to "OpenCV" before launch --- .github/workflows/build.yml | 2 +- Makefile | 7 +++++-- micropython | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c801370..91ec7d8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -34,4 +34,4 @@ jobs: uses: actions/upload-artifact@v4 with: name: firmware.uf2 - path: micropython/ports/rp2/build-SPARKFUN_XRP_CONTROLLER/firmware.uf2 + path: micropython/ports/rp2/build-SPARKFUN_XRP_CONTROLLER-LARGE_BINARY/firmware.uf2 diff --git a/Makefile b/Makefile index 43fc78f..b73912c 100644 --- a/Makefile +++ b/Makefile @@ -7,6 +7,9 @@ CURRENT_DIR = $(shell pwd) # Set the MicroPython user C module path to the OpenCV module MAKE_ARGS = USER_C_MODULES="$(CURRENT_DIR)/src/opencv_upy.cmake" +# Ensure we're building the OpenCV board variant +MAKE_ARGS += BOARD_VARIANT=LARGE_BINARY + # Use the OpenCV driver manifest MAKE_ARGS += FROZEN_MANIFEST="$(CURRENT_DIR)/manifest.py" @@ -16,8 +19,8 @@ all: # Clean the MicroPython build clean: - @cd micropython/ports/rp2 && make -f Makefile $(MAKEFLAGS) clean + @cd micropython/ports/rp2 && make -f Makefile $(MAKEFLAGS) $(MAKE_ARGS) clean # Load the MicroPython submodules submodules: - @cd micropython/ports/rp2 && make -f Makefile $(MAKEFLAGS) submodules + @cd micropython/ports/rp2 && make -f Makefile $(MAKEFLAGS) $(MAKE_ARGS) submodules diff --git a/micropython b/micropython index 186caf9..7e728e8 160000 --- a/micropython +++ b/micropython @@ -1 +1 @@ -Subproject commit 186caf9f0326c9d61494a7d5c6d0408c0fef8485 +Subproject commit 7e728e8c6aad74ca244183f3e0705db6f332abd9 From 9edd99595fc7e324a47877636f211887ded0fda3 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 10 Jul 2025 16:50:35 -0600 Subject: [PATCH 104/158] Move hardware initialization out of boot.py There are potential downsides to requiring use of boot.py: * It's a bit obscure * Not all IDEs (eg. 
XRPCode) properly support boot.py * It's possible to brick a device with a bad boot.py Now all hardware initialization is in an example module, which all examples import Users can still move the contents to a boot.py file if they desire --- examples/boot.py | 127 --------------------- examples/cv2_hardware_init/__init__.py | 40 +++++++ examples/cv2_hardware_init/bus_i2c.py | 11 ++ examples/cv2_hardware_init/bus_spi.py | 13 +++ examples/cv2_hardware_init/camera.py | 38 ++++++ examples/cv2_hardware_init/display.py | 37 ++++++ examples/cv2_hardware_init/sd_card.py | 50 ++++++++ examples/cv2_hardware_init/touch_screen.py | 16 +++ examples/ex01_hello_opencv.py | 8 ++ examples/ex02_imread_imwrite.py | 3 +- examples/ex03_camera.py | 3 +- 11 files changed, 217 insertions(+), 129 deletions(-) delete mode 100644 examples/boot.py create mode 100644 examples/cv2_hardware_init/__init__.py create mode 100644 examples/cv2_hardware_init/bus_i2c.py create mode 100644 examples/cv2_hardware_init/bus_spi.py create mode 100644 examples/cv2_hardware_init/camera.py create mode 100644 examples/cv2_hardware_init/display.py create mode 100644 examples/cv2_hardware_init/sd_card.py create mode 100644 examples/cv2_hardware_init/touch_screen.py diff --git a/examples/boot.py b/examples/boot.py deleted file mode 100644 index 6abf1cc..0000000 --- a/examples/boot.py +++ /dev/null @@ -1,127 +0,0 @@ -# Import the machine module to access hardware features -import machine - -# Initialize SPI bus, assuming default pins on bus 0. You may need to adjust -# this based on your specific board and configuration -spi = machine.SPI(0) - -# Initialize I2C bus, assuming default pins on bus 0. You may need to adjust -# this based on your specific board and configuration -i2c = machine.I2C(0) - -# Initialize display, if available -try: - # Import a display driver module. Multiple options are provided below, so - # you can choose the one that best fits your needs. You may need to adjust - # the parameters based on your specific display and board configuration - import cv2_drivers.displays as displays - - ############################################################################ - # ST7789 - A very popular display for embedded systems - ############################################################################ - - # SPI interface. This should work on any platform, but it's not always the - # fastest option (24Mbps on RP2350) - display = displays.st7789_spi.ST7789_SPI( - width=240, - height=320, - spi=spi, - pin_dc=16, - pin_cs=17, - rotation=1 - ) - - # PIO interface. This is only available on Raspberry Pi RP2 processors, - # and is much faster than the SPI interface (up to 75Mbps on RP2350) - # display = displays.st7789_pio.ST7789_PIO( - # width=240, - # height=320, - # sm_id=1, - # pin_clk=18, - # pin_tx=19, - # pin_dc=16, - # pin_cs=17, - # rotation=1 - # ) -except ImportError: - print("boot.py - Display driver module not found, skipping display initialization.") - -# Initialize SD card, if available -try: - # Import the SD card module. This is often not installed by default in - # MicroPython, so you may need to install it manually. For example, you can - # use `mpremote mip install sdcard` - import sdcard - - # This example assumes the SD card is on the same SPI bus as the display - # with a different chip select pin. 
You may need to adjust this based on - # your specific board and configuration - sd_cs = machine.Pin(7, machine.Pin.OUT) - sd = sdcard.SDCard(spi, sd_cs) - - # Mount the SD card to the filesystem under the "/sd" directory, which makes - # it accessible just like the normal MicroPython filesystem - import uos - vfs = uos.VfsFat(sd) - uos.mount(vfs, "/sd") -except ImportError: - print("boot.py - sdcard module not found, skipping SD card initialization.") -except OSError as e: - eStr = str(e) - if "no SD card" in eStr: - print("boot.py - no SD card found, skipping SD card initialization.") - elif "Errno 1" in eStr: - print("boot.py - SD card already mounted, skipping SD card initialization.") - else: - print("boot.py - Failed to mount SD card, skipping SD card initialization.") - -# Set the SPI bus baudrate (note - the sdcard module overrides the baudrate upon -# initialization, so the baudrate should be set after that). It is recommended -# to use the fastest baudrate supported by your board, display, and SD card to -# minimize latency -spi.init(baudrate=24_000_000) - -# Attempt to put something on the display to clear the previous content -try: - # Load and display a splash image, if it's available - import cv2 - splash_image = cv2.imread("splash.png") - cv2.imshow(display, splash_image) -except Exception: - # Clear the display, if the driver supports it - if hasattr(display, 'clear'): - display.clear() - -# Initialize camera, if available -try: - # Import a camera driver module. This example assumes the HM01B0, which is - # a popular camera module for embedded systems. This example uses a PIO - # driver, which is a peripheral interface only available on Raspberry Pi RP2 - # processors - import cv2_drivers.cameras as cameras - - # Create a camera object. This will depend on the camera driver you are - # using, and you may need to adjust the parameters based on your specific - # camera and board configuration - camera = cameras.hm01b0_pio.HM01B0_PIO( - i2c, - pin_d0=12, - pin_vsync=13, - pin_hsync=14, - pin_pclk=15, - pin_xclk=None, # Optional xclock pin, specify if needed - num_data_pins=1 # Number of data pins used by the camera (1, 4, or 8) - ) - - # camera = cameras.ov5640_pio.OV5640_PIO( - # i2c, - # pin_d0=8, - # pin_vsync=22, - # pin_hsync=21, - # pin_pclk=20, - # pin_xclk=None # Optional xclock pin, specify if needed - # ) -except ImportError: - print("boot.py - Camera driver module not found, skipping camera initialization.") -except OSError: - print("boot.py - Camera initialization failed, skipping camera initialization.") diff --git a/examples/cv2_hardware_init/__init__.py b/examples/cv2_hardware_init/__init__.py new file mode 100644 index 0000000..d5e0db3 --- /dev/null +++ b/examples/cv2_hardware_init/__init__.py @@ -0,0 +1,40 @@ +# Initializes various hardware components for OpenCV in MicroPython. The +# examples import this module, but you could instead create/edit a `boot.py` +# script to automatically initialize the hardware when the board boots up. 
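+# A short usage sketch (this mirrors how the examples in this series use the
+# module; `display` is defined by the imports below, and `img` stands in for
+# any image you have loaded):
+#
+#     from cv2_hardware_init import *
+#     cv2.imshow(display, img)
+#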
See: +# https://micropython.org/resources/docs/en/latest/reference/reset_boot.html#id4 + +try: + from .display import display +except: + print("Display initialization failed, skipping...") + +try: + from .camera import camera +except: + print("Camera initialization failed, skipping...") + +try: + from .touch_screen import touch_screen +except: + print("Touch screen initialization failed, skipping...") + +try: + # We don't actually need to import anything here, just want to run the + # sd_card module so the SD card gets mounted to the filesystem. So just + # import something then delete it to avoid clutter + from .sd_card import sdcard + del sdcard +except: + print("SD card initialization failed, skipping...") + +# Optional - show a splash image on the display if one is available, or clear +# the display of any previous content +try: + # Load and display a splash image, if one is available + import cv2 + splash_image = cv2.imread("splash.png") + cv2.imshow(display, splash_image) +except Exception: + # No splash image, instead clear the display if the driver supports it + if hasattr(display, 'clear'): + display.clear() diff --git a/examples/cv2_hardware_init/bus_i2c.py b/examples/cv2_hardware_init/bus_i2c.py new file mode 100644 index 0000000..593d3d5 --- /dev/null +++ b/examples/cv2_hardware_init/bus_i2c.py @@ -0,0 +1,11 @@ +# Import the machine.I2C class +from machine import I2C + +# Initialize default I2C bus. You may need to adjust the arguments based on your +# specific board and configuration +i2c = I2C( + # id = 0, + # sda = machine.Pin(0), + # scl = machine.Pin(1), + # freq = 400_000 +) diff --git a/examples/cv2_hardware_init/bus_spi.py b/examples/cv2_hardware_init/bus_spi.py new file mode 100644 index 0000000..35fa59d --- /dev/null +++ b/examples/cv2_hardware_init/bus_spi.py @@ -0,0 +1,13 @@ +# Import the machine.SPI class +from machine import SPI + +# Initialize default SPI bus. You may need to adjust the arguments based on your +# specific board and configuration +spi = SPI( + # id = 0, + baudrate = 24_000_000, # Use the fastest baudrate you can for best performance! + # sck = machine.Pin(2), + # mosi = machine.Pin(3), + # miso = machine.Pin(4), + # freq = 100_000 +) diff --git a/examples/cv2_hardware_init/camera.py b/examples/cv2_hardware_init/camera.py new file mode 100644 index 0000000..e41bc90 --- /dev/null +++ b/examples/cv2_hardware_init/camera.py @@ -0,0 +1,38 @@ +# Initializes a camera object. Multiple options are provided below, so you can +# choose one that best fits your needs. 
You may need to adjust the arguments +# based on your specific camera and board configuration + +# Import the OpenCV camera drivers +from cv2_drivers.cameras import * + +# Import the I2C bus +from .bus_i2c import i2c + +################################################################################ +# HM01B0 +################################################################################ + +# PIO interface, only available on Raspberry Pi RP2 processors +camera = hm01b0_pio.HM01B0_PIO( + i2c, + pin_d0 = 12, + pin_vsync = 13, + pin_hsync = 14, + pin_pclk = 15, + pin_xclk = None, # Optional xclock pin, specify if needed + num_data_pins = 1 # Number of data pins used by the camera (1, 4, or 8) +) + +################################################################################ +# OV5640 +################################################################################ + +# PIO interface, only available on Raspberry Pi RP2 processors +# camera = ov5640_pio.OV5640_PIO( +# i2c, +# pin_d0 = 8, +# pin_vsync = 22, +# pin_hsync = 21, +# pin_pclk = 20, +# pin_xclk = None # Optional xclock pin, specify if needed +# ) diff --git a/examples/cv2_hardware_init/display.py b/examples/cv2_hardware_init/display.py new file mode 100644 index 0000000..56da285 --- /dev/null +++ b/examples/cv2_hardware_init/display.py @@ -0,0 +1,37 @@ +# Initializes a display object. Multiple options are provided below, so you can +# choose one that best fits your needs. You may need to adjust the arguments +# based on your specific display and board configuration + +# Import the OpenCV display drivers +from cv2_drivers.displays import * + +# Import the SPI bus +from .bus_spi import spi + +################################################################################ +# ST7789 +################################################################################ + +# SPI interface. This should work on any platform, but it's not always the +# fastest option (24Mbps on RP2350) +display = st7789_spi.ST7789_SPI( + width = 240, + height = 320, + spi = spi, + pin_dc = 16, + pin_cs = 17, + rotation = 1 +) + +# PIO interface. This is only available on Raspberry Pi RP2 processors, +# but is much faster than the SPI interface (75Mbps on RP2350) +# display = st7789_pio.ST7789_PIO( +# width = 240, +# height = 320, +# sm_id = 1, +# pin_clk = 18, +# pin_tx = 19, +# pin_dc = 16, +# pin_cs = 17, +# rotation = 1 +# ) diff --git a/examples/cv2_hardware_init/sd_card.py b/examples/cv2_hardware_init/sd_card.py new file mode 100644 index 0000000..b2976d0 --- /dev/null +++ b/examples/cv2_hardware_init/sd_card.py @@ -0,0 +1,50 @@ +# Initializes SD card and mounts it to the filesystem. This assumes the SD card +# is on the same SPI bus as the display with a different chip select pin. You +# may need to adjust this based on your specific board and configuration + +# Import the Pin class for the chip select pin +from machine import Pin + +# Import the SPI bus +from .bus_spi import spi + +# When the SD card is initialized, it changes the SPI bus baudrate. We'll +# want to revert it, so we need to know the original baudrate. There's no +# way to get it directly, so we convert the bus to a string and parse it. +# Example format: +# "SPI(0, baudrate=24000000, sck=Pin(2), mosi=Pin(3), miso=Pin(4))" +spi_str = str(spi) +baudrate = int(spi_str[spi_str.index("baudrate=") + 9:].partition(",")[0]) + +# Set the chip select pin for the SD card +sd_cs = Pin(7, Pin.OUT) + +try: + # Import the SD card module. 
This is often not installed by default in + # MicroPython, so you may need to install it manually. For example, you can + # use `mpremote mip install sdcard` + import sdcard + + # Initialize the SD card, then restore the original SPI bus baudrate. This + # is wrapped in a try/finally block to ensure the baudrate is restored even if + # the SD card initialization fails + try: + sd_card = sdcard.SDCard(spi, sd_cs) + finally: + spi.init(baudrate = baudrate) + + # Mount the SD card to the filesystem under the "/sd" directory, which makes + # it accessible just like the normal MicroPython filesystem + import uos + vfs = uos.VfsFat(sd_card) + uos.mount(vfs, "/sd") +except ImportError: + print("sdcard module not found, skipping SD card initialization...") +except OSError as e: + eStr = str(e) + if "no SD card" in eStr: + print("no SD card found, skipping SD card initialization...") + elif "Errno 1" in eStr: + print("SD card already mounted, skipping SD card initialization...") + else: + print("Failed to mount SD card, skipping SD card initialization...") diff --git a/examples/cv2_hardware_init/touch_screen.py b/examples/cv2_hardware_init/touch_screen.py new file mode 100644 index 0000000..1b314a0 --- /dev/null +++ b/examples/cv2_hardware_init/touch_screen.py @@ -0,0 +1,16 @@ +# Import the machine module to access hardware features +from .bus_i2c import i2c + +# Import the OpenCV touch screen drivers +from cv2_drivers.touch_screens import * + +# Create a touch screen object. Multiple options are provided below, so you can choose +# one that best fits your needs. You may need to adjust the arguments based on +# your specific touch screen and board configuration + +################################################################################ +# CST816 +################################################################################ + +# I2C interface +touch_screen = cst816.CST816(i2c) diff --git a/examples/ex01_hello_opencv.py b/examples/ex01_hello_opencv.py index 18e149e..1cab7ec 100644 --- a/examples/ex01_hello_opencv.py +++ b/examples/ex01_hello_opencv.py @@ -1,6 +1,14 @@ # Import OpenCV, just as you would in any other Python environment! import cv2 +# Standard OpenCV leverages the host operating system to access hardware, but we +# don't have that luxury in MicroPython. Instead, drivers are provided for +# various hardware components, which need to be initialized before using them. +# The examples import a module called `cv2_hardware_init`, which initializes the +# drivers. You may need to edit the contents of the `cv2_hardware_init` module +# based on your specific board and hardware configuration +from cv2_hardware_init import * + # Import NumPy, almost like any other Python environment! The only difference is # the addition of `from ulab` since MicroPython does not have a full NumPy # implementation; ulab NumPy is a lightweight version of standard NumPy diff --git a/examples/ex02_imread_imwrite.py b/examples/ex02_imread_imwrite.py index 11b41ba..e21dce4 100644 --- a/examples/ex02_imread_imwrite.py +++ b/examples/ex02_imread_imwrite.py @@ -1,5 +1,6 @@ -# Import OpenCV +# Import OpenCV and hardware initialization module import cv2 +from cv2_hardware_init import * # Call `cv2.imread()` to read an image from the MicroPython filesystem, just +# like in any other Python environment!
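+# A short sketch of the full round trip (the file names here are placeholders,
+# and "/sd" paths require the SD card mounted by cv2_hardware_init):
+#
+#     img = cv2.imread("test.png")
+#     cv2.imwrite("/sd/test_copy.png", img)
+#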
Make sure to copy the image to the diff --git a/examples/ex03_camera.py b/examples/ex03_camera.py index c52f901..f9ad88d 100644 --- a/examples/ex03_camera.py +++ b/examples/ex03_camera.py @@ -1,5 +1,6 @@ -# Import OpenCV +# Import OpenCV and hardware initialization module import cv2 +from cv2_hardware_init import * # Open a camera, similar to any other Python environment! In standard OpenCV, # you would use `cv2.VideoCapture(0)` or similar, and OpenCV would leverage the From 808dd8d0e3d2ce4ec0f17aa39f9accda3beb9052 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 10 Jul 2025 16:57:43 -0600 Subject: [PATCH 105/158] Update comments in touch screen init example --- examples/cv2_hardware_init/touch_screen.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/cv2_hardware_init/touch_screen.py b/examples/cv2_hardware_init/touch_screen.py index 1b314a0..6ffc1a7 100644 --- a/examples/cv2_hardware_init/touch_screen.py +++ b/examples/cv2_hardware_init/touch_screen.py @@ -1,12 +1,12 @@ -# Import the machine module to access hardware features -from .bus_i2c import i2c +# Initializes a touch screen object. Multiple options are provided below, so you +# can choose one that best fits your needs. You may need to adjust the arguments +# based on your specific touch screen and board configuration # Import the OpenCV touch screen drivers from cv2_drivers.touch_screens import * -# Create a touch screen object. Multiple options are provided below, so you can choose -# one that best fits your needs. You may need to adjust the arguments based on -# your specific touch screen and board configuration +# Import the I2C bus +from .bus_i2c import i2c ################################################################################ # CST816 From 8cad950d7989b2ce4f8736a22a723cfd15f5b1e4 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 10 Jul 2025 17:01:14 -0600 Subject: [PATCH 106/158] Add initial touch screen example --- examples/ex04_touch_screen.py | 55 +++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 examples/ex04_touch_screen.py diff --git a/examples/ex04_touch_screen.py b/examples/ex04_touch_screen.py new file mode 100644 index 0000000..1f21217 --- /dev/null +++ b/examples/ex04_touch_screen.py @@ -0,0 +1,55 @@ +# Import OpenCV and hardware initialization module +import cv2 +from cv2_hardware_init import * + +# Import NumPy +from ulab import numpy as np + +# Initialize an image to draw on +img = np.zeros((240, 320, 3), dtype=np.uint8) + +# Prompt the user to draw on the screen +img = cv2.putText(img, "Touch to draw!", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2) + +# Prompt the user to press a key to continue +print("Press any key to continue") + +# Create variables to store touch coordinates and state +x0, y0, x1, y1 = 0, 0, 0, 0 +touching = False + +# Loop to continuously read touch input and draw on the image +while True: + # Read touch input + x, y, touch_num = touch_screen.get_touch() + + # Update the touch coordinates and state + if touch_num > 0: + if not touching: + x0 = x + y0 = y + x1 = x + y1 = y + touching = True + else: + x0 = x1 + y0 = y1 + x1 = x + y1 = y + else: + if touching: + touching = False + + # Draw a line if touching + if touching: + img = cv2.line(img, (x0, y0), (x1, y1), (255, 255, 255), 2) + + # Display the frame + display.imshow(img) + + # Check for key presses + key = cv2.waitKey(1) + + # If any key is pressed, exit the loop + if key != -1: + break From 604874b871a84ec6ca88b70855d707c68ebf1dcb 
Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 10 Jul 2025 17:01:36 -0600 Subject: [PATCH 107/158] Renumber examples 2-4 Hardware examples first, so users can easily verify hardware functionality --- examples/{ex03_camera.py => ex02_camera.py} | 0 examples/{ex04_touch_screen.py => ex03_touch_screen.py} | 0 examples/{ex02_imread_imwrite.py => ex04_imread_imwrite.py} | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename examples/{ex03_camera.py => ex02_camera.py} (100%) rename examples/{ex04_touch_screen.py => ex03_touch_screen.py} (100%) rename examples/{ex02_imread_imwrite.py => ex04_imread_imwrite.py} (100%) diff --git a/examples/ex03_camera.py b/examples/ex02_camera.py similarity index 100% rename from examples/ex03_camera.py rename to examples/ex02_camera.py diff --git a/examples/ex04_touch_screen.py b/examples/ex03_touch_screen.py similarity index 100% rename from examples/ex04_touch_screen.py rename to examples/ex03_touch_screen.py diff --git a/examples/ex02_imread_imwrite.py b/examples/ex04_imread_imwrite.py similarity index 100% rename from examples/ex02_imread_imwrite.py rename to examples/ex04_imread_imwrite.py From 908ad71337a496b9c0bc06373ab626e38fe30648 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 11 Jul 2025 16:36:40 -0600 Subject: [PATCH 108/158] Add SFE logo detection example --- examples/ex05_detect_sfe_logo.py | 126 +++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 examples/ex05_detect_sfe_logo.py diff --git a/examples/ex05_detect_sfe_logo.py b/examples/ex05_detect_sfe_logo.py new file mode 100644 index 0000000..4a4b8ff --- /dev/null +++ b/examples/ex05_detect_sfe_logo.py @@ -0,0 +1,126 @@ +# Import OpenCV +import cv2 +from cv2_hardware_init import * +from ulab import numpy as np +import time + +# Here we define a reference contour for the SparkFun flame logo. This was +# created manually by picking points on the boundary of a small image of the +# logo in an image editor. This gets drawn in the top left corner of the +# display for reference +logo_contour = np.array( + [[[0,48]], + [[0,22]], + [[4,16]], + [[9,16]], + [[7,19]], + [[10,22]], + [[13,22]], + [[16,19]], + [[16,17]], + [[10,10]], + [[10,5]], + [[15,1]], + [[20,0]], + [[24,2]], + [[19,5]], + [[19,8]], + [[23,12]], + [[26,11]], + [[26,8]], + [[32,14]], + [[32,25]], + [[28,32]], + [[20,36]], + [[12,36]]], dtype=np.float) + +# Initialize a loop timer to calculate processing speed in FPS +loop_time = time.ticks_us() + +# Open the camera +camera.open() + +# Prompt the user to press a key to continue +print("Press any key to continue") + +# Loop to continuously read frames from the camera and display them +while True: + # Read a frame from the camera + success, frame = camera.read() + + # Here we binarize the image. There are many ways to do this, but here we + # simply convert the image to grayscale and then apply Otsu's thresholding + # method to create a binary image. This means it will only detect a dark + # logo on a light background (or vice versa), but you can modify this to + # find specific colors or use other methods if desired + gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) + + # Find contours in the binary image, which represent the boundaries of + # shapes. 
Contours are a powerful tool in OpenCV for shape analysis and + # object detection + contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + # It's possible that no contours were found, so first check if any were + # found before proceeding + if contours: + # We'll compare the contours found in the image to the reference logo + # contour defined earlier. We will use the `cv2.matchShapes()` function + # to compare the shapes to pick the best match, so we need to initialize + # variables to keep track of the best match found so far + best_contour = None + best_similarity = float('inf') # Start with a very high similarity score + + # Loop through each contour found in the image to find the best match + for i in range(len(contours)): + # If the image is noisy, the binarized image may contain many tiny + # contours that are obviously not the logo. `cv2.matchShapes()` can + # take some time, so we can be more efficient by skipping obviously + # wrong contours. In this example, the logo we're looking for is + # fairly complex, so we can skip contours that have too few points + # since they will definitely be too simple to match the logo + if len(contours[i]) < 20: + continue + + # Now we call `cv2.matchShapes()` which returns a "similarity" score + # between the two shapes. The lower the score, the more similar the + # shapes are + similarity = cv2.matchShapes(logo_contour, contours[i], cv2.CONTOURS_MATCH_I2, 0) + + # Check if this contour is a better match than the best so far + if similarity < best_similarity: + # This contour is a better match, so update the best match + best_similarity = similarity + best_contour = contours[i] + + # We're done checking all contours. It's possible that the best contour + # found is not a good match, so we can check if the score is below a + # threshold to determine whether it's close enough. Testing has shown + # that good matches are usually around 0.5, so we'll use a slightly + # higher threshold of 1.0 + if best_similarity < 1.0: + # Now we'll draw the best contour found on the original image + frame = cv2.drawContours(frame, [best_contour], -1, (0, 0, 255), 2) + + # All processing is done! Calculate the frame rate and display it + current_time = time.ticks_us() + fps = 1000000 / (current_time - loop_time) + loop_time = current_time + frame = cv2.putText(frame, f"FPS: {fps:.2f}", (40, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) + + # Draw the reference logo contour in the top left corner of the frame + frame[0:50, 0:40] = (0,0,0) + frame = cv2.drawContours(frame, [logo_contour], -1, (255, 255, 255), 1, offset=(2, 2)) + + # Display the frame + cv2.imshow(display, frame) + + # Check for key presses + key = cv2.waitKey(1) + + # If any key is pressed, exit the loop + if key != -1: + break + +# Release the camera +camera.release() From 8502bf87bd9d56717f3757f6aa47c1859913c187 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 14 Jul 2025 11:06:41 -0600 Subject: [PATCH 109/158] Fix alphabetical order of imgproc module Spelling is hard... 
--- src/imgproc.cpp | 276 ++++++++++++++++++++++++------------------------ src/imgproc.h | 24 ++--- 2 files changed, 150 insertions(+), 150 deletions(-) diff --git a/src/imgproc.cpp b/src/imgproc.cpp index 0a2dd0f..50b1aeb 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -49,12 +49,14 @@ mp_obj_t cv2_imgproc_adaptiveThreshold(size_t n_args, const mp_obj_t *pos_args, return mat_to_mp_obj(dst); } -mp_obj_t cv2_imgproc_arcLength(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { +mp_obj_t cv2_imgproc_approxPolyDP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments - enum { ARG_curve, ARG_closed }; + enum { ARG_curve, ARG_epsilon, ARG_closed, ARG_approxCurve }; static const mp_arg_t allowed_args[] = { { MP_QSTR_curve, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_closed, MP_ARG_REQUIRED | MP_ARG_BOOL, { .u_bool = false } }, + { MP_QSTR_epsilon, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_closed, MP_ARG_BOOL, { .u_bool = false } }, + { MP_QSTR_approxCurve, MP_ARG_OBJ, { .u_obj = mp_const_none } }, }; // Parse the arguments @@ -63,33 +65,30 @@ mp_obj_t cv2_imgproc_arcLength(size_t n_args, const mp_obj_t *pos_args, mp_map_t // Convert arguments to required types Mat curve = mp_obj_to_mat(args[ARG_curve].u_obj); + double epsilon = mp_obj_get_float(args[ARG_epsilon].u_obj); bool closed = args[ARG_closed].u_bool; - - mp_float_t retval; + Mat approxCurve = mp_obj_to_mat(args[ARG_approxCurve].u_obj); // Call the corresponding OpenCV function try { - retval = arcLength(curve, closed); + approxPolyDP(curve, approxCurve, epsilon, closed); } catch(Exception& e) { mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); } // Return the result - return mp_obj_new_float(retval); + return mat_to_mp_obj(approxCurve); } -mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { +mp_obj_t cv2_imgproc_approxPolyN(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments - enum { ARG_img, ARG_pt1, ARG_pt2, ARG_color, ARG_thickness, ARG_line_type, ARG_shift, ARG_tipLength }; + enum { ARG_curve, ARG_nsides, ARG_approxCurve, ARG_epsilon_percentage, ARG_ensure_convex }; static const mp_arg_t allowed_args[] = { - { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_pt1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_pt2, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } }, - { MP_QSTR_line_type, MP_ARG_INT, { .u_int = 8 } }, - { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } }, - { MP_QSTR_tipLength, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_curve, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_nsides, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_approxCurve, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_epsilon_percentage, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_ensure_convex, MP_ARG_BOOL, { .u_bool = true } }, }; // Parse the arguments @@ -97,38 +96,29 @@ mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); // Convert arguments to required types - Mat img = mp_obj_to_mat(args[ARG_img].u_obj); - Point pt1 = mp_obj_to_point(args[ARG_pt1].u_obj); - Point pt2 = 
mp_obj_to_point(args[ARG_pt2].u_obj); - Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); - int thickness = args[ARG_thickness].u_int; - int line_type = args[ARG_line_type].u_int; - int shift = args[ARG_shift].u_int; - mp_float_t tipLength; - if(args[ARG_tipLength].u_obj == mp_const_none) - tipLength = 0.1; // Default value - else - tipLength = mp_obj_get_float(args[ARG_tipLength].u_obj); + Mat curve = mp_obj_to_mat(args[ARG_curve].u_obj); + int nsides = args[ARG_nsides].u_int; + Mat approxCurve = mp_obj_to_mat(args[ARG_approxCurve].u_obj); + mp_float_t epsilon_percentage = args[ARG_epsilon_percentage].u_obj == mp_const_none ? -1.0 : mp_obj_get_float(args[ARG_epsilon_percentage].u_obj); + bool ensure_convex = args[ARG_ensure_convex].u_bool; // Call the corresponding OpenCV function try { - arrowedLine(img, pt1, pt2, color, thickness, line_type, shift, tipLength); + approxPolyN(curve, approxCurve, nsides, epsilon_percentage, ensure_convex); } catch(Exception& e) { mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); } // Return the result - return mat_to_mp_obj(img); + return mat_to_mp_obj(approxCurve); } -mp_obj_t cv2_imgproc_approxPolyDP(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { +mp_obj_t cv2_imgproc_arcLength(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments - enum { ARG_curve, ARG_epsilon, ARG_closed, ARG_approxCurve }; + enum { ARG_curve, ARG_closed }; static const mp_arg_t allowed_args[] = { { MP_QSTR_curve, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_epsilon, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = mp_const_none } }, - { MP_QSTR_closed, MP_ARG_BOOL, { .u_bool = false } }, - { MP_QSTR_approxCurve, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_closed, MP_ARG_REQUIRED | MP_ARG_BOOL, { .u_bool = false } }, }; // Parse the arguments @@ -137,30 +127,33 @@ mp_obj_t cv2_imgproc_approxPolyDP(size_t n_args, const mp_obj_t *pos_args, mp_ma // Convert arguments to required types Mat curve = mp_obj_to_mat(args[ARG_curve].u_obj); - double epsilon = mp_obj_get_float(args[ARG_epsilon].u_obj); bool closed = args[ARG_closed].u_bool; - Mat approxCurve = mp_obj_to_mat(args[ARG_approxCurve].u_obj); + + mp_float_t retval; // Call the corresponding OpenCV function try { - approxPolyDP(curve, approxCurve, epsilon, closed); + retval = arcLength(curve, closed); } catch(Exception& e) { mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); } // Return the result - return mat_to_mp_obj(approxCurve); + return mp_obj_new_float(retval); } -mp_obj_t cv2_imgproc_approxPolyN(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { +mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments - enum { ARG_curve, ARG_nsides, ARG_approxCurve, ARG_epsilon_percentage, ARG_ensure_convex }; + enum { ARG_img, ARG_pt1, ARG_pt2, ARG_color, ARG_thickness, ARG_line_type, ARG_shift, ARG_tipLength }; static const mp_arg_t allowed_args[] = { - { MP_QSTR_curve, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_nsides, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, - { MP_QSTR_approxCurve, MP_ARG_OBJ, { .u_obj = mp_const_none } }, - { MP_QSTR_epsilon_percentage, MP_ARG_OBJ, { .u_obj = mp_const_none } }, - { MP_QSTR_ensure_convex, MP_ARG_BOOL, { .u_bool = true } }, + { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_pt1, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_pt2, MP_ARG_REQUIRED | 
MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } }, + { MP_QSTR_line_type, MP_ARG_INT, { .u_int = 8 } }, + { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_tipLength, MP_ARG_OBJ, { .u_obj = mp_const_none } }, }; // Parse the arguments @@ -168,21 +161,28 @@ mp_obj_t cv2_imgproc_approxPolyN(size_t n_args, const mp_obj_t *pos_args, mp_map mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); // Convert arguments to required types - Mat curve = mp_obj_to_mat(args[ARG_curve].u_obj); - int nsides = args[ARG_nsides].u_int; - Mat approxCurve = mp_obj_to_mat(args[ARG_approxCurve].u_obj); - mp_float_t epsilon_percentage = args[ARG_epsilon_percentage].u_obj == mp_const_none ? -1.0 : mp_obj_get_float(args[ARG_epsilon_percentage].u_obj); - bool ensure_convex = args[ARG_ensure_convex].u_bool; + Mat img = mp_obj_to_mat(args[ARG_img].u_obj); + Point pt1 = mp_obj_to_point(args[ARG_pt1].u_obj); + Point pt2 = mp_obj_to_point(args[ARG_pt2].u_obj); + Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); + int thickness = args[ARG_thickness].u_int; + int line_type = args[ARG_line_type].u_int; + int shift = args[ARG_shift].u_int; + mp_float_t tipLength; + if(args[ARG_tipLength].u_obj == mp_const_none) + tipLength = 0.1; // Default value + else + tipLength = mp_obj_get_float(args[ARG_tipLength].u_obj); // Call the corresponding OpenCV function try { - approxPolyN(curve, approxCurve, nsides, epsilon_percentage, ensure_convex); + arrowedLine(img, pt1, pt2, color, thickness, line_type, shift, tipLength); } catch(Exception& e) { mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); } // Return the result - return mat_to_mp_obj(approxCurve); + return mat_to_mp_obj(img); } mp_obj_t cv2_imgproc_bilateralFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { @@ -389,6 +389,43 @@ mp_obj_t cv2_imgproc_Canny(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw return mat_to_mp_obj(edges); } +mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_img, ARG_center, ARG_radius, ARG_color, ARG_thickness, ARG_lineType, ARG_shift }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_center, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_radius, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } }, + { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } }, + { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat img = mp_obj_to_mat(args[ARG_img].u_obj); + Point center = mp_obj_to_point(args[ARG_center].u_obj); + int radius = args[ARG_radius].u_int; + Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); + int thickness = args[ARG_thickness].u_int; + int lineType = args[ARG_lineType].u_int; + int shift = args[ARG_shift].u_int; + + // Call the corresponding OpenCV function + try { + circle(img, center, radius, color, thickness, lineType, shift); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + 
// Return the result + return mat_to_mp_obj(img); +} + mp_obj_t cv2_imgproc_connectedComponents(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_image, ARG_labels, ARG_connectivity, ARG_ltype }; @@ -587,43 +624,6 @@ mp_obj_t cv2_imgproc_convexityDefects(size_t n_args, const mp_obj_t *pos_args, m return mat_to_mp_obj(convexityDefects); } -mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { - // Define the arguments - enum { ARG_img, ARG_center, ARG_radius, ARG_color, ARG_thickness, ARG_lineType, ARG_shift }; - static const mp_arg_t allowed_args[] = { - { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_center, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_radius, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, - { MP_QSTR_color, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_thickness, MP_ARG_INT, { .u_int = 1 } }, - { MP_QSTR_lineType, MP_ARG_INT, { .u_int = LINE_8 } }, - { MP_QSTR_shift, MP_ARG_INT, { .u_int = 0 } }, - }; - - // Parse the arguments - mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; - mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); - - // Convert arguments to required types - Mat img = mp_obj_to_mat(args[ARG_img].u_obj); - Point center = mp_obj_to_point(args[ARG_center].u_obj); - int radius = args[ARG_radius].u_int; - Scalar color = mp_obj_to_scalar(args[ARG_color].u_obj); - int thickness = args[ARG_thickness].u_int; - int lineType = args[ARG_lineType].u_int; - int shift = args[ARG_shift].u_int; - - // Call the corresponding OpenCV function - try { - circle(img, center, radius, color, thickness, lineType, shift); - } catch(Exception& e) { - mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); - } - - // Return the result - return mat_to_mp_obj(img); -} - mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_code, ARG_dst }; @@ -1776,53 +1776,6 @@ mp_obj_t cv2_imgproc_minEnclosingTriangle(size_t n_args, const mp_obj_t *pos_arg return mp_obj_new_tuple(2, result_tuple); } -mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { - // Define the arguments - enum { ARG_src, ARG_op, ARG_kernel, ARG_dst, ARG_anchor, ARG_iterations, ARG_borderType, ARG_borderValue }; - static const mp_arg_t allowed_args[] = { - { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_op, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, - { MP_QSTR_kernel, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, - { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, - { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } }, - { MP_QSTR_iterations, MP_ARG_INT, { .u_int = 1 } }, - { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_CONSTANT } }, - { MP_QSTR_borderValue, MP_ARG_OBJ, { .u_obj = mp_const_none } }, - }; - - // Parse the arguments - mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; - mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); - - // Convert arguments to required types - Mat src = mp_obj_to_mat(args[ARG_src].u_obj); - int op = args[ARG_op].u_int; - Mat kernel = mp_obj_to_mat(args[ARG_kernel].u_obj); - Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); - Point anchor; - if(args[ARG_anchor].u_obj == mp_const_none) - anchor = Point(-1, -1); // Default value - else - anchor = 
mp_obj_to_point(args[ARG_anchor].u_obj); - int iterations = args[ARG_iterations].u_int; - int borderType = args[ARG_borderType].u_int; - Scalar borderValue; - if(args[ARG_borderValue].u_obj == mp_const_none) - borderValue = morphologyDefaultBorderValue(); // Default value - else - borderValue = mp_obj_to_scalar(args[ARG_borderValue].u_obj); - - // Call the corresponding OpenCV function - try { - morphologyEx(src, dst, op, kernel, anchor, iterations, borderType, borderValue); - } catch(Exception& e) { - mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); - } - - // Return the result - return mat_to_mp_obj(dst); -} - mp_obj_t cv2_imgproc_moments(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_src, ARG_binary }; @@ -1878,6 +1831,53 @@ mp_obj_t cv2_imgproc_moments(size_t n_args, const mp_obj_t *pos_args, mp_map_t * return moments_dict; } +mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Define the arguments + enum { ARG_src, ARG_op, ARG_kernel, ARG_dst, ARG_anchor, ARG_iterations, ARG_borderType, ARG_borderValue }; + static const mp_arg_t allowed_args[] = { + { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_op, MP_ARG_REQUIRED | MP_ARG_INT, { .u_int = 0 } }, + { MP_QSTR_kernel, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } }, + { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_anchor, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + { MP_QSTR_iterations, MP_ARG_INT, { .u_int = 1 } }, + { MP_QSTR_borderType, MP_ARG_INT, { .u_int = BORDER_CONSTANT } }, + { MP_QSTR_borderValue, MP_ARG_OBJ, { .u_obj = mp_const_none } }, + }; + + // Parse the arguments + mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; + mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + + // Convert arguments to required types + Mat src = mp_obj_to_mat(args[ARG_src].u_obj); + int op = args[ARG_op].u_int; + Mat kernel = mp_obj_to_mat(args[ARG_kernel].u_obj); + Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + Point anchor; + if(args[ARG_anchor].u_obj == mp_const_none) + anchor = Point(-1, -1); // Default value + else + anchor = mp_obj_to_point(args[ARG_anchor].u_obj); + int iterations = args[ARG_iterations].u_int; + int borderType = args[ARG_borderType].u_int; + Scalar borderValue; + if(args[ARG_borderValue].u_obj == mp_const_none) + borderValue = morphologyDefaultBorderValue(); // Default value + else + borderValue = mp_obj_to_scalar(args[ARG_borderValue].u_obj); + + // Call the corresponding OpenCV function + try { + morphologyEx(src, dst, op, kernel, anchor, iterations, borderType, borderValue); + } catch(Exception& e) { + mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); + } + + // Return the result + return mat_to_mp_obj(dst); +} + mp_obj_t cv2_imgproc_pointPolygonTest(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { ARG_contour, ARG_pt, ARG_measureDist }; diff --git a/src/imgproc.h b/src/imgproc.h index f3a3fd2..6ef3dce 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -3,22 +3,22 @@ // Function declarations extern mp_obj_t cv2_imgproc_adaptiveThreshold(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); -extern mp_obj_t cv2_imgproc_arcLength(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); -extern mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_approxPolyDP(size_t n_args, 
const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_approxPolyN(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_arcLength(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_arrowedLine(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_bilateralFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_blur(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_boundingRect(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_boxFilter(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_boxPoints(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_Canny(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_connectedComponents(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_connectedComponentsWithStats(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_contourArea(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_convexHull(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_convexityDefects(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); -extern mp_obj_t cv2_imgproc_circle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_cvtColor(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_dilate(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_drawContours(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -47,8 +47,8 @@ extern mp_obj_t cv2_imgproc_medianBlur(size_t n_args, const mp_obj_t *pos_args, extern mp_obj_t cv2_imgproc_minAreaRect(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_minEnclosingCircle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_minEnclosingTriangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); -extern mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_moments(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_imgproc_morphologyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_pointPolygonTest(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_putText(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_imgproc_rectangle(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); @@ -59,22 +59,22 @@ extern mp_obj_t cv2_imgproc_threshold(size_t n_args, const mp_obj_t *pos_args, m // Python references to the functions static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_adaptiveThreshold_obj, 6, cv2_imgproc_adaptiveThreshold); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arcLength_obj, 2, cv2_imgproc_arcLength); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_approxPolyDP_obj, 3, cv2_imgproc_approxPolyDP); static 
MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_approxPolyN_obj, 2, cv2_imgproc_approxPolyN); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arcLength_obj, 2, cv2_imgproc_arcLength); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_bilateralFilter_obj, 4, cv2_imgproc_bilateralFilter); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_blur_obj, 2, cv2_imgproc_blur); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boundingRect_obj, 1, cv2_imgproc_boundingRect); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boxFilter_obj, 3, cv2_imgproc_boxFilter); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_boxPoints_obj, 1, cv2_imgproc_boxPoints); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_Canny_obj, 3, cv2_imgproc_Canny); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_circle_obj, 4, cv2_imgproc_circle); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponents_obj, 1, cv2_imgproc_connectedComponents); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_connectedComponentsWithStats_obj, 1, cv2_imgproc_connectedComponentsWithStats); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_contourArea_obj, 1, cv2_imgproc_contourArea); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_convexHull_obj, 1, cv2_imgproc_convexHull); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_convexityDefects_obj, 1, cv2_imgproc_convexityDefects); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_circle_obj, 4, cv2_imgproc_circle); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_cvtColor_obj, 2, cv2_imgproc_cvtColor); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_dilate_obj, 2, cv2_imgproc_dilate); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_drawContours_obj, 3, cv2_imgproc_drawContours); @@ -103,8 +103,8 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_medianBlur_obj, 2, cv2_imgproc_med static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_minAreaRect_obj, 1, cv2_imgproc_minAreaRect); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_minEnclosingCircle_obj, 1, cv2_imgproc_minEnclosingCircle); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_minEnclosingTriangle_obj, 1, cv2_imgproc_minEnclosingTriangle); -static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_moments_obj, 1, cv2_imgproc_moments); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_morphologyEx_obj, 3, cv2_imgproc_morphologyEx); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_pointPolygonTest_obj, 3, cv2_imgproc_pointPolygonTest); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_putText_obj, 6, cv2_imgproc_putText); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_rectangle_obj, 4, cv2_imgproc_rectangle); @@ -117,22 +117,22 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_thre #define OPENCV_IMGPROC_GLOBALS \ /* Functions */ \ { MP_ROM_QSTR(MP_QSTR_adaptiveThreshold), MP_ROM_PTR(&cv2_imgproc_adaptiveThreshold_obj) }, \ - { MP_ROM_QSTR(MP_QSTR_arcLength), MP_ROM_PTR(&cv2_imgproc_arcLength_obj) }, \ - { MP_ROM_QSTR(MP_QSTR_arrowedLine), MP_ROM_PTR(&cv2_imgproc_arrowedLine_obj) }, \ { MP_ROM_QSTR(MP_QSTR_approxPolyDP), MP_ROM_PTR(&cv2_imgproc_approxPolyDP_obj) }, \ { MP_ROM_QSTR(MP_QSTR_approxPolyN), MP_ROM_PTR(&cv2_imgproc_approxPolyN_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_arcLength), MP_ROM_PTR(&cv2_imgproc_arcLength_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_arrowedLine), MP_ROM_PTR(&cv2_imgproc_arrowedLine_obj) }, \ { MP_ROM_QSTR(MP_QSTR_bilateralFilter), MP_ROM_PTR(&cv2_imgproc_bilateralFilter_obj) }, \ { 
MP_ROM_QSTR(MP_QSTR_blur), MP_ROM_PTR(&cv2_imgproc_blur_obj) }, \ { MP_ROM_QSTR(MP_QSTR_boundingRect), MP_ROM_PTR(&cv2_imgproc_boundingRect_obj) }, \ { MP_ROM_QSTR(MP_QSTR_boxFilter), MP_ROM_PTR(&cv2_imgproc_boxFilter_obj) }, \ { MP_ROM_QSTR(MP_QSTR_boxPoints), MP_ROM_PTR(&cv2_imgproc_boxPoints_obj) }, \ { MP_ROM_QSTR(MP_QSTR_Canny), MP_ROM_PTR(&cv2_imgproc_Canny_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_circle), MP_ROM_PTR(&cv2_imgproc_circle_obj) }, \ { MP_ROM_QSTR(MP_QSTR_connectedComponents), MP_ROM_PTR(&cv2_imgproc_connectedComponents_obj) }, \ { MP_ROM_QSTR(MP_QSTR_connectedComponentsWithStats), MP_ROM_PTR(&cv2_imgproc_connectedComponentsWithStats_obj) }, \ { MP_ROM_QSTR(MP_QSTR_contourArea), MP_ROM_PTR(&cv2_imgproc_contourArea_obj) }, \ { MP_ROM_QSTR(MP_QSTR_convexHull), MP_ROM_PTR(&cv2_imgproc_convexHull_obj) }, \ { MP_ROM_QSTR(MP_QSTR_convexityDefects), MP_ROM_PTR(&cv2_imgproc_convexityDefects_obj) }, \ - { MP_ROM_QSTR(MP_QSTR_circle), MP_ROM_PTR(&cv2_imgproc_circle_obj) }, \ { MP_ROM_QSTR(MP_QSTR_cvtColor), MP_ROM_PTR(&cv2_imgproc_cvtColor_obj) }, \ { MP_ROM_QSTR(MP_QSTR_dilate), MP_ROM_PTR(&cv2_imgproc_dilate_obj) }, \ { MP_ROM_QSTR(MP_QSTR_drawContours), MP_ROM_PTR(&cv2_imgproc_drawContours_obj) }, \ @@ -161,8 +161,8 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_threshold_obj, 4, cv2_imgproc_thre { MP_ROM_QSTR(MP_QSTR_minAreaRect), MP_ROM_PTR(&cv2_imgproc_minAreaRect_obj) }, \ { MP_ROM_QSTR(MP_QSTR_minEnclosingCircle), MP_ROM_PTR(&cv2_imgproc_minEnclosingCircle_obj) }, \ { MP_ROM_QSTR(MP_QSTR_minEnclosingTriangle), MP_ROM_PTR(&cv2_imgproc_minEnclosingTriangle_obj) }, \ - { MP_ROM_QSTR(MP_QSTR_morphologyEx), MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) }, \ { MP_ROM_QSTR(MP_QSTR_moments), MP_ROM_PTR(&cv2_imgproc_moments_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_morphologyEx), MP_ROM_PTR(&cv2_imgproc_morphologyEx_obj) }, \ { MP_ROM_QSTR(MP_QSTR_pointPolygonTest), MP_ROM_PTR(&cv2_imgproc_pointPolygonTest_obj) }, \ { MP_ROM_QSTR(MP_QSTR_putText), MP_ROM_PTR(&cv2_imgproc_putText_obj) }, \ { MP_ROM_QSTR(MP_QSTR_rectangle), MP_ROM_PTR(&cv2_imgproc_rectangle_obj) }, \ From a548e3731652adf72b1bbb275205a9f073e95ace Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 14 Jul 2025 15:57:22 -0600 Subject: [PATCH 110/158] Remove references to boot.py from examples --- examples/ex01_hello_opencv.py | 10 +++------- examples/ex02_camera.py | 3 --- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/examples/ex01_hello_opencv.py b/examples/ex01_hello_opencv.py index 1cab7ec..045a1c7 100644 --- a/examples/ex01_hello_opencv.py +++ b/examples/ex01_hello_opencv.py @@ -40,15 +40,11 @@ # Once we have an image ready to show, just call `cv2.imshow()`, almost like any # other Python environment! However, there is one important difference: # -# Standard OpenCV leverages the host operating system to display images in -# windows, but we don't have that luxury in MicroPython. So there is an API -# change to `cv2.imshow()` to accommodate this: instead of passing a window name -# string as the first argument to `cv2.imshow()`, we pass a display driver. Any +# Standard OpenCV takes a window name string in `cv2.imshow()`, which is used +# to display the image in a window. We don't have windows in MicroPython, so +# there is an API change where the first argument must be a display driver. Any # display driver can be used, as long as it implements an `imshow()` method that # takes a NumPy array as input -# -# This example assumes a display driver called `display` has been initialized by -# a `boot.py` script. 
See the example `boot.py` script for more details cv2.imshow(display, img) # Can alternatively call `display.imshow(img)` # Standard OpenCV requires a call to `cv2.waitKey()` to process events and diff --git a/examples/ex02_camera.py b/examples/ex02_camera.py index f9ad88d..b7a3521 100644 --- a/examples/ex02_camera.py +++ b/examples/ex02_camera.py @@ -9,9 +9,6 @@ # so a camera driver is required instead. Any camera driver can be used, as long # as it implements the same methods as the standard OpenCV `cv2.VideoCapture` # class, such as `open()`, `read()`, and `release()` -# -# This example assumes a camera driver called `camera` has been initialized by a -# `boot.py` script. See the example `boot.py` script for more details camera.open() # Prompt the user to press a key to continue From 20675662231402d86bc7c1c5930e44469ec75d63 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 14 Jul 2025 15:57:48 -0600 Subject: [PATCH 111/158] Fix typo in comment --- examples/ex04_imread_imwrite.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/ex04_imread_imwrite.py b/examples/ex04_imread_imwrite.py index e21dce4..0b74d05 100644 --- a/examples/ex04_imread_imwrite.py +++ b/examples/ex04_imread_imwrite.py @@ -32,7 +32,7 @@ # Display the modified image cv2.imshow(display, edges) -# Now we'll save the modified image to the MicroPyhton filesystem using +# Now we'll save the modified image to the MicroPython filesystem using # `cv2.imwrite()`, just like in any other Python environment! # # Again, SD cards are supported, just change the path to point to the SD card From 0bcb62d97226984a53ccb5c1f04536f25633837d Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 14 Jul 2025 15:59:16 -0600 Subject: [PATCH 112/158] Change examples to import cv2 as cv Seems to be more common --- examples/ex01_hello_opencv.py | 20 ++++++++++---------- examples/ex02_camera.py | 12 ++++++------ examples/ex03_touch_screen.py | 8 ++++---- examples/ex04_imread_imwrite.py | 20 ++++++++++---------- examples/ex05_detect_sfe_logo.py | 26 +++++++++++++------------- 5 files changed, 43 insertions(+), 43 deletions(-) diff --git a/examples/ex01_hello_opencv.py b/examples/ex01_hello_opencv.py index 045a1c7..61e01ce 100644 --- a/examples/ex01_hello_opencv.py +++ b/examples/ex01_hello_opencv.py @@ -1,5 +1,5 @@ # Import OpenCV, just as you would in any other Python environment! -import cv2 +import cv2 as cv # Standard OpenCV leverages the host operating system to access hardware, but we # don't have that luxury in MicroPython. Instead, drivers are provided for @@ -26,30 +26,30 @@ # OpenCV's drawing functions can be used to modify the image as well. For # example, we can draw a green ellipse at the center of the image -img = cv2.ellipse(img, (160, 120), (100, 50), 0, 0, 360, (0, 255, 0), -1) +img = cv.ellipse(img, (160, 120), (100, 50), 0, 0, 360, (0, 255, 0), -1) # Note - Most OpenCV functions return the resulting image. It's redundant for # the drawing functions and often ignored, but if you call those functions from # the REPL without assigning it to a variable, the entire array will be printed. # To avoid this, you can simply re-assign the image variable (for example, -# `img = cv2.function(...)`) +# `img = cv.function(...)`) # And the obligatory "Hello OpenCV" text! 
This time in red -img = cv2.putText(img, "Hello OpenCV!", (50, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) +img = cv.putText(img, "Hello OpenCV!", (50, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) -# Once we have an image ready to show, just call `cv2.imshow()`, almost like any +# Once we have an image ready to show, just call `cv.imshow()`, almost like any # other Python environment! However, there is one important difference: # -# Standard OpenCV takes a window name string in `cv2.imshow()`, which is used +# Standard OpenCV takes a window name string in `cv.imshow()`, which is used # to display the image in a window. We don't have windows in MicroPython, so # there is an API change where the first argument must be a display driver. Any # display driver can be used, as long as it implements an `imshow()` method that # takes a NumPy array as input -cv2.imshow(display, img) # Can alternatively call `display.imshow(img)` +cv.imshow(display, img) # Can alternatively call `display.imshow(img)` -# Standard OpenCV requires a call to `cv2.waitKey()` to process events and +# Standard OpenCV requires a call to `cv.waitKey()` to process events and # actually display the image. However the display driver shows the image -# immediately, so it's not necessary to call `cv2.waitKey()` in MicroPython. +# immediately, so it's not necessary to call `cv.waitKey()` in MicroPython. # But it is available, and behaves almost like any other Python environment! The # only difference is that it requires a key to be pressed in the REPL instead of # a window. It will wait for up to the specified number of milliseconds (0 for @@ -58,7 +58,7 @@ # Note - Some MicroPython IDEs (like Thonny) don't actually send any key presses # until you hit Enter on your keyboard print("Press any key to continue") -key = cv2.waitKey(0) # Not necessary to display image, can remove if desired +key = cv.waitKey(0) # Not necessary to display image, can remove if desired # Print the key pressed print("Key pressed:", chr(key)) diff --git a/examples/ex02_camera.py b/examples/ex02_camera.py index b7a3521..6b18757 100644 --- a/examples/ex02_camera.py +++ b/examples/ex02_camera.py @@ -1,13 +1,13 @@ # Import OpenCV and hardware initialization module -import cv2 +import cv2 as cv from cv2_hardware_init import * # Open a camera, similar to any other Python environment! In standard OpenCV, -# you would use `cv2.VideoCapture(0)` or similar, and OpenCV would leverage the +# you would use `cv.VideoCapture(0)` or similar, and OpenCV would leverage the # host operating system to open a camera object and return it as a -# `cv2.VideoCapture` object. However, we don't have that luxury in MicroPython, +# `cv.VideoCapture` object. However, we don't have that luxury in MicroPython, # so a camera driver is required instead. 
Any camera driver can be used, as long -# as it implements the same methods as the standard OpenCV `cv2.VideoCapture` +# as it implements the same methods as the standard OpenCV `cv.VideoCapture` # class, such as `open()`, `read()`, and `release()` camera.open() @@ -27,10 +27,10 @@ break # Display the frame - cv2.imshow(display, frame) + cv.imshow(display, frame) # Check for key presses - key = cv2.waitKey(1) + key = cv.waitKey(1) # If any key is pressed, exit the loop if key != -1: diff --git a/examples/ex03_touch_screen.py b/examples/ex03_touch_screen.py index 1f21217..ab0753c 100644 --- a/examples/ex03_touch_screen.py +++ b/examples/ex03_touch_screen.py @@ -1,5 +1,5 @@ # Import OpenCV and hardware initialization module -import cv2 +import cv2 as cv from cv2_hardware_init import * # Import NumPy @@ -9,7 +9,7 @@ img = np.zeros((240, 320, 3), dtype=np.uint8) # Prompt the user to draw on the screen -img = cv2.putText(img, "Touch to draw!", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2) +img = cv.putText(img, "Touch to draw!", (10, 30), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2) # Prompt the user to press a key to continue print("Press any key to continue") @@ -42,13 +42,13 @@ # Draw a line if touching if touching: - img = cv2.line(img, (x0, y0), (x1, y1), (255, 255, 255), 2) + img = cv.line(img, (x0, y0), (x1, y1), (255, 255, 255), 2) # Display the frame display.imshow(img) # Check for key presses - key = cv2.waitKey(1) + key = cv.waitKey(1) # If any key is pressed, exit the loop if key != -1: diff --git a/examples/ex04_imread_imwrite.py b/examples/ex04_imread_imwrite.py index 0b74d05..26d7387 100644 --- a/examples/ex04_imread_imwrite.py +++ b/examples/ex04_imread_imwrite.py @@ -1,8 +1,8 @@ # Import OpenCV and hardware initialization module -import cv2 +import cv2 as cv from cv2_hardware_init import * -# Call `cv2.imread()` to read an image from the MicroPython filesystem, just +# Call `cv.imread()` to read an image from the MicroPython filesystem, just # like in any other Python environment! Make sure to copy the image to the # MicroPython filesystem first, and set the path to the image file as needed # @@ -11,35 +11,35 @@ # # Note - only BMP and PNG formats are currently supported in MicroPython OpenCV print("Loading image...") -img = cv2.imread("test_images/sparkfun_logo.png") +img = cv.imread("test_images/sparkfun_logo.png") # Show the image for 1 second # # Note - If the image is larger or smaller than the display, the behavior will # depend on the display driver. For example, the default ST7789 display driver # will crop large images, and show small images in the top-left corner -cv2.imshow(display, img) +cv.imshow(display, img) # Prompt the user to press a key to continue print("Press any key to continue") -key = cv2.waitKey(0) +key = cv.waitKey(0) -# Let's modify the image! Here we use `cv2.Canny()` to perform edge detection +# Let's modify the image! Here we use `cv.Canny()` to perform edge detection # on the image, which is a common operation in computer vision print("Performing edge detection...") -edges = cv2.Canny(img, 100, 200) +edges = cv.Canny(img, 100, 200) # Display the modified image -cv2.imshow(display, edges) +cv.imshow(display, edges) # Now we'll save the modified image to the MicroPython filesystem using -# `cv2.imwrite()`, just like in any other Python environment! +# `cv.imwrite()`, just like in any other Python environment! 
# # Again, SD cards are supported, just change the path to point to the SD card # # Note - only BMP and PNG formats are currently supported in MicroPython OpenCV print("Saving modified image...") -success = cv2.imwrite("test_images/sparkfun_logo_edges.png", edges) +success = cv.imwrite("test_images/sparkfun_logo_edges.png", edges) # Check if the image was saved successfully if success: diff --git a/examples/ex05_detect_sfe_logo.py b/examples/ex05_detect_sfe_logo.py index 4a4b8ff..f0b4940 100644 --- a/examples/ex05_detect_sfe_logo.py +++ b/examples/ex05_detect_sfe_logo.py @@ -1,5 +1,5 @@ # Import OpenCV -import cv2 +import cv2 as cv from cv2_hardware_init import * from ulab import numpy as np import time @@ -53,19 +53,19 @@ # method to create a binary image. This means it will only detect a dark # logo on a light background (or vice versa), but you can modify this to # find specific colors or use other methods if desired - gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) + gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) + thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU) # Find contours in the binary image, which represent the boundaries of # shapes. Contours are a powerful tool in OpenCV for shape analysis and # object detection - contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) # It's possible that no contours were found, so first check if any were # found before proceeding if contours: # We'll compare the contours found in the image to the reference logo - # contour defined earlier. We will use the `cv2.matchShapes()` function + # contour defined earlier. We will use the `cv.matchShapes()` function # to compare the shapes to pick the best match, so we need to initialize # variables to keep track of the best match found so far best_contour = None @@ -74,7 +74,7 @@ # Loop through each contour found in the image to find the best match for i in range(len(contours)): # If the image is noisy, the binarized image may contain many tiny - # contours that are obviously not the logo. `cv2.matchShapes()` can + # contours that are obviously not the logo. `cv.matchShapes()` can # take some time, so we can be more efficient by skipping obviously # wrong contours. In this example, the logo we're looking for is # fairly complex, so we can skip contours that have too few points @@ -82,10 +82,10 @@ if len(contours[i]) < 20: continue - # Now we call `cv2.matchShapes()` which returns a "similarity" score + # Now we call `cv.matchShapes()` which returns a "similarity" score # between the two shapes. The lower the score, the more similar the # shapes are - similarity = cv2.matchShapes(logo_contour, contours[i], cv2.CONTOURS_MATCH_I2, 0) + similarity = cv.matchShapes(logo_contour, contours[i], cv.CONTOURS_MATCH_I2, 0) # Check if this contour is a better match than the best so far if similarity < best_similarity: @@ -100,23 +100,23 @@ # higher threshold of 1.0 if best_similarity < 1.0: # Now we'll draw the best contour found on the original image - frame = cv2.drawContours(frame, [best_contour], -1, (0, 0, 255), 2) + frame = cv.drawContours(frame, [best_contour], -1, (0, 0, 255), 2) # All processing is done! 
Calculate the frame rate and display it
     current_time = time.ticks_us()
     fps = 1000000 / (current_time - loop_time)
     loop_time = current_time
-    frame = cv2.putText(frame, f"FPS: {fps:.2f}", (40, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+    frame = cv.putText(frame, f"FPS: {fps:.2f}", (40, 30), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
 
     # Draw the reference logo contour in the top left corner of the frame
     frame[0:50, 0:40] = (0,0,0)
-    frame = cv2.drawContours(frame, [logo_contour], -1, (255, 255, 255), 1, offset=(2, 2))
+    frame = cv.drawContours(frame, [logo_contour], -1, (255, 255, 255), 1, offset=(2, 2))
 
     # Display the frame
-    cv2.imshow(display, frame)
+    cv.imshow(display, frame)
 
     # Check for key presses
-    key = cv2.waitKey(1)
+    key = cv.waitKey(1)
 
     # If any key is pressed, exit the loop
     if key != -1:
         break

From 17b0d53b11dff67d3811b9acffff57003c178800 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Mon, 14 Jul 2025 15:59:53 -0600
Subject: [PATCH 113/158] Initial readme

---
 README.md | 334 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 333 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 6083295..cec024e 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,333 @@
-# micropython-opencv
\ No newline at end of file
+# MicroPython-OpenCV
+
+Welcome to SparkFun's MicroPython port of OpenCV! This is the first known MicroPython port of OpenCV, and as such, there may be some rough edges. Hardware support is limited to SparkFun products.
+
+# Quick Start
+
+1. Flash MicroPython-OpenCV firmware
+    * Back up any files you want to keep; they *will* be overwritten!
+    * Download the latest firmware from the [Releases tab](https://github.com/sparkfun/micropython-opencv/releases).
+    * If you don't know how to flash firmware to your board, find your board [here](https://micropython.org/download/) and follow the instructions using the OpenCV firmware.
+2. Copy examples (optional)
+    * It is suggested to copy the entire examples folder to your MicroPython board to get started. This can be done simply with [mpremote](https://docs.micropython.org/en/latest/reference/mpremote.html):
+        * `cd micropython-opencv/examples`
+        * `mpremote cp -r . :`
+3. Configure hardware drivers
+    * The MicroPython port of OpenCV depends on hardware drivers to interface with cameras and displays. Drivers are built into the firmware, so there is no need to install them manually, but you will likely need to configure them for your specific hardware and board configuration.
+    * [Examples](examples/cv2_hardware_init/) are provided for supported hardware. Edit the examples to work with your specific hardware and board configuration.
+4. Write OpenCV code!
+    * Any IDE should work, so use your favorite!
+    * The code block below contains snippets from various examples to highlight major features.
+
+```python
+# Import OpenCV, just as you would in any other Python environment!
+import cv2 as cv
+
+# Standard OpenCV leverages the host operating system to access hardware, but we
+# don't have that luxury in MicroPython. Instead, drivers are provided for
+# various hardware components, which need to be initialized before using them.
+# The examples import a module called `cv2_hardware_init`, which initializes the
+# drivers. You may need to edit the contents of the `cv2_hardware_init` module
+# based on your specific board and hardware configuration
+from cv2_hardware_init import *
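+
+# Note - In the examples in this repository, `cv2_hardware_init` defines the
+# driver objects used below (`display`, `camera`, and so on); a custom init
+# module just needs to provide equivalent objects with the same names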
+# Import NumPy, almost like any other Python environment! The only difference is
+# the addition of `from ulab` since MicroPython does not have a full NumPy
+# implementation; ulab NumPy is a lightweight version of standard NumPy
+from ulab import numpy as np
+
+# Initialize an image (NumPy array) to be displayed, just like in any other
+# Python environment! Here we create a 240x320 pixel image with 3 color channels
+# (BGR order, like standard OpenCV) and a data type of `uint8` (you should
+# always specify the data type, because NumPy defaults to `float`)
+img = np.zeros((240, 320, 3), dtype=np.uint8)
+
+# OpenCV's drawing functions can be used to modify the image. Here is the
+# obligatory "Hello OpenCV!" text in red
+img = cv.putText(img, "Hello OpenCV!", (50, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+
+# Once we have an image ready to show, just call `cv.imshow()`, almost like any
+# other Python environment! However, there is one important difference:
+#
+# Standard OpenCV takes a window name string in `cv.imshow()`, which is used
+# to display the image in a window. We don't have windows in MicroPython, so
+# there is an API change where the first argument must be a display driver. Any
+# display driver can be used, as long as it implements an `imshow()` method that
+# takes a NumPy array as input
+cv.imshow(display, img) # Can alternatively call `display.imshow(img)`
+
+# Standard OpenCV requires a call to `cv.waitKey()` to process events and
+# actually display the image. However the display driver shows the image
+# immediately, so it's not necessary to call `cv.waitKey()` in MicroPython.
+# But it is available, and behaves almost like any other Python environment! The
+# only difference is that it requires a key to be pressed in the REPL instead of
+# a window. It will wait for up to the specified number of milliseconds (0 for
+# indefinite), and return the ASCII code of the key pressed (-1 if no key press)
+#
+# Note - Some MicroPython IDEs (like Thonny) don't actually send any key presses
+# until you hit Enter on your keyboard
+key = cv.waitKey(0) # Not necessary to display image, can remove if desired
+
+# Open a camera, similar to any other Python environment! In standard OpenCV,
+# you would use `cv.VideoCapture(0)` or similar, and OpenCV would leverage the
+# host operating system to open a camera object and return it as a
+# `cv.VideoCapture` object. However, we don't have that luxury in MicroPython,
+# so a camera driver is required instead. Any camera driver can be used, as long
+# as it implements the same methods as the standard OpenCV `cv.VideoCapture`
+# class, such as `open()`, `read()`, and `release()`
+camera.open()
+
+# Read a frame from the camera, just like any other Python environment! It
+# returns a tuple, where the first element is a boolean indicating success,
+# and the second element is the frame (NumPy array) read from the camera
+success, frame = camera.read()
+
+# Release the camera, just like in any other Python environment!
+camera.release()
+
+# Call `cv.imread()` to read an image from the MicroPython filesystem, just
+# like in any other Python environment! Make sure to copy the image to the
+# MicroPython filesystem first, and set the path to the image file as needed
+#
+# If your board can mount an SD card, you can instead load the image to the SD
+# card and change the path to point to the SD card
+#
+# Note - only BMP and PNG formats are currently supported in MicroPython OpenCV
+img = cv.imread("test_images/sparkfun_logo.png")
+
+# Let's modify the image! Here we use `cv.Canny()` to perform edge detection
+# on the image, which is a common operation in computer vision
+edges = cv.Canny(img, 100, 200)
+
+# Now we'll save the modified image to the MicroPython filesystem using
+# `cv.imwrite()`, just like in any other Python environment!
+#
+# Again, SD cards are supported, just change the path to point to the SD card
+#
+# Note - only BMP and PNG formats are currently supported in MicroPython OpenCV
+success = cv.imwrite("test_images/sparkfun_logo_edges.png", edges)
+```
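+
+Any hardware driver that follows the expected interface can be used with this port. As a rough illustration, below is a minimal sketch of what custom drivers could look like; the class names, resolution, and method bodies are placeholders (these are not drivers shipped in this repository), but the methods match what the examples above rely on: `imshow()` for displays, and `open()`/`read()`/`release()` for cameras.
+
+```python
+from ulab import numpy as np
+
+class MyDisplay:
+    # Minimal display driver sketch: `cv.imshow(display, img)` simply calls
+    # `display.imshow(img)` on whatever driver object is passed in
+    def imshow(self, img):
+        # A real driver would convert the BGR888 array to the panel's native
+        # format (e.g. RGB565) and push the pixels to the panel here
+        pass
+
+class MyCamera:
+    # Minimal camera driver sketch mirroring the `cv.VideoCapture` interface
+    def open(self):
+        # A real driver would configure and start the sensor here
+        return True
+
+    def read(self):
+        # A real driver would capture into a frame buffer; this placeholder
+        # just returns a black 240x320 BGR image
+        return True, np.zeros((240, 320, 3), dtype=np.uint8)
+
+    def release(self):
+        # A real driver would stop the sensor and free its buffers here
+        pass
+```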
+
+# Let's modify the image! Here we use `cv.Canny()` to perform edge detection
+# on the image, which is a common operation in computer vision
+edges = cv.Canny(img, 100, 200)
+
+# Now we'll save the modified image to the MicroPython filesystem using
+# `cv.imwrite()`, just like in any other Python environment!
+#
+# Again, SD cards are supported, just change the path to point to the SD card
+#
+# Note - only BMP and PNG formats are currently supported in MicroPython OpenCV
+success = cv.imwrite("test_images/sparkfun_logo_edges.png", edges)
+```
+
+# Hardware Support and Requirements
+
+Hardware support in this repository is mostly limited to SparkFun products. The current list of supported products is very small, but may be expanded in the future. Users are welcome to fork this repository to add support for other products, following our license requirements. Assistance in adding support for other hardware will not be provided by SparkFun. We may consider pull requests that add support for additional hardware; see [Contributing](#contributing).
+
+The OpenCV firmware adds ~3MiB on top of the standard MicroPython firmware, which itself can be up to 1MiB in size (depending on platform and board). So a board with at least 8MB of flash is recommended, so that space is also available for file storage.
+
+PSRAM is a requirement to do anything useful with OpenCV. A single 320x240 RGB888 frame buffer requires 225KiB of RAM (320 × 240 × 3 bytes = 230,400 bytes); most processors only have a few hundred KiB of SRAM. Several frame buffers can be needed for even simple vision pipelines, so you really need at least a few MiB of RAM available. The more the merrier!
+
+Below is the list of supported hardware devices:
+
+* MicroPython Devices
+    * [XRP Controller](https://www.sparkfun.com/sparkfun-experiential-robotics-platform-xrp-controller.html)
+* Camera Drivers
+    * HM01B0
+    * [OV5640](https://www.sparkfun.com/ov5640-camera-board-5-megapixel-2592x1944-fisheye-lens.html) (not fully working yet)
+* Display Drivers
+    * ST7789
+* Touch Screen Drivers
+    * CST816
+
+# Performance
+
+Limit your expectations. OpenCV typically runs on full desktop systems containing processors running at GHz speeds with dozens of cores optimized for computing speed. In contrast, microcontroller processors typically run at a few hundred MHz with 1 or 2 cores optimized for low power consumption. Exact performance depends on many things, including the processor, vision pipeline, image resolution, colorspaces used, RAM available, etc. But for reference, the RP2350 can run the SparkFun Logo Detection Example at about 2.5 FPS at 320x240 resolution.
+
+Something to consider is that MicroPython uses a garbage collector for memory management. As images are created and destroyed in a vision pipeline, RAM will be consumed until the garbage collector runs. The collection process takes longer with more RAM, so this can result in noticeable delays during collection (typically a few hundred milliseconds). To mitigate this, it's best to pre-allocate arrays and utilize the optional `dst` argument of OpenCV functions to avoid allocating new arrays when possible. Pre-allocation also helps improve performance by avoiding repeated delays from allocating memory.
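+
+As a rough sketch of this pattern (a hypothetical snippet, assuming the `camera` driver from the `cv2_hardware_init` examples, a 320x240 BGR camera, and the standard OpenCV `cv.COLOR_BGR2GRAY` color code; the `dst` and `edges` output arguments match the signatures in the function tables below):
+
+```python
+import cv2 as cv
+from cv2_hardware_init import *
+from ulab import numpy as np
+
+# Allocate the intermediate arrays once, outside the loop
+gray = np.zeros((240, 320), dtype=np.uint8)
+edges = np.zeros((240, 320), dtype=np.uint8)
+
+camera.open()
+for _ in range(100):
+    success, frame = camera.read()
+    if not success:
+        continue
+    # Reusing the pre-allocated outputs avoids creating two new arrays on
+    # every frame, which makes garbage collector pauses less frequent
+    cv.cvtColor(frame, cv.COLOR_BGR2GRAY, dst=gray)
+    cv.Canny(gray, 100, 200, edges=edges)
+camera.release()
+```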
+
+Another way to improve performance is to select the best hardware drivers for your setup. For example, the default SPI driver for the ST7789 is limited to the max SPI baudrate for the processor's SPI peripheral. That's 24MHz in the case of the RP2350, but another driver is provided that uses the PIO peripheral, which runs at 75MHz, so displaying images can be ~3x faster (ignoring any required colorspace conversions).
+
+For users wanting maximum performance, it may be desirable to bypass the high-level functions of the display/camera drivers, and instead work directly with the buffer member variables and read/write functions. This can avoid computationally expensive colorspace conversions when reading and writing images if they're not needed.
+
+# Included OpenCV Functions
+
+Below is a list of all OpenCV functions included in the MicroPython port of OpenCV. This section follows OpenCV's module structure.
+
+Only the most useful OpenCV functions are included. The MicroPython environment is *extremely* limited, so many functions are omitted due to prohibitively high RAM and firmware size requirements. Other less useful functions have been omitted to reduce firmware size. If there are additional functions you'd like to be included, see [Contributing](#contributing).
+
+If you need help understanding how to use these functions, see the documentation link for each function. You can also check out [OpenCV's Python Tutorials](https://docs.opencv.org/4.11.0/d6/d00/tutorial_py_root.html) and other tutorials online for more educational material. This repository is simply a port of OpenCV, so we do not document these functions or how to use them, except for deviations from standard Python OpenCV.
+
+## [`core`](https://docs.opencv.org/4.11.0/d0/de1/group__core.html)
+
+> [!NOTE]
+> The `core` module includes many functions for basic operations on arrays. Most of these can be performed by `numpy` operations, so they have been omitted to reduce firmware size.
+
+### [Operations on arrays](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html)
+
+| Function | Notes |
+| --- | --- |
+| `cv.convertScaleAbs( src[, dst[, alpha[, beta]]] ) -> dst`
Scales, calculates absolute values, and converts the result to 8-bit.
[Documentation](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html#ga3460e9c9f37b563ab9dd550c4d8c4e7d) | | +| `cv.inRange( src, lowerb, upperb[, dst] ) -> dst`
Checks if array elements lie between the elements of two other arrays.
[Documentation](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html#ga48af0ab51e36436c5d04340e036ce981) | | +| ` cv.minMaxLoc( src[, mask] ) -> minVal, maxVal, minLoc, maxLoc`
Finds the global minimum and maximum in an array.
[Documentation](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html#gab473bf2eb6d14ff97e89b355dac20707) | | + +## [`imgproc`](https://docs.opencv.org/4.11.0/d7/dbd/group__imgproc.html) + +### [Image Filtering](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html) + +| Function | Notes | +| --- | --- | +| `cv.bilateralFilter( src, d, sigmaColor, sigmaSpace[, dst[, borderType]] ) -> dst`
Applies the bilateral filter to an image.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga9d7064d478c95d60003cf839430737ed) | | +| `cv.blur( src, ksize[, dst[, anchor[, borderType]]] ) -> dst`
Blurs an image using the normalized box filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga8c45db9afe636703801b0b2e440fce37) | | +| `cv.boxFilter( src, ddepth, ksize[, dst[, anchor[, normalize[, borderType]]]] ) -> dst`
Blurs an image using the box filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gad533230ebf2d42509547d514f7d3fbc3) | | +| `cv.dilate( src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]] ) -> dst`
Dilates an image by using a specific structuring element.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga4ff0f3318642c4f469d0e11f242f3b6c) | | +| `cv.erode( src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]] ) -> dst`
Erodes an image by using a specific structuring element.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gaeb1e0c1033e3f6b891a25d0511362aeb) | | +| `cv.filter2D( src, ddepth, kernel[, dst[, anchor[, delta[, borderType]]]] ) -> dst`
Convolves an image with the kernel.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga27c049795ce870216ddfb366086b5a04) | | +| `cv.GaussianBlur( src, ksize, sigmaX[, dst[, sigmaY[, borderType[, hint]]]] ) -> dst`
Blurs an image using a Gaussian filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gae8bdcd9154ed5ca3cbc1766d960f45c1) | | +| `cv.getStructuringElement( shape, ksize[, anchor] ) -> retval`
Returns a structuring element of the specified size and shape for morphological operations.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gac342a1bb6eabf6f55c803b09268e36dc) | | +| `cv.Laplacian( src, ddepth[, dst[, ksize[, scale[, delta[, borderType]]]]] ) -> dst`
Calculates the Laplacian of an image.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gad78703e4c8fe703d479c1860d76429e6) | | +| `cv.medianBlur( src, ksize[, dst] ) -> dst`
Blurs an image using the median filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga564869aa33e58769b4469101aac458f9) | | +| `cv.morphologyEx( src, op, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]] ) -> dst`
Performs advanced morphological transformations.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga67493776e3ad1a3df63883829375201f) | | +| `cv.Scharr( src, ddepth, dx, dy[, dst[, scale[, delta[, borderType]]]] ) -> dst`
Calculates the first x- or y- image derivative using Scharr operator.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gaa13106761eedf14798f37aa2d60404c9) | | +| `cv.Sobel( src, ddepth, dx, dy[, dst[, ksize[, scale[, delta[, borderType]]]]] ) -> dst`
Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gacea54f142e81b6758cb6f375ce782c8d) | | +| `cv.spatialGradient( src[, dx[, dy[, ksize[, borderType]]]] ) -> dx, dy`
Calculates the first order image derivative in both x and y using a Sobel operator.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga405d03b20c782b65a4daf54d233239a2) | | + +### [Miscellaneous Image Transformations](https://docs.opencv.org/4.11.0/d7/d1b/group__imgproc__misc.html) + +| Function | Notes | +| --- | --- | +| `cv.adaptiveThreshold( src, maxValue, adaptiveMethod, thresholdType, blockSize, C[, dst] ) -> dst`
Applies an adaptive threshold to an array.
[Documentation](https://docs.opencv.org/4.11.0/d7/d1b/group__imgproc__misc.html#ga72b913f352e4a1b1b397736707afcde3) | | +| `cv.threshold( src, thresh, maxval, type[, dst] ) -> retval, dst`
Applies a fixed-level threshold to each array element.
[Documentation](https://docs.opencv.org/4.11.0/d7/d1b/group__imgproc__misc.html#gae8a4a146d1ca78c626a53577199e9c57) | | + +### [Drawing Functions](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html) + +| Function | Notes | +| --- | --- | +| `cv.arrowedLine( img, pt1, pt2, color[, thickness[, line_type[, shift[, tipLength]]]] ) -> img`
Draws an arrow segment pointing from the first point to the second one.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga0a165a3ca093fd488ac709fdf10c05b2) | | +| `cv.circle( img, center, radius, color[, thickness[, lineType[, shift]]] ) -> img`
Draws a circle.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#gaf10604b069374903dbd0f0488cb43670) | | +| `cv.drawContours( image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]] ) -> image`
Draws contours outlines or filled contours.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga746c0625f1781f1ffc9056259103edbc) | | +| `cv.drawMarker( img, position, color[, markerType[, markerSize[, thickness[, line_type]]]] ) -> img`
Draws a marker on a predefined position in an image.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga644c4a170d4799a56b29f864ce984b7e) | | +| `cv.ellipse( img, center, axes, angle, startAngle, endAngle, color[, thickness[, lineType[, shift]]] ) -> img`
Draws a simple or thick elliptic arc or fills an ellipse sector.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga28b2267d35786f5f890ca167236cbc69) | | +| `cv.fillConvexPoly( img, points, color[, lineType[, shift]] ) -> img`
Fills a convex polygon.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga9bb982be9d641dc51edd5e8ae3624e1f) | | +| `cv.fillPoly( img, pts, color[, lineType[, shift[, offset]]] ) -> img`
Fills the area bounded by one or more polygons.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga311160e71d37e3b795324d097cb3a7dc) | | +| `cv.line( img, pt1, pt2, color[, thickness[, lineType[, shift]]] ) -> img`
Draws a line segment connecting two points.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga7078a9fae8c7e7d13d24dac2520ae4a2) | | +| `cv.putText( img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]] ) -> img`
Draws a text string.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga5126f47f883d730f633d74f07456c576) | | +| `cv.rectangle( img, pt1, pt2, color[, thickness[, lineType[, shift]]] ) -> img`
Draws a simple, thick, or filled up-right rectangle.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga07d2f74cadcf8e305e810ce8eed13bc9) | | + +### [Color Space Conversions](https://docs.opencv.org/4.11.0/d8/d01/group__imgproc__color__conversions.html) + +| Function | Notes | +| --- | --- | +| `cv.cvtColor( src, code[, dst[, dstCn[, hint]]] ) -> dst`
Converts an image from one color space to another.
[Documentation](https://docs.opencv.org/4.11.0/d8/d01/group__imgproc__color__conversions.html#gaf86c09fe702ed037c03c2bc603ceab14) | | + +### [Structural Analysis and Shape Descriptors](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html) + +| Function | Notes | +| --- | --- | +| `cv.approxPolyDP( curve, epsilon, closed[, approxCurve] ) -> approxCurve`
Approximates a polygonal curve(s) with the specified precision.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga0012a5fdaea70b8a9970165d98722b4c) | | +| `cv.approxPolyN( curve, nsides[, approxCurve[, epsilon_percentage[, ensure_convex]]] ) -> approxCurve`
Approximates a polygon with a convex hull with a specified accuracy and number of sides.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga88981607a2d61b95074688aac55625cc) | | +| `cv.arcLength( curve, closed ) -> retval`
Calculates a contour perimeter or a curve length.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga8d26483c636be6b35c3ec6335798a47c) | | +| `cv.boundingRect( array ) -> retval`
Calculates the up-right bounding rectangle of a point set or non-zero pixels of gray-scale image.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga103fcbda2f540f3ef1c042d6a9b35ac7) | | +| `cv.boxPoints( box[, points] ) -> points`
Finds the four vertices of a rotated rect. Useful to draw the rotated rectangle.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaf78d467e024b4d7936cf9397185d2f5c) | | +| `cv.connectedComponents( image[, labels[, connectivity[, ltype]]] ) -> retval, labels`
computes the connected components labeled image of boolean image
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaedef8c7340499ca391d459122e51bef5) | `ltype` defaults to `CV_16U` instead of `CV_32S` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | +| ` cv.connectedComponentsWithStats( image[, labels[, stats[, centroids[, connectivity[, ltype]]]]] ) -> retval, labels, stats, centroids`
computes the connected components labeled image of boolean image and also produces a statistics output for each label
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga107a78bf7cd25dec05fb4dfc5c9e765f) | `labels`, `stats`, and `centroids` are returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | +| `cv.contourArea( contour[, oriented] ) -> retval`
Calculates a contour area.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga2c759ed9f497d4a618048a2f56dc97f1) | | +| `cv.convexHull( points[, hull[, clockwise[, returnPoints]]] ) -> hull`
Finds the convex hull of a point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga014b28e56cb8854c0de4a211cb2be656) | `hull` is returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | +| `cv.convexityDefects( contour, convexhull[, convexityDefects] ) -> convexityDefects`
Finds the convexity defects of a contour.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gada4437098113fd8683c932e0567f47ba) | `convexityDefects` is returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | +| `cv.findContours( image, mode, method[, contours[, hierarchy[, offset]]] ) -> contours, hierarchy`
Finds contours in a binary image.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gadf1ad6a0b82947fa1fe3c3d497f260e0) | `contours` and `hierarchy` are returned with `dtype=np.float` and `dtype=np.int16` respectively instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | +| `cv.fitEllipse( points ) -> retval`
Fits an ellipse around a set of 2D points.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaf259efaad93098103d6c27b9e4900ffa) | | +| `cv.fitLine( points, distType, param, reps, aeps[, line] ) -> line`
Fits a line to a 2D or 3D point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaf849da1fdafa67ee84b1e9a23b93f91f) | | +| `cv.isContourConvex( contour ) -> retval`
Tests a contour convexity.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga8abf8010377b58cbc16db6734d92941b) | | +| `cv.matchShapes( contour1, contour2, method, parameter ) -> retval`
Compares two shapes.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaadc90cb16e2362c9bd6e7363e6e4c317) | | +| `cv.minAreaRect( points ) -> retval`
Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga3d476a3417130ae5154aea421ca7ead9) | | +| `cv.minEnclosingCircle( points ) -> center, radius`
Finds a circle of the minimum area enclosing a 2D point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga8ce13c24081bbc7151e9326f412190f1) | | +| `cv.minEnclosingTriangle( points[, triangle] ) -> retval, triangle`
Finds a triangle of minimum area enclosing a 2D point set and returns its area.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga1513e72f6bbdfc370563664f71e0542f) | | +| `cv.moments( array[, binaryImage] ) -> retval`
Calculates all of the moments up to the third order of a polygon or rasterized shape.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga556a180f43cab22649c23ada36a8a139) | | +| `cv.pointPolygonTest( contour, pt, measureDist ) -> retval`
Performs a point-in-contour test.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga1a539e8db2135af2566103705d7a5722) | | + +### [Feature Detection](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html) + +| Function | Notes | +| --- | --- | +| `cv.Canny( image, threshold1, threshold2[, edges[, apertureSize[, L2gradient]]] ) -> edges`
Finds edges in an image using the Canny algorithm.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga04723e007ed888ddf11d9ba04e2232de) | | +| `cv.HoughCircles( image, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]] ) -> circles`
Finds circles in a grayscale image using the Hough transform.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga47849c3be0d0406ad3ca45db65a25d2d) | | +| ` cv.HoughCirclesWithAccumulator( image, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]] ) -> circles`
Finds circles in a grayscale image using the Hough transform and get accumulator.
[Documentation](https://docs.opencv.org/4.x/d2/d75/namespacecv.html#aed6d238ceede74293152437228c603ce) | | +| `cv.HoughLines( image, rho, theta, threshold[, lines[, srn[, stn[, min_theta[, max_theta[, use_edgeval]]]]]] ) -> lines`
Finds lines in a binary image using the standard Hough transform.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga0b7ee275f89bb1a799ab70a42131f39d) | | +| `cv.HoughLinesP( image, rho, theta, threshold[, lines[, minLineLength[, maxLineGap]]] ) -> lines`
Finds line segments in a binary image using the probabilistic Hough transform.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga8618180a5948286384e3b7ca02f6feeb) | `lines` is returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | +| `cv.HoughLinesWithAccumulator( image, rho, theta, threshold[, lines[, srn[, stn[, min_theta[, max_theta[, use_edgeval]]]]]] ) -> lines`
Finds lines in a binary image using the standard Hough transform and get accumulator.
[Documentation](https://docs.opencv.org/4.x/d2/d75/namespacecv.html#ad5e6dca5163cd4bd0135cb808f1ddfe8) | | + +### [Object Detection](https://docs.opencv.org/4.11.0/df/dfb/group__imgproc__object.html) + +| Function | Notes | +| --- | --- | +| `cv.matchTemplate( image, templ, method[, result[, mask]] ) -> result`
Compares a template against overlapped image regions.
[Documentation](https://docs.opencv.org/4.11.0/df/dfb/group__imgproc__object.html#ga586ebfb0a7fb604b35a23d85391329be) | |
+
+## [`imgcodecs`](https://docs.opencv.org/4.11.0/d4/da8/group__imgcodecs.html)
+
+| Function | Notes |
+| --- | --- |
+| `cv.imread( filename[, flags] ) -> retval`
Loads an image from a file.
[Documentation](https://docs.opencv.org/4.11.0/d4/da8/group__imgcodecs.html#gacbaa02cffc4ec2422dfa2e24412a99e2) | `filename` can be anywhere in the full MicroPython filesystem, including SD cards if mounted.
Only BMP and PNG formats are currently supported. | +| `cv.imwrite( filename, img[, params] ) -> retval`
Saves an image to a specified file.
[Documentation](https://docs.opencv.org/4.11.0/d4/da8/group__imgcodecs.html#ga8ac397bd09e48851665edbe12aa28f25) | `filename` can be anywhere in the full MicroPython filesystem, including SD cards if mounted.
Only BMP and PNG formats are currently supported. | + +## [`highgui`](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html) + +| Function | Notes | +| --- | --- | +| `cv.imshow( winname, mat ) -> None`
Displays an image in the specified window.
[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga453d42fe4cb60e5723281a89973ee563) | `winname` must actually be a display driver object that implements an `imshow()` method that takes a NumPy array as input. | +| `cv.waitKey( [, delay] ) -> retval`
Waits for a pressed key.
[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga5628525ad33f52eab17feebcfba38bd7) | Input is taken from `sys.stdin`, which is typically the REPL. |
+
+# Building
+
+Below are instructions to build the MicroPython-OpenCV firmware from scratch. Instructions are only provided for Linux systems.
+
+1. Install dependencies
+    * `sudo apt install cmake python3 build-essential gcc-arm-none-eabi libnewlib-arm-none-eabi libstdc++-arm-none-eabi-newlib`
+2. Clone this repo
+    * `cd ~`
+    * `git clone https://github.com/sparkfun/micropython-opencv.git`
+    * `cd micropython-opencv`
+    * `git submodule update --init`
+3. Build mpy-cross
+    * `make -C micropython/mpy-cross`
+4. Clone submodules for your board
+    * `make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER submodules`
+    * Replace `rp2` and `SPARKFUN_XRP_CONTROLLER` with your platform and board name respectively
+5. Set environment variables (optional)
+    * Some platforms require environment variables to be set. Examples:
+        * `export PICO_SDK_PATH=~/micropython-opencv/micropython/lib/pico-sdk`
+6. Build OpenCV
+    * `make -C src/opencv PLATFORM=rp2350 --no-print-directory -j4`
+    * Replace `rp2350` with your board's platform
+7. Build firmware
+    * `make BOARD=SPARKFUN_XRP_CONTROLLER -j4`
+    * Replace `SPARKFUN_XRP_CONTROLLER` with your board name
+    * Your firmware file(s) will be located in `~/micropython-opencv/micropython/ports/<port>/build-<BOARD>-OPENCV/`
+
+# Adding New Board
+
+Because OpenCV adds ~3MiB to the firmware size, it is necessary to define variants that reduce the storage size to avoid it overlapping with the firmware. It is also beneficial to adjust the board name to include `OpenCV` (or similar) to help customers and tech support identify whether the MicroPython-OpenCV firmware is actually flashed to the board.
+
+Below is the variant for the XRP Controller as an example. The variant is defined by creating a file called `mpconfigvariant_OPENCV.cmake` in [`micropython/ports/rp2/boards/SPARKFUN_XRP_CONTROLLER`](micropython/ports/rp2/boards/SPARKFUN_XRP_CONTROLLER) with contents:
+
+```
+list(APPEND MICROPY_DEF_BOARD
+    # Board name
+    "MICROPY_HW_BOARD_NAME=\"SparkFun XRP Controller (OpenCV)\""
+    # 8MB (8 * 1024 * 1024)
+    "MICROPY_HW_FLASH_STORAGE_BYTES=8388608"
+)
+```
+
+Some board definitions do not have `#ifndef` wrappers in `mpconfigboard.h` for `MICROPY_HW_BOARD_NAME` and `MICROPY_HW_FLASH_STORAGE_BYTES`. Those should be added if needed so the variant can build properly.
+
+Then, the firmware can be built with `make BOARD=<board> -j4`.
+
+# Adding New Platform
+
+Only the RP2350 platform exists currently, so the full set of requirements for adding new platforms is not yet known. However, it should be along the lines of:
+
+1. Create a valid toolchain file for the platform
+    * See [rp2350.toolchain.cmake](src/opencv/platforms/rp2350.toolchain.cmake) for reference
+    * This loosely follows [OpenCV's platform definitions](src/opencv/opencv/platforms)
+2. Ensure OpenCV builds correctly
+    * `make -C src/opencv PLATFORM=<platform> --no-print-directory -j4`
+3. Create new board(s) for that platform
+    * See [Adding New Board](#adding-new-board)
+4. Build MicroPython-OpenCV firmware for that board
+    * `make BOARD=<board> -j4`
+
+# Contributing
+
+Found a bug? Is there a discrepancy between standard OpenCV and MicroPython-OpenCV? Have a feature request? Want support for other hardware?
+
+First, please see if there is an [existing issue](https://github.com/sparkfun/micropython-opencv/issues).
If not, then please [open a new issue](https://github.com/sparkfun/micropython-opencv/issues/new) so we can discuss the topic!
+
+Pull requests are welcome! Please keep the scope of your pull request focused (make separate ones if needed), and keep file changes limited to that scope.
+
+Keep in mind that we only intend to support SparkFun products in this repository, though we may be open to hosting support for some hardware from other vendors. Please first open an issue to check if we're open to it. If not, you're always welcome to create your own fork following our license requirements!

From aa49d59d5eb535c3ffd756e70365b2479a683eee Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Mon, 14 Jul 2025 16:12:56 -0600
Subject: [PATCH 114/158] Remove tabs from Functions section in readme

Formats poorly in GitHub
---
 README.md | 122 +++++++++++++++++++++++++++---------------------------
 1 file changed, 61 insertions(+), 61 deletions(-)

diff --git a/README.md b/README.md
index cec024e..6772efa 100644
--- a/README.md
+++ b/README.md
@@ -155,9 +155,9 @@ If you need help understanding how to use these functions, see the documentation

 | Function | Notes |
 | --- | --- |
-| `cv.convertScaleAbs( src[, dst[, alpha[, beta]]] ) -> dst`
Scales, calculates absolute values, and converts the result to 8-bit.
[Documentation](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html#ga3460e9c9f37b563ab9dd550c4d8c4e7d) | | -| `cv.inRange( src, lowerb, upperb[, dst] ) -> dst`
Checks if array elements lie between the elements of two other arrays.
[Documentation](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html#ga48af0ab51e36436c5d04340e036ce981) | | -| ` cv.minMaxLoc( src[, mask] ) -> minVal, maxVal, minLoc, maxLoc`
Finds the global minimum and maximum in an array.
[Documentation](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html#gab473bf2eb6d14ff97e89b355dac20707) | | +| `cv.convertScaleAbs(src[, dst[, alpha[, beta]]]) -> dst`
Scales, calculates absolute values, and converts the result to 8-bit.
[Documentation](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html#ga3460e9c9f37b563ab9dd550c4d8c4e7d) | | +| `cv.inRange(src, lowerb, upperb[, dst]) -> dst`
Checks if array elements lie between the elements of two other arrays.
[Documentation](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html#ga48af0ab51e36436c5d04340e036ce981) | | +| `cv.minMaxLoc(src[, mask]) -> minVal, maxVal, minLoc, maxLoc`
Finds the global minimum and maximum in an array.
[Documentation](https://docs.opencv.org/4.11.0/d2/de8/group__core__array.html#gab473bf2eb6d14ff97e89b355dac20707) | | ## [`imgproc`](https://docs.opencv.org/4.11.0/d7/dbd/group__imgproc.html) @@ -165,102 +165,102 @@ If you need help understanding how to use these functions, see the documentation | Function | Notes | | --- | --- | -| `cv.bilateralFilter( src, d, sigmaColor, sigmaSpace[, dst[, borderType]] ) -> dst`
Applies the bilateral filter to an image.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga9d7064d478c95d60003cf839430737ed) | | -| `cv.blur( src, ksize[, dst[, anchor[, borderType]]] ) -> dst`
Blurs an image using the normalized box filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga8c45db9afe636703801b0b2e440fce37) | | -| `cv.boxFilter( src, ddepth, ksize[, dst[, anchor[, normalize[, borderType]]]] ) -> dst`
Blurs an image using the box filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gad533230ebf2d42509547d514f7d3fbc3) | | -| `cv.dilate( src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]] ) -> dst`
Dilates an image by using a specific structuring element.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga4ff0f3318642c4f469d0e11f242f3b6c) | | -| `cv.erode( src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]] ) -> dst`
Erodes an image by using a specific structuring element.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gaeb1e0c1033e3f6b891a25d0511362aeb) | | -| `cv.filter2D( src, ddepth, kernel[, dst[, anchor[, delta[, borderType]]]] ) -> dst`
Convolves an image with the kernel.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga27c049795ce870216ddfb366086b5a04) | | -| `cv.GaussianBlur( src, ksize, sigmaX[, dst[, sigmaY[, borderType[, hint]]]] ) -> dst`
Blurs an image using a Gaussian filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gae8bdcd9154ed5ca3cbc1766d960f45c1) | | -| `cv.getStructuringElement( shape, ksize[, anchor] ) -> retval`
Returns a structuring element of the specified size and shape for morphological operations.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gac342a1bb6eabf6f55c803b09268e36dc) | | -| `cv.Laplacian( src, ddepth[, dst[, ksize[, scale[, delta[, borderType]]]]] ) -> dst`
Calculates the Laplacian of an image.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gad78703e4c8fe703d479c1860d76429e6) | | -| `cv.medianBlur( src, ksize[, dst] ) -> dst`
Blurs an image using the median filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga564869aa33e58769b4469101aac458f9) | | -| `cv.morphologyEx( src, op, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]] ) -> dst`
Performs advanced morphological transformations.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga67493776e3ad1a3df63883829375201f) | | -| `cv.Scharr( src, ddepth, dx, dy[, dst[, scale[, delta[, borderType]]]] ) -> dst`
Calculates the first x- or y- image derivative using Scharr operator.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gaa13106761eedf14798f37aa2d60404c9) | | -| `cv.Sobel( src, ddepth, dx, dy[, dst[, ksize[, scale[, delta[, borderType]]]]] ) -> dst`
Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gacea54f142e81b6758cb6f375ce782c8d) | | -| `cv.spatialGradient( src[, dx[, dy[, ksize[, borderType]]]] ) -> dx, dy`
Calculates the first order image derivative in both x and y using a Sobel operator.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga405d03b20c782b65a4daf54d233239a2) | | +| `cv.bilateralFilter(src, d, sigmaColor, sigmaSpace[, dst[, borderType]]) -> dst`
Applies the bilateral filter to an image.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga9d7064d478c95d60003cf839430737ed) | | +| `cv.blur(src, ksize[, dst[, anchor[, borderType]]]) -> dst`
Blurs an image using the normalized box filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga8c45db9afe636703801b0b2e440fce37) | | +| `cv.boxFilter(src, ddepth, ksize[, dst[, anchor[, normalize[, borderType]]]]) -> dst`
Blurs an image using the box filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gad533230ebf2d42509547d514f7d3fbc3) | | +| `cv.dilate(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst`
Dilates an image by using a specific structuring element.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga4ff0f3318642c4f469d0e11f242f3b6c) | | +| `cv.erode(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst`
Erodes an image by using a specific structuring element.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gaeb1e0c1033e3f6b891a25d0511362aeb) | | +| `cv.filter2D(src, ddepth, kernel[, dst[, anchor[, delta[, borderType]]]]) -> dst`
Convolves an image with the kernel.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga27c049795ce870216ddfb366086b5a04) | | +| `cv.GaussianBlur(src, ksize, sigmaX[, dst[, sigmaY[, borderType[, hint]]]]) -> dst`
Blurs an image using a Gaussian filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gae8bdcd9154ed5ca3cbc1766d960f45c1) | | +| `cv.getStructuringElement(shape, ksize[, anchor]) -> retval`
Returns a structuring element of the specified size and shape for morphological operations.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gac342a1bb6eabf6f55c803b09268e36dc) | | +| `cv.Laplacian(src, ddepth[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst`
Calculates the Laplacian of an image.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gad78703e4c8fe703d479c1860d76429e6) | | +| `cv.medianBlur(src, ksize[, dst]) -> dst`
Blurs an image using the median filter.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga564869aa33e58769b4469101aac458f9) | | +| `cv.morphologyEx(src, op, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst`
Performs advanced morphological transformations.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga67493776e3ad1a3df63883829375201f) | | +| `cv.Scharr(src, ddepth, dx, dy[, dst[, scale[, delta[, borderType]]]]) -> dst`
Calculates the first x- or y- image derivative using Scharr operator.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gaa13106761eedf14798f37aa2d60404c9) | | +| `cv.Sobel(src, ddepth, dx, dy[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst`
Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#gacea54f142e81b6758cb6f375ce782c8d) | | +| `cv.spatialGradient(src[, dx[, dy[, ksize[, borderType]]]]) -> dx, dy`
Calculates the first order image derivative in both x and y using a Sobel operator.
[Documentation](https://docs.opencv.org/4.11.0/d4/d86/group__imgproc__filter.html#ga405d03b20c782b65a4daf54d233239a2) | | ### [Miscellaneous Image Transformations](https://docs.opencv.org/4.11.0/d7/d1b/group__imgproc__misc.html) | Function | Notes | | --- | --- | -| `cv.adaptiveThreshold( src, maxValue, adaptiveMethod, thresholdType, blockSize, C[, dst] ) -> dst`
Applies an adaptive threshold to an array.
[Documentation](https://docs.opencv.org/4.11.0/d7/d1b/group__imgproc__misc.html#ga72b913f352e4a1b1b397736707afcde3) | | -| `cv.threshold( src, thresh, maxval, type[, dst] ) -> retval, dst`
Applies a fixed-level threshold to each array element.
[Documentation](https://docs.opencv.org/4.11.0/d7/d1b/group__imgproc__misc.html#gae8a4a146d1ca78c626a53577199e9c57) | | +| `cv.adaptiveThreshold(src, maxValue, adaptiveMethod, thresholdType, blockSize, C[, dst]) -> dst`
Applies an adaptive threshold to an array.
[Documentation](https://docs.opencv.org/4.11.0/d7/d1b/group__imgproc__misc.html#ga72b913f352e4a1b1b397736707afcde3) | | +| `cv.threshold(src, thresh, maxval, type[, dst]) -> retval, dst`
Applies a fixed-level threshold to each array element.
[Documentation](https://docs.opencv.org/4.11.0/d7/d1b/group__imgproc__misc.html#gae8a4a146d1ca78c626a53577199e9c57) | | ### [Drawing Functions](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html) | Function | Notes | | --- | --- | -| `cv.arrowedLine( img, pt1, pt2, color[, thickness[, line_type[, shift[, tipLength]]]] ) -> img`
Draws an arrow segment pointing from the first point to the second one.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga0a165a3ca093fd488ac709fdf10c05b2) | | -| `cv.circle( img, center, radius, color[, thickness[, lineType[, shift]]] ) -> img`
Draws a circle.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#gaf10604b069374903dbd0f0488cb43670) | | -| `cv.drawContours( image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]] ) -> image`
Draws contours outlines or filled contours.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga746c0625f1781f1ffc9056259103edbc) | | -| `cv.drawMarker( img, position, color[, markerType[, markerSize[, thickness[, line_type]]]] ) -> img`
Draws a marker on a predefined position in an image.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga644c4a170d4799a56b29f864ce984b7e) | | -| `cv.ellipse( img, center, axes, angle, startAngle, endAngle, color[, thickness[, lineType[, shift]]] ) -> img`
Draws a simple or thick elliptic arc or fills an ellipse sector.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga28b2267d35786f5f890ca167236cbc69) | | -| `cv.fillConvexPoly( img, points, color[, lineType[, shift]] ) -> img`
Fills a convex polygon.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga9bb982be9d641dc51edd5e8ae3624e1f) | | -| `cv.fillPoly( img, pts, color[, lineType[, shift[, offset]]] ) -> img`
Fills the area bounded by one or more polygons.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga311160e71d37e3b795324d097cb3a7dc) | | -| `cv.line( img, pt1, pt2, color[, thickness[, lineType[, shift]]] ) -> img`
Draws a line segment connecting two points.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga7078a9fae8c7e7d13d24dac2520ae4a2) | | -| `cv.putText( img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]] ) -> img`
Draws a text string.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga5126f47f883d730f633d74f07456c576) | | -| `cv.rectangle( img, pt1, pt2, color[, thickness[, lineType[, shift]]] ) -> img`
Draws a simple, thick, or filled up-right rectangle.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga07d2f74cadcf8e305e810ce8eed13bc9) | | +| `cv.arrowedLine(img, pt1, pt2, color[, thickness[, line_type[, shift[, tipLength]]]]) -> img`
Draws an arrow segment pointing from the first point to the second one.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga0a165a3ca093fd488ac709fdf10c05b2) | | +| `cv.circle(img, center, radius, color[, thickness[, lineType[, shift]]]) -> img`
Draws a circle.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#gaf10604b069374903dbd0f0488cb43670) | | +| `cv.drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]]) -> image`
Draws contours outlines or filled contours.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga746c0625f1781f1ffc9056259103edbc) | | +| `cv.drawMarker(img, position, color[, markerType[, markerSize[, thickness[, line_type]]]]) -> img`
Draws a marker on a predefined position in an image.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga644c4a170d4799a56b29f864ce984b7e) | | +| `cv.ellipse(img, center, axes, angle, startAngle, endAngle, color[, thickness[, lineType[, shift]]]) -> img`
Draws a simple or thick elliptic arc or fills an ellipse sector.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga28b2267d35786f5f890ca167236cbc69) | | +| `cv.fillConvexPoly(img, points, color[, lineType[, shift]]) -> img`
Fills a convex polygon.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga9bb982be9d641dc51edd5e8ae3624e1f) | | +| `cv.fillPoly(img, pts, color[, lineType[, shift[, offset]]]) -> img`
Fills the area bounded by one or more polygons.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga311160e71d37e3b795324d097cb3a7dc) | | +| `cv.line(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img`
Draws a line segment connecting two points.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga7078a9fae8c7e7d13d24dac2520ae4a2) | | +| `cv.putText(img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]]) -> img`
Draws a text string.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga5126f47f883d730f633d74f07456c576) | | +| `cv.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img`
Draws a simple, thick, or filled up-right rectangle.
[Documentation](https://docs.opencv.org/4.11.0/d6/d6e/group__imgproc__draw.html#ga07d2f74cadcf8e305e810ce8eed13bc9) | | ### [Color Space Conversions](https://docs.opencv.org/4.11.0/d8/d01/group__imgproc__color__conversions.html) | Function | Notes | | --- | --- | -| `cv.cvtColor( src, code[, dst[, dstCn[, hint]]] ) -> dst`
Converts an image from one color space to another.
[Documentation](https://docs.opencv.org/4.11.0/d8/d01/group__imgproc__color__conversions.html#gaf86c09fe702ed037c03c2bc603ceab14) | | +| `cv.cvtColor(src, code[, dst[, dstCn[, hint]]]) -> dst`
Converts an image from one color space to another.
[Documentation](https://docs.opencv.org/4.11.0/d8/d01/group__imgproc__color__conversions.html#gaf86c09fe702ed037c03c2bc603ceab14) | | ### [Structural Analysis and Shape Descriptors](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html) | Function | Notes | | --- | --- | -| `cv.approxPolyDP( curve, epsilon, closed[, approxCurve] ) -> approxCurve`
Approximates a polygonal curve(s) with the specified precision.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga0012a5fdaea70b8a9970165d98722b4c) | | -| `cv.approxPolyN( curve, nsides[, approxCurve[, epsilon_percentage[, ensure_convex]]] ) -> approxCurve`
Approximates a polygon with a convex hull with a specified accuracy and number of sides.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga88981607a2d61b95074688aac55625cc) | | -| `cv.arcLength( curve, closed ) -> retval`
Calculates a contour perimeter or a curve length.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga8d26483c636be6b35c3ec6335798a47c) | | -| `cv.boundingRect( array ) -> retval`
Calculates the up-right bounding rectangle of a point set or non-zero pixels of gray-scale image.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga103fcbda2f540f3ef1c042d6a9b35ac7) | | -| `cv.boxPoints( box[, points] ) -> points`
Finds the four vertices of a rotated rect. Useful to draw the rotated rectangle.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaf78d467e024b4d7936cf9397185d2f5c) | | -| `cv.connectedComponents( image[, labels[, connectivity[, ltype]]] ) -> retval, labels`
computes the connected components labeled image of boolean image
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaedef8c7340499ca391d459122e51bef5) | `ltype` defaults to `CV_16U` instead of `CV_32S` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | -| ` cv.connectedComponentsWithStats( image[, labels[, stats[, centroids[, connectivity[, ltype]]]]] ) -> retval, labels, stats, centroids`
computes the connected components labeled image of boolean image and also produces a statistics output for each label
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga107a78bf7cd25dec05fb4dfc5c9e765f) | `labels`, `stats`, and `centroids` are returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | -| `cv.contourArea( contour[, oriented] ) -> retval`
Calculates a contour area.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga2c759ed9f497d4a618048a2f56dc97f1) | | -| `cv.convexHull( points[, hull[, clockwise[, returnPoints]]] ) -> hull`
Finds the convex hull of a point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga014b28e56cb8854c0de4a211cb2be656) | `hull` is returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | -| `cv.convexityDefects( contour, convexhull[, convexityDefects] ) -> convexityDefects`
Finds the convexity defects of a contour.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gada4437098113fd8683c932e0567f47ba) | `convexityDefects` is returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | -| `cv.findContours( image, mode, method[, contours[, hierarchy[, offset]]] ) -> contours, hierarchy`
Finds contours in a binary image.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gadf1ad6a0b82947fa1fe3c3d497f260e0) | `contours` and `hierarchy` are returned with `dtype=np.float` and `dtype=np.int16` respectively instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | -| `cv.fitEllipse( points ) -> retval`
Fits an ellipse around a set of 2D points.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaf259efaad93098103d6c27b9e4900ffa) | | -| `cv.fitLine( points, distType, param, reps, aeps[, line] ) -> line`
Fits a line to a 2D or 3D point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaf849da1fdafa67ee84b1e9a23b93f91f) | | -| `cv.isContourConvex( contour ) -> retval`
Tests a contour convexity.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga8abf8010377b58cbc16db6734d92941b) | | -| `cv.matchShapes( contour1, contour2, method, parameter ) -> retval`
Compares two shapes.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaadc90cb16e2362c9bd6e7363e6e4c317) | | -| `cv.minAreaRect( points ) -> retval`
Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga3d476a3417130ae5154aea421ca7ead9) | | -| `cv.minEnclosingCircle( points ) -> center, radius`
Finds a circle of the minimum area enclosing a 2D point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga8ce13c24081bbc7151e9326f412190f1) | | -| `cv.minEnclosingTriangle( points[, triangle] ) -> retval, triangle`
Finds a triangle of minimum area enclosing a 2D point set and returns its area.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga1513e72f6bbdfc370563664f71e0542f) | | -| `cv.moments( array[, binaryImage] ) -> retval`
Calculates all of the moments up to the third order of a polygon or rasterized shape.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga556a180f43cab22649c23ada36a8a139) | | -| `cv.pointPolygonTest( contour, pt, measureDist ) -> retval`
Performs a point-in-contour test.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga1a539e8db2135af2566103705d7a5722) | | +| `cv.approxPolyDP(curve, epsilon, closed[, approxCurve]) -> approxCurve`
Approximates a polygonal curve(s) with the specified precision.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga0012a5fdaea70b8a9970165d98722b4c) | | +| `cv.approxPolyN(curve, nsides[, approxCurve[, epsilon_percentage[, ensure_convex]]]) -> approxCurve`
Approximates a polygon with a convex hull with a specified accuracy and number of sides.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga88981607a2d61b95074688aac55625cc) | | +| `cv.arcLength(curve, closed) -> retval`
Calculates a contour perimeter or a curve length.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga8d26483c636be6b35c3ec6335798a47c) | | +| `cv.boundingRect(array) -> retval`
Calculates the up-right bounding rectangle of a point set or non-zero pixels of gray-scale image.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga103fcbda2f540f3ef1c042d6a9b35ac7) | | +| `cv.boxPoints(box[, points]) -> points`
Finds the four vertices of a rotated rect. Useful to draw the rotated rectangle.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaf78d467e024b4d7936cf9397185d2f5c) | | +| `cv.connectedComponents(image[, labels[, connectivity[, ltype]]]) -> retval, labels`
computes the connected components labeled image of boolean image
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaedef8c7340499ca391d459122e51bef5) | `ltype` defaults to `CV_16U` instead of `CV_32S` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | +| `cv.connectedComponentsWithStats(image[, labels[, stats[, centroids[, connectivity[, ltype]]]]]) -> retval, labels, stats, centroids`
computes the connected components labeled image of boolean image and also produces a statistics output for each label
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga107a78bf7cd25dec05fb4dfc5c9e765f) | `labels`, `stats`, and `centroids` are returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | +| `cv.contourArea(contour[, oriented]) -> retval`
Calculates a contour area.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga2c759ed9f497d4a618048a2f56dc97f1) | | +| `cv.convexHull(points[, hull[, clockwise[, returnPoints]]]) -> hull`
Finds the convex hull of a point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga014b28e56cb8854c0de4a211cb2be656) | `hull` is returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | +| `cv.convexityDefects(contour, convexhull[, convexityDefects]) -> convexityDefects`
Finds the convexity defects of a contour.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gada4437098113fd8683c932e0567f47ba) | `convexityDefects` is returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | +| `cv.findContours(image, mode, method[, contours[, hierarchy[, offset]]]) -> contours, hierarchy`
Finds contours in a binary image.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gadf1ad6a0b82947fa1fe3c3d497f260e0) | `contours` and `hierarchy` are returned with `dtype=np.float` and `dtype=np.int16` respectively instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | +| `cv.fitEllipse(points) -> retval`
Fits an ellipse around a set of 2D points.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaf259efaad93098103d6c27b9e4900ffa) | | +| `cv.fitLine(points, distType, param, reps, aeps[, line]) -> line`
Fits a line to a 2D or 3D point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaf849da1fdafa67ee84b1e9a23b93f91f) | | +| `cv.isContourConvex(contour) -> retval`
Tests a contour convexity.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga8abf8010377b58cbc16db6734d92941b) | | +| `cv.matchShapes(contour1, contour2, method, parameter) -> retval`
Compares two shapes.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#gaadc90cb16e2362c9bd6e7363e6e4c317) | | +| `cv.minAreaRect(points) -> retval`
Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga3d476a3417130ae5154aea421ca7ead9) | | +| `cv.minEnclosingCircle(points) -> center, radius`
Finds a circle of the minimum area enclosing a 2D point set.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga8ce13c24081bbc7151e9326f412190f1) | | +| `cv.minEnclosingTriangle(points[, triangle]) -> retval, triangle`
Finds a triangle of minimum area enclosing a 2D point set and returns its area.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga1513e72f6bbdfc370563664f71e0542f) | | +| `cv.moments(array[, binaryImage]) -> retval`
Calculates all of the moments up to the third order of a polygon or rasterized shape.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga556a180f43cab22649c23ada36a8a139) | | +| `cv.pointPolygonTest(contour, pt, measureDist) -> retval`
Performs a point-in-contour test.
[Documentation](https://docs.opencv.org/4.11.0/d3/dc0/group__imgproc__shape.html#ga1a539e8db2135af2566103705d7a5722) | | ### [Feature Detection](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html) | Function | Notes | | --- | --- | -| `cv.Canny( image, threshold1, threshold2[, edges[, apertureSize[, L2gradient]]] ) -> edges`
Finds edges in an image using the Canny algorithm.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga04723e007ed888ddf11d9ba04e2232de) | | -| `cv.HoughCircles( image, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]] ) -> circles`
Finds circles in a grayscale image using the Hough transform.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga47849c3be0d0406ad3ca45db65a25d2d) | | -| ` cv.HoughCirclesWithAccumulator( image, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]] ) -> circles`
Finds circles in a grayscale image using the Hough transform and get accumulator.
[Documentation](https://docs.opencv.org/4.x/d2/d75/namespacecv.html#aed6d238ceede74293152437228c603ce) | | -| `cv.HoughLines( image, rho, theta, threshold[, lines[, srn[, stn[, min_theta[, max_theta[, use_edgeval]]]]]] ) -> lines`
Finds lines in a binary image using the standard Hough transform.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga0b7ee275f89bb1a799ab70a42131f39d) | | -| `cv.HoughLinesP( image, rho, theta, threshold[, lines[, minLineLength[, maxLineGap]]] ) -> lines`
Finds line segments in a binary image using the probabilistic Hough transform.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga8618180a5948286384e3b7ca02f6feeb) | `lines` is returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 | -| `cv.HoughLinesWithAccumulator( image, rho, theta, threshold[, lines[, srn[, stn[, min_theta[, max_theta[, use_edgeval]]]]]] ) -> lines`
Finds lines in a binary image using the standard Hough transform and get accumulator.
[Documentation](https://docs.opencv.org/4.x/d2/d75/namespacecv.html#ad5e6dca5163cd4bd0135cb808f1ddfe8) | | +| `cv.Canny(image, threshold1, threshold2[, edges[, apertureSize[, L2gradient]]]) -> edges`
Finds edges in an image using the Canny algorithm.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga04723e007ed888ddf11d9ba04e2232de) | | +| `cv.HoughCircles(image, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]]) -> circles`
Finds circles in a grayscale image using the Hough transform.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga47849c3be0d0406ad3ca45db65a25d2d) | | +| `cv.HoughCirclesWithAccumulator(image, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]]) -> circles`
Finds circles in a grayscale image using the Hough transform and gets the accumulator.

[Documentation](https://docs.opencv.org/4.x/d2/d75/namespacecv.html#aed6d238ceede74293152437228c603ce) | | +| `cv.HoughLines(image, rho, theta, threshold[, lines[, srn[, stn[, min_theta[, max_theta[, use_edgeval]]]]]]) -> lines`
Finds lines in a binary image using the standard Hough transform.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga0b7ee275f89bb1a799ab70a42131f39d) | | +| `cv.HoughLinesP(image, rho, theta, threshold[, lines[, minLineLength[, maxLineGap]]]) -> lines`
Finds line segments in a binary image using the probabilistic Hough transform.
[Documentation](https://docs.opencv.org/4.11.0/dd/d1a/group__imgproc__feature.html#ga8618180a5948286384e3b7ca02f6feeb) | `lines` is returned with `dtype=np.float` instead of `np.int32` due to ulab not supporting 32-bit integers. See: https://github.com/v923z/micropython-ulab/issues/719 (a short usage sketch follows this table) | +| `cv.HoughLinesWithAccumulator(image, rho, theta, threshold[, lines[, srn[, stn[, min_theta[, max_theta[, use_edgeval]]]]]]) -> lines`

Finds lines in a binary image using the standard Hough transform and gets the accumulator.

[Documentation](https://docs.opencv.org/4.x/d2/d75/namespacecv.html#ad5e6dca5163cd4bd0135cb808f1ddfe8) | | ### [Object Detection](https://docs.opencv.org/4.11.0/df/dfb/group__imgproc__object.html) | Function | Notes | | --- | --- | -| `cv.matchTemplate( image, templ, method[, result[, mask]] ) -> result`
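As a rough, untested sketch of chaining the feature detectors above, with the integer conversion implied by the `cv.HoughLinesP()` note (all parameter values here are illustrative only):

```
import cv2 as cv
from ulab import numpy as np

# `gray` is assumed to be a single-channel image from an earlier step
edges = cv.Canny(gray, 50, 150)
lines = cv.HoughLinesP(edges, 1, np.pi / 180, 50, minLineLength=30, maxLineGap=5)

# Assuming the port mirrors desktop OpenCV in returning None when nothing
# is found
if lines is not None:
    for line in lines:
        # Endpoints are np.float on this port, so round them before drawing
        x1, y1, x2, y2 = (int(v) for v in line[0])
        edges = cv.line(edges, (x1, y1), (x2, y2), 255, 2)
```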
Compares a template against overlapped image regions.
[Documentation](https://docs.opencv.org/4.11.0/df/dfb/group__imgproc__object.html#ga586ebfb0a7fb604b35a23d85391329be) | | +| `cv.matchTemplate(image, templ, method[, result[, mask]]) -> result`
Compares a template against overlapped image regions.
[Documentation](https://docs.opencv.org/4.11.0/df/dfb/group__imgproc__object.html#ga586ebfb0a7fb604b35a23d85391329be) | | | Function | Notes | | --- | --- | -| `cv.imread( filename[, flags] ) -> retval`
Loads an image from a file.
[Documentation](https://docs.opencv.org/4.11.0/d4/da8/group__imgcodecs.html#gacbaa02cffc4ec2422dfa2e24412a99e2) | `filename` can be anywhere in the full MicroPython filesystem, including SD cards if mounted.
Only BMP and PNG formats are currently supported. | -| `cv.imwrite( filename, img[, params] ) -> retval`
Saves an image to a specified file.
[Documentation](https://docs.opencv.org/4.11.0/d4/da8/group__imgcodecs.html#ga8ac397bd09e48851665edbe12aa28f25) | `filename` can be anywhere in the full MicroPython filesystem, including SD cards if mounted.
Only BMP and PNG formats are currently supported. | +| `cv.imread(filename[, flags]) -> retval`
Loads an image from a file.
[Documentation](https://docs.opencv.org/4.11.0/d4/da8/group__imgcodecs.html#gacbaa02cffc4ec2422dfa2e24412a99e2) | `filename` can be anywhere in the full MicroPython filesystem, including SD cards if mounted.
Only BMP and PNG formats are currently supported. A short sketch follows this table. | +| `cv.imwrite(filename, img[, params]) -> retval`

Saves an image to a specified file.
[Documentation](https://docs.opencv.org/4.11.0/d4/da8/group__imgcodecs.html#ga8ac397bd09e48851665edbe12aa28f25) | `filename` can be anywhere in the full MicroPython filesystem, including SD cards if mounted.
Only BMP and PNG formats are currently supported. | ## [`highgui`](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html) | Function | Notes | | --- | --- | -| `cv.imshow( winname, mat ) -> None`
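For example, a small, untested sketch of round-tripping an image through the filesystem; the `/sd` mount point is an assumption, so substitute whatever path your board actually uses:

```
import cv2 as cv

# "/sd" is an assumed SD card mount point; any MicroPython filesystem path works
try:
    img = cv.imread("/sd/photo.png")       # Only BMP and PNG are supported
    cv.imwrite("/sd/photo_copy.bmp", img)
except Exception:
    print("Could not read or write the image")
```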
Displays an image in the specified window.
[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga453d42fe4cb60e5723281a89973ee563) | `winname` must actually be a display driver object that implements an `imshow()` method that takes a NumPy array as input. | -| `cv.waitKey( [, delay] ) -> retval`
Waits for a pressed key.
[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga5628525ad33f52eab17feebcfba38bd7) | Input is taken from `sys.stdin`, which is typically the REPL. | +| `cv.imshow(winname, mat) -> None`
Displays an image in the specified window.
[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga453d42fe4cb60e5723281a89973ee563) | `winname` must actually be a display driver object that implements an `imshow()` method that takes a NumPy array as input. A minimal sketch of such an object follows this table. | +| `cv.waitKey([, delay]) -> retval`

Waits for a pressed key.
[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga5628525ad33f52eab17feebcfba38bd7) | Input is taken from `sys.stdin`, which is typically the REPL. | # Building From 258e01832c90bf1d605c91de49e6d0dcb57e94ee Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 14 Jul 2025 16:24:00 -0600 Subject: [PATCH 115/158] Fix hyperlinks in readme --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 6772efa..c5c0a13 100644 --- a/README.md +++ b/README.md @@ -293,7 +293,7 @@ Below are instructions to build the MicroPython-OpenCV firmware from scratch. In Because OpenCV adds ~3MiB to the firmware size, it is necessary to define variants that reduce the storage size to avoid it overlapping with the firmware. It is also beneficial to adjust the board name to include `OpenCV` (or similar) to help customers and tech support identify whether the MicroPython-OpenCV is actually flashed to the board. -Below is the variant for the XRP Controller as an example. The variant is defined by creating a file called `mpconfigvariant_OPENCV.cmake` in [`micropython/ports/rp2/boards/SPARKFUN_XRP_CONTROLLER`](micropython/ports/rp2/boards/SPARKFUN_XRP_CONTROLLER) with contents: +Below is the variant for the XRP Controller as an example. The variant is defined by creating a file called `mpconfigvariant_OPENCV.cmake` in [`micropython/ports/rp2/boards/SPARKFUN_XRP_CONTROLLER`](https://github.com/sparkfun/micropython/blob/7e728e8c6aad74ca244183f3e0705db6f332abd9/ports/rp2/boards/SPARKFUN_XRP_CONTROLLER/mpconfigvariant_LARGE_BINARY.cmake) with contents: ``` list(APPEND MICROPY_DEF_BOARD @@ -314,11 +314,11 @@ Only RP2350 exists currently, so the all requirements for adding new platforms i 1. Create a valid toolchain file for the platform * See [rp2350.toolchain.cmake](src/opencv/platforms/rp2350.toolchain.cmake) for reference - * This loosely follow's [OpenCV's platform definitions](src/opencv/opencv/platforms) + * This loosely follow's [OpenCV's platform definitions](https://github.com/opencv/opencv/tree/4.x/platforms) 2. Ensure OpenCV builds correctly * `make -C src/opencv PLATFORM= --no-print-directory -j4` 3. Create new board(s) for that platform - * See [#Adding New Board](#Adding%20New%20Board) + * See [#Adding New Board](#Adding-New-Board) 4. Build MicroPython-OpenCV firmware for that board * `make BOARD= -j4` From 53d53ab38e6846fdc84ad669dad1ba2ee5796bba Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 14 Jul 2025 16:28:59 -0600 Subject: [PATCH 116/158] Update readme Quick Start info about hardware drivers --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c5c0a13..b0cb104 100644 --- a/README.md +++ b/README.md @@ -13,8 +13,8 @@ Welcome to SparkFun's MicroPython port of OpenCV! This is the first known MicroP * `cd micropython-opencv/examples` * `mpremote cp -r . :` 3. Configure hardware drivers - * The MicroPython port of OpenCV depends on hardware drivers to interface with cameras and displays. Drivers are built into the firmware, so there is no need to install them manually, but you will likely need to configure them for your specific hardware and board configuration. - * [Examples](examples/cv2_hardware_init/) are provided for supported hardware. Edit the examples to work with your specific hardware and board configuration. + * The MicroPython port of OpenCV depends on hardware drivers to interface with cameras and displays. 
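Because `winname` is duck-typed, any object that provides a compatible `imshow()` method can act as a display. The class below is an illustrative stand-in, not a real driver from this repository:

```
import cv2 as cv

class ConsoleDisplay:
    # Toy "display" that only reports the frame size; a real driver would
    # push the NumPy array to a panel here instead
    def imshow(self, img):
        print("Would draw a frame with shape:", img.shape)

display = ConsoleDisplay()
cv.imshow(display, frame)  # `frame` is assumed to be an existing image array
```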
Drivers are built into the firmware, so there is no need to install them manually. + * An example module called [cv2_hardware_init](examples/cv2_hardware_init/) is imported by all examples to initialize the drivers. You will likely need to edit the files for your specific hardware and board configuration. 4. Write OpenCV code! * Any IDE should work, so use your favorite! * The code block below contains snippets from various examples to highlight major features. From 8f163248b3e76d30887e4c963f4ce0f8c976472f Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 14 Jul 2025 16:43:18 -0600 Subject: [PATCH 117/158] Add missing imgcodecs header in readme --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index b0cb104..34538fa 100644 --- a/README.md +++ b/README.md @@ -250,6 +250,8 @@ If you need help understanding how to use these functions, see the documentation | --- | --- | | `cv.matchTemplate(image, templ, method[, result[, mask]]) -> result`
Compares a template against overlapped image regions.
[Documentation](https://docs.opencv.org/4.11.0/df/dfb/group__imgproc__object.html#ga586ebfb0a7fb604b35a23d85391329be) | | +## [`imgcodecs`](https://docs.opencv.org/4.11.0/d4/da8/group__imgcodecs.html) + | Function | Notes | | --- | --- | | `cv.imread(filename[, flags]) -> retval`
Loads an image from a file.
[Documentation](https://docs.opencv.org/4.11.0/d4/da8/group__imgcodecs.html#gacbaa02cffc4ec2422dfa2e24412a99e2) | `filename` can be anywhere in the full MicroPython filesystem, including SD cards if mounted.
Only BMP and PNG formats are currently supported. | From 3ca9229c8140f75dba855bf0efc7a3a012021d18 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 15 Jul 2025 10:16:53 -0600 Subject: [PATCH 118/158] Fix threshrold to return tuple Fixes #32 --- src/imgproc.cpp | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/imgproc.cpp b/src/imgproc.cpp index 50b1aeb..9620c85 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -2127,13 +2127,18 @@ mp_obj_t cv2_imgproc_threshold(size_t n_args, const mp_obj_t *pos_args, mp_map_t int type = args[ARG_type].u_int; Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj); + mp_float_t retval; + // Call the corresponding OpenCV function try { - threshold(src, dst, thresh, maxval, type); + retval = threshold(src, dst, thresh, maxval, type); } catch(Exception& e) { mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what())); } - // Return the result - return mat_to_mp_obj(dst); + // Return the result as a tuple + mp_obj_t result_tuple[2]; + result_tuple[0] = mp_obj_new_float(retval); + result_tuple[1] = mat_to_mp_obj(dst); + return mp_obj_new_tuple(2, result_tuple); } From 9e4649fd98cbb13b016b142f2cca83fa1879d57d Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 15 Jul 2025 12:39:31 -0600 Subject: [PATCH 119/158] Add waitKeyEx() Fixes #15 --- README.md | 1 + src/highgui.cpp | 109 +++++++++++++++++++++++++++++++++++++----------- src/highgui.h | 5 ++- 3 files changed, 90 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index 34538fa..ba3fadd 100644 --- a/README.md +++ b/README.md @@ -263,6 +263,7 @@ If you need help understanding how to use these functions, see the documentation | --- | --- | | `cv.imshow(winname, mat) -> None`
Displays an image in the specified window.
[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga453d42fe4cb60e5723281a89973ee563) | `winname` must actually be a display driver object that implements an `imshow()` method that takes a NumPy array as input. | | `cv.waitKey([, delay]) -> retval`
Waits for a pressed key.
[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga5628525ad33f52eab17feebcfba38bd7) | Input is taken from `sys.stdin`, which is typically the REPL. | +| `cv.waitKeyEx([, delay]) -> retval`
Similar to waitKey, but returns the full key code. A short sketch follows this table.

[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga5628525ad33f52eab17feebcfba38bd7) | Input is taken from `sys.stdin`, which is typically the REPL.
Full key code is implementation specific, so special key codes in MicroPython will not match other Python environments. | # Building diff --git a/src/highgui.cpp b/src/highgui.cpp index dbeb957..11743b9 100644 --- a/src/highgui.cpp +++ b/src/highgui.cpp @@ -46,6 +46,23 @@ mp_obj_t cv2_highgui_imshow(size_t n_args, const mp_obj_t *pos_args, mp_map_t *k } mp_obj_t cv2_highgui_waitKey(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { + // Call waitKeyEx to do the heavy lifting + mp_obj_t key = cv2_highgui_waitKeyEx(n_args, pos_args, kw_args); + + // Get the key code as an integer + int32_t key_code = mp_obj_get_int(key); + + // If the key code is -1, it means no key was pressed + if (key_code == -1) { + // Return the original key object + return key; + } else { + // Return the last byte of the key code + return MP_OBJ_NEW_SMALL_INT(key_code & 0xFF); + } +} + +mp_obj_t cv2_highgui_waitKeyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { // Define the arguments enum { Arg_delay }; static const mp_arg_t allowed_args[] = { @@ -88,35 +105,79 @@ mp_obj_t cv2_highgui_waitKey(size_t n_args, const mp_obj_t *pos_args, mp_map_t * // of 0 to wait indefinitely, whereas `select.poll` uses -1 mp_obj_t timeout = MP_OBJ_NEW_SMALL_INT(delay <= 0 ? -1 : delay); - // TODO: Some key presses return multiple characters (eg. up arrow key - // returns 3 characters: "\x1b[A"). Need to handle this case properly. - // Should also look into implementing waitKeyEx() for these extra cases - - // Call `poll.poll(timeout)` + // Load the `poll.poll()` method to check for key presses mp_obj_t poll_poll_method[3]; mp_load_method(poll_obj, MP_QSTR_poll, poll_poll_method); - poll_poll_method[2] = timeout; - mp_obj_t result = mp_call_method_n_kw(1, 0, poll_poll_method); - - // Extract the items from the result list - mp_obj_t *items; - size_t len; - mp_obj_list_get(result, &len, &items); - - // Check if any items were returned - if(len == 0) { - // If no items were returned, return -1 to indicate no key was pressed - return MP_OBJ_NEW_SMALL_INT(-1); - } - // Since something was returned, a key was pressed. We need to extract it - // with `sys.stdin.read(1)` + // Load the `sys.stdin.read(1)` method to read a single character mp_obj_t read_method[3]; mp_load_method(stdin_obj, MP_QSTR_read, read_method); read_method[2] = MP_OBJ_NEW_SMALL_INT(1); - mp_obj_t key_str = mp_call_method_n_kw(1, 0, read_method); - // Convert the key character to an integer and return it - const char *key_chars = mp_obj_str_get_str(key_str); - return MP_OBJ_NEW_SMALL_INT(key_chars[0]); + // Initialize key code to -1, which indicates no key was pressed + int32_t key_code = -1; + + // Some key presses return multiple bytes (eg. up arrow key returns 3 bytes: + // `\x1b[A`). To handle this, we will loop until no more bytes are available + for (int i = 0; true; i++) { + // Call `poll.poll(timeout)` if this is the first iteration, otherwise + // call `poll.poll(1)` to quickly check for any remaining bytes. Can't + // wait 0ms, because it takes a moment for all bytes to arrive + poll_poll_method[2] = i == 0 ? timeout : MP_OBJ_NEW_SMALL_INT(1); + mp_obj_t result = mp_call_method_n_kw(1, 0, poll_poll_method); + + // Extract the items from the result list + mp_obj_t *items; + size_t len; + mp_obj_list_get(result, &len, &items); + + // Check if any items were returned + if(len == 0) { + // No more bytes available, so we're done. 
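As a short, untested sketch of the difference between the two functions, based on the notes above (on this port, special keys produce multi-byte codes whose low byte is zero):

```
import cv2 as cv

key = cv.waitKey(100)       # Returns only the low byte; fine for normal keys
if key == ord('q'):
    print("q pressed")

key_ex = cv.waitKeyEx(100)  # Returns the full, implementation-specific code
if key_ex != -1 and (key_ex & 0xFF) == 0:
    # A zero low byte indicates a special (multi-byte) key on this port
    print("Special key:", hex(key_ex))
```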
If multiple bytes were + // read, we want the last byte to be 0 so it doesn't get confused + // in `waitKey()` with a normal key press. So we can simply shift + // the key code left by 8 bits again + if (i > 1) { + key_code <<= 8; + } + break; + } + + // Since something was returned, a byte is available. We need to + // extract it by calling `sys.stdin.read(1)` + mp_obj_t byte_str = mp_call_method_n_kw(1, 0, read_method); + + // Convert the byte object to an actual byte + uint8_t byte_val = mp_obj_str_get_str(byte_str)[0]; + + // Check which iteration this is + if(i == 0) { + // This is the first iteration, set the key code to this byte + key_code = byte_val; + + // Special keys always start with an escape character (0x1b). If + // this is not the escape character, we can assume it's a normal key + // press and break immediately. This helps mitigate the problem of + // interpreting 2 key simultaneous key presses as 1 special key + if (byte_val != 0x1b) { + break; + } + } else if (i == 1) { + // This is the second iteration, meaning the first byte was the + // escape character. We don't want that to be part of the key code + // (special keys will be indicated by having multiple bytes, and the + // last byte being zero), so we'll just overwrite the key code with + // the second byte + key_code = byte_val; + } else { + // This is a subsequent iteration, meaning we have already read the + // escape character and the second byte. For all subsequent bytes, + // we will shift the key code left by 8 bits and add the new byte to + // it to create a multi-byte key + key_code = (key_code << 8) | byte_val; + } + } + + // Return the final key code + return MP_OBJ_NEW_SMALL_INT(key_code); } diff --git a/src/highgui.h b/src/highgui.h index 1c4438d..ab17f90 100644 --- a/src/highgui.h +++ b/src/highgui.h @@ -4,13 +4,16 @@ // Function declarations extern mp_obj_t cv2_highgui_imshow(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); extern mp_obj_t cv2_highgui_waitKey(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); +extern mp_obj_t cv2_highgui_waitKeyEx(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args); // Python references to the functions static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_imshow_obj, 2, cv2_highgui_imshow); static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_waitKey_obj, 0, cv2_highgui_waitKey); +static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_waitKeyEx_obj, 0, cv2_highgui_waitKeyEx); // Global definitions for functions and constants #define OPENCV_HIGHGUI_GLOBALS \ /* Functions */ \ { MP_ROM_QSTR(MP_QSTR_imshow), MP_ROM_PTR(&cv2_highgui_imshow_obj) }, \ - { MP_ROM_QSTR(MP_QSTR_waitKey), MP_ROM_PTR(&cv2_highgui_waitKey_obj) } + { MP_ROM_QSTR(MP_QSTR_waitKey), MP_ROM_PTR(&cv2_highgui_waitKey_obj) }, \ + { MP_ROM_QSTR(MP_QSTR_waitKeyEx), MP_ROM_PTR(&cv2_highgui_waitKeyEx_obj) } From 6a81d86dcb78177f786f315e1756403d844bf197 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 15 Jul 2025 13:14:30 -0600 Subject: [PATCH 120/158] Fix ndarray_to_mat to copy if needed Fixes #16 --- src/convert.cpp | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/src/convert.cpp b/src/convert.cpp index 957cb44..e317079 100644 --- a/src/convert.cpp +++ b/src/convert.cpp @@ -60,6 +60,7 @@ Mat ndarray_to_mat(ndarray_obj_t *ndarray) // We have an ndarray_obj_t, so these checks have already been done. 
// https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_convert.cpp#L130-L172 + bool needcopy = false; int type = ndarray_type_to_mat_depth(ndarray->dtype); int ndims = ndarray->ndim; @@ -71,17 +72,41 @@ Mat ndarray_to_mat(ndarray_obj_t *ndarray) _strides[i] = ndarray->strides[ULAB_MAX_DIMS - ndarray->ndim + i]; } - // https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_convert.cpp#L176-L221 + // https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_convert.cpp#L176-L241 bool ismultichannel = ndims == 3; + for( int i = ndims-1; i >= 0 && !needcopy; i-- ) + { + // these checks handle cases of + // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases + // b) transposed arrays, where _strides[] elements go in non-descending order + // c) flipped arrays, where some of _strides[] elements are negative + // the _sizes[i] > 1 is needed to avoid spurious copies when NPY_RELAXED_STRIDES is set + if( (i == ndims-1 && _sizes[i] > 1 && (size_t)_strides[i] != elemsize) || + (i < ndims-1 && _sizes[i] > 1 && _strides[i] < _strides[i+1]) ) + needcopy = true; + } + if (ismultichannel) { int channels = ndims >= 1 ? (int)_sizes[ndims - 1] : 1; ndims--; type |= CV_MAKETYPE(0, channels); + + if (ndims >= 1 && _strides[ndims - 1] != (size_t)elemsize*_sizes[ndims]) + needcopy = true; + elemsize = CV_ELEM_SIZE(type); } + if (needcopy) + { + ndarray = ndarray_from_mp_obj(ndarray_copy(ndarray), 0); + for (int i = 0; i < ndarray->ndim; i++) { + _strides[i] = ndarray->strides[ULAB_MAX_DIMS - ndarray->ndim + i]; + } + } + // https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_convert.cpp#L243-L261 int size[CV_MAX_DIM+1] = {}; size_t step[CV_MAX_DIM+1] = {}; From f37d860aae8e33d2d6e0952d5c9b22ba55e11f36 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 15 Jul 2025 15:49:21 -0600 Subject: [PATCH 121/158] Fix example 5 Reflects change in 3ca9229 --- examples/ex05_detect_sfe_logo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/ex05_detect_sfe_logo.py b/examples/ex05_detect_sfe_logo.py index f0b4940..7c9e7bc 100644 --- a/examples/ex05_detect_sfe_logo.py +++ b/examples/ex05_detect_sfe_logo.py @@ -54,7 +54,7 @@ # logo on a light background (or vice versa), but you can modify this to # find specific colors or use other methods if desired gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) - thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU) + ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU) # Find contours in the binary image, which represent the boundaries of # shapes. 
Contours are a powerful tool in OpenCV for shape analysis and From 3285909a642322513fc8800deefd6ed89f0071e1 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 15 Jul 2025 16:39:28 -0600 Subject: [PATCH 122/158] Add header comments to examples --- examples/ex01_hello_opencv.py | 13 +++++++++++++ examples/ex02_camera.py | 12 ++++++++++++ examples/ex03_touch_screen.py | 12 ++++++++++++ examples/ex04_imread_imwrite.py | 13 +++++++++++++ examples/ex05_detect_sfe_logo.py | 31 +++++++++++++++++++++++++++++++ 5 files changed, 81 insertions(+) diff --git a/examples/ex01_hello_opencv.py b/examples/ex01_hello_opencv.py index 61e01ce..e544b81 100644 --- a/examples/ex01_hello_opencv.py +++ b/examples/ex01_hello_opencv.py @@ -1,3 +1,16 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# ex01_hello_opencv.py +# +# This example demonstrates near-minimal code to get started with OpenCV in +# MicroPython. It can be used to verify that OpenCV is working correctly, and +# that the display driver is functioning. It simpy imports the required modules, +# creates a blank image, draws some things on it, and shows it on the display. +#------------------------------------------------------------------------------- + # Import OpenCV, just as you would in any other Python environment! import cv2 as cv diff --git a/examples/ex02_camera.py b/examples/ex02_camera.py index 6b18757..432fa38 100644 --- a/examples/ex02_camera.py +++ b/examples/ex02_camera.py @@ -1,3 +1,15 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# ex02_camera.py +# +# This example demonstrates how to read frames from a camera and show them on a +# display using OpenCV in MicroPython. It can be used to verify that the camera +# driver is functioning. +#------------------------------------------------------------------------------- + # Import OpenCV and hardware initialization module import cv2 as cv from cv2_hardware_init import * diff --git a/examples/ex03_touch_screen.py b/examples/ex03_touch_screen.py index ab0753c..3af6645 100644 --- a/examples/ex03_touch_screen.py +++ b/examples/ex03_touch_screen.py @@ -1,3 +1,15 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# ex03_touch_screen.py +# +# This example demonstrates how to read input from a touch screen, which can be +# used to verify that the touch screen driver is functioning. It simply draws +# lines on a blank image based on touch input, similar to a drawing application. 
+#------------------------------------------------------------------------------- + # Import OpenCV and hardware initialization module import cv2 as cv from cv2_hardware_init import * diff --git a/examples/ex04_imread_imwrite.py b/examples/ex04_imread_imwrite.py index 26d7387..ce3b62e 100644 --- a/examples/ex04_imread_imwrite.py +++ b/examples/ex04_imread_imwrite.py @@ -1,3 +1,16 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# ex04_imread_imwrite.py +# +# This example demonstrates how to read and write images to and from the +# MicroPython filesystem using `cv.imread()` and `cv.imwrite()`. Any paths +# accessible to the MicroPython filesystem can be used, including an SD card if +# your board has one connected. +#------------------------------------------------------------------------------- + # Import OpenCV and hardware initialization module import cv2 as cv from cv2_hardware_init import * diff --git a/examples/ex05_detect_sfe_logo.py b/examples/ex05_detect_sfe_logo.py index 7c9e7bc..622303e 100644 --- a/examples/ex05_detect_sfe_logo.py +++ b/examples/ex05_detect_sfe_logo.py @@ -1,3 +1,34 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# ex05_detect_sfe_logo.py +# +# This example demonstrates a basic vision processing pipeline. It reads frames +# from the camera, finds contours in the image, and compares them to a reference +# contour to detect the SparkFun flame logo. Below is some (bad) ASCII art of +# the logo for reference. The example draws the actual contour that it's looking +# for in the top left corner of the display. +# +# ___ +# / _\ +# \ \ +# /|_| \/\ +# | | +# | | +# | / +# | _____/ +# | / +# |/ +# +# If the logo is detected, it will be highlighted in red on the display. Note +# that this vision pipeline is very simple and does not include many of the +# steps that would typically be included in more robust pipelines for the sake +# of simplicity and performance. So it may produce false positives or miss the +# logo entirely in some cases. +#------------------------------------------------------------------------------- + # Import OpenCV import cv2 as cv from cv2_hardware_init import * From ba609580eb968c266fd298b1e598c2ba4853422f Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 16 Jul 2025 12:24:01 -0600 Subject: [PATCH 123/158] Improve logo detection example Re-organize and improve comments Move pipeline to function --- examples/ex05_detect_sfe_logo.py | 117 ++++++++++++++++++++----------- 1 file changed, 75 insertions(+), 42 deletions(-) diff --git a/examples/ex05_detect_sfe_logo.py b/examples/ex05_detect_sfe_logo.py index 622303e..65ac6b6 100644 --- a/examples/ex05_detect_sfe_logo.py +++ b/examples/ex05_detect_sfe_logo.py @@ -5,12 +5,34 @@ #------------------------------------------------------------------------------- # ex05_detect_sfe_logo.py # -# This example demonstrates a basic vision processing pipeline. It reads frames -# from the camera, finds contours in the image, and compares them to a reference -# contour to detect the SparkFun flame logo. Below is some (bad) ASCII art of -# the logo for reference. 
The example draws the actual contour that it's looking -# for in the top left corner of the display. -# +# This example demonstrates a basic vision processing pipeline. A pipeline is +# just a sequence of steps used to extract meaningful data from an image. The +# pipeline in this example attempts to detect the SparkFun flame logo using +# contour matching. If it's detected, it will be outlined on the display for +# visualization. The bounding box and center of the logo will also be drawn, +# demonstrating how to acquire useful numerical data from an image (eg. the +# position and size of an object). +# +# Note that this pipeline is very simple and does not include many of the steps +# that would typically be included in more robust pipelines. This was done for +# simplicity and performance, so it may produce false positives or miss the logo +# entirely sometimes. +#------------------------------------------------------------------------------- + +# Import OpenCV and hardware initialization module +import cv2 as cv +from cv2_hardware_init import * + +# Import NumPy +from ulab import numpy as np + +# Import time for frame rate calculation +import time + +# Here we define a reference contour for the SparkFun flame logo. This was +# created manually by picking points on the boundary of a small image of the +# logo in an image editor. Below is also ASCII art of the logo for reference, +# but the actual contour is drawn in the top left corner of the display. # ___ # / _\ # \ \ @@ -21,24 +43,6 @@ # | _____/ # | / # |/ -# -# If the logo is detected, it will be highlighted in red on the display. Note -# that this vision pipeline is very simple and does not include many of the -# steps that would typically be included in more robust pipelines for the sake -# of simplicity and performance. So it may produce false positives or miss the -# logo entirely in some cases. -#------------------------------------------------------------------------------- - -# Import OpenCV -import cv2 as cv -from cv2_hardware_init import * -from ulab import numpy as np -import time - -# Here we define a reference contour for the SparkFun flame logo. This was -# created manually by picking points on the boundary of a small image of the -# logo in an image editor. This gets drawn in the top left corner of the -# display for reference logo_contour = np.array( [[[0,48]], [[0,22]], @@ -65,20 +69,9 @@ [[20,36]], [[12,36]]], dtype=np.float) -# Initialize a loop timer to calculate processing speed in FPS -loop_time = time.ticks_us() - -# Open the camera -camera.open() - -# Prompt the user to press a key to continue -print("Press any key to continue") - -# Loop to continuously read frames from the camera and display them -while True: - # Read a frame from the camera - success, frame = camera.read() - +# This is the pipeline implementation. This gets called for each frame captured +# by the camera in the main loop +def my_pipeline(frame): # Here we binarize the image. There are many ways to do this, but here we # simply convert the image to grayscale and then apply Otsu's thresholding # method to create a binary image. This means it will only detect a dark @@ -87,9 +80,9 @@ gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU) - # Find contours in the binary image, which represent the boundaries of - # shapes. 
Contours are a powerful tool in OpenCV for shape analysis and - # object detection + # Find contours in the binary image, which are simply lists of points around + # the boundaries of shapes. Contours are a powerful tool in OpenCV for shape + # analysis and object detection contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) # It's possible that no contours were found, so first check if any were @@ -130,9 +123,49 @@ # that good matches are usually around 0.5, so we'll use a slightly # higher threshold of 1.0 if best_similarity < 1.0: - # Now we'll draw the best contour found on the original image + # The best contour found is a good match, so we'll draw it on the + # frame to outline the detected logo for visualization frame = cv.drawContours(frame, [best_contour], -1, (0, 0, 255), 2) + # Visualization is great, but the purpose of most real pipelines is + # to extract useful data from the image. For example, suppose we + # want to know where the logo is located in the image and how large + # it is. We can use the bounding rectangle of the contour to get the + # position and size of the logo + left, top, width, height = cv.boundingRect(best_contour) + center_x = left + width // 2 + center_y = top + height // 2 + + # Now we could use this data for some task! For example, if we had + # a robot that needed to drive up to the logo, we could turn to face + # the logo with the center point, then drive towards it until the + # size is big enough. + # + # This example doesn't actually make use of the data, so we'll just + # draw the bounding box and center of the logo for visualization + frame = cv.rectangle(frame, (left, top), (left + width, top + height), (255, 0, 0), 2) + frame = cv.circle(frame, (center_x, center_y), 5, (0, 255, 0), -1) + +# Initialize a loop timer to calculate processing speed in FPS +loop_time = time.ticks_us() + +# Open the camera +camera.open() + +# Prompt the user to press a key to continue +print("Press any key to continue") + +# Loop to continuously read frames from the camera and display them +while True: + # Read a frame from the camera + success, frame = camera.read() + if not success: + print("Failed to read frame from camera") + break + + # Call the pipeline function to process the frame + my_pipeline(frame) + # All processing is done! Calculate the frame rate and display it current_time = time.ticks_us() fps = 1000000 / (current_time - loop_time) From 9a034f8a49a1f86d2a0008e03ea0012eb402f0e8 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 16 Jul 2025 16:28:39 -0600 Subject: [PATCH 124/158] Add center and size text to logo detection example --- examples/ex05_detect_sfe_logo.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/examples/ex05_detect_sfe_logo.py b/examples/ex05_detect_sfe_logo.py index 65ac6b6..226e23f 100644 --- a/examples/ex05_detect_sfe_logo.py +++ b/examples/ex05_detect_sfe_logo.py @@ -3,7 +3,7 @@ # # Copyright (c) 2025 SparkFun Electronics #------------------------------------------------------------------------------- -# ex05_detect_sfe_logo.py +# ex06_detect_sfe_logo.py # # This example demonstrates a basic vision processing pipeline. A pipeline is # just a sequence of steps used to extract meaningful data from an image. The @@ -136,15 +136,18 @@ def my_pipeline(frame): center_x = left + width // 2 center_y = top + height // 2 - # Now we could use this data for some task! 
For example, if we had - # a robot that needed to drive up to the logo, we could turn to face - # the logo with the center point, then drive towards it until the - # size is big enough. - # + # Now we could use this data for some task! For example, if we were + # detecting an object that a robot needs to drive in front of, we + # could turn to face it with the center point, then drive forwards + # until the size is big enough (meaning we're close enough to it). + # # This example doesn't actually make use of the data, so we'll just - # draw the bounding box and center of the logo for visualization + # draw the bounding box and center of the logo for visualization, + # and add text of the position and size of the logo frame = cv.rectangle(frame, (left, top), (left + width, top + height), (255, 0, 0), 2) - frame = cv.circle(frame, (center_x, center_y), 5, (0, 255, 0), -1) + frame = cv.circle(frame, (center_x, center_y), 3, (0, 255, 0), -1) + frame = cv.putText(frame, f"({center_x}, {center_y})", (center_x - 45, center_y - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) + frame = cv.putText(frame, f"{width}x{height}", (left, top - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2) # Initialize a loop timer to calculate processing speed in FPS loop_time = time.ticks_us() @@ -168,7 +171,7 @@ def my_pipeline(frame): # All processing is done! Calculate the frame rate and display it current_time = time.ticks_us() - fps = 1000000 / (current_time - loop_time) + fps = 1_000_000 / (current_time - loop_time) loop_time = current_time frame = cv.putText(frame, f"FPS: {fps:.2f}", (40, 30), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) From 15ffc29b64e0427b18ca702dfbed4ab439219df1 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 17 Jul 2025 11:31:41 -0600 Subject: [PATCH 125/158] Add performance example Renumber logo detection example --- examples/ex05_performance.py | 69 +++++++++++++++++++ ...ct_sfe_logo.py => ex06_detect_sfe_logo.py} | 0 2 files changed, 69 insertions(+) create mode 100644 examples/ex05_performance.py rename examples/{ex05_detect_sfe_logo.py => ex06_detect_sfe_logo.py} (100%) diff --git a/examples/ex05_performance.py b/examples/ex05_performance.py new file mode 100644 index 0000000..7b0d73e --- /dev/null +++ b/examples/ex05_performance.py @@ -0,0 +1,69 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# ex05_performance.py +# +# This example +#------------------------------------------------------------------------------- + +# Import OpenCV and hardware initialization module +import cv2 as cv +from cv2_hardware_init import * + +# Import NumPy +from ulab import numpy as np + +# Import time for frame rate calculation +import time + +# Initialize a loop timer to calculate processing speed in FPS +loop_time = time.ticks_us() + +# Open the camera +camera.open() + +# The `read()` method of OpenCV camera drivers can optionally take an output +# image as an argument. When it's not provided, the camera driver must allocate +# a whole new image for the frame, which can be slow and waste memory. If the +# image argument is provided, then the camera driver will write the data to the +# provided image. 
The image must be a NumPy array with the same shape and data +# type as the camera's +success, frame = camera.read() + +# Prompt the user to press a key to continue +print("Press any key to continue") + +# Loop to continuously read frames from the camera and display them +while True: + # Read a frame from the camera + success, frame = camera.read() + if not success: + print("Failed to read frame from camera") + break + + # Now we'll + + # It's a good idea to measure the frame rate of the main loop to see how + # fast the entire pipeline is running. This will include not only the + # processing steps, but also any overhead from the hardware drivers and + # other code. We can calculate the FPS with the loop timer and draw it on + # the frame for visualization + current_time = time.ticks_us() + fps = 1_000_000 / (current_time - loop_time) + loop_time = current_time + frame = cv.putText(frame, f"FPS: {fps:.2f}", (10, 30), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) + + # Display the frame + cv.imshow(display, frame) + + # Check for key presses + key = cv.waitKey(1) + + # If any key is pressed, exit the loop + if key != -1: + break + +# Release the camera +camera.release() diff --git a/examples/ex05_detect_sfe_logo.py b/examples/ex06_detect_sfe_logo.py similarity index 100% rename from examples/ex05_detect_sfe_logo.py rename to examples/ex06_detect_sfe_logo.py From 08a7dbcbd5012fecafcfc14fe60e4e02da299505 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 17 Jul 2025 13:51:24 -0600 Subject: [PATCH 126/158] Update performance example with all changes this time --- examples/ex05_performance.py | 83 +++++++++++++++++++++++++++++------- 1 file changed, 68 insertions(+), 15 deletions(-) diff --git a/examples/ex05_performance.py b/examples/ex05_performance.py index 7b0d73e..4bbf37b 100644 --- a/examples/ex05_performance.py +++ b/examples/ex05_performance.py @@ -12,38 +12,63 @@ import cv2 as cv from cv2_hardware_init import * -# Import NumPy +# Import NumPy to create arrays from ulab import numpy as np # Import time for frame rate calculation import time -# Initialize a loop timer to calculate processing speed in FPS -loop_time = time.ticks_us() +# Import garbage collector to measure memory usage +import gc + +# Many OpenCV functions can take an optional output argument to store the result +# of the operation. If it's not provided, OpenCV allocates a new array to store +# the result, which can be slow and waste memory. When it is provided, OpenCV +# instead writes the result to the provided array, reducing memory usage and +# improving performance. The array must have the same shape and data type as the +# expected output of the operation, otherwise a new array will be allocated. +# +# Here we preallocate arrays for the destination arguments of this example. If +# the shapes or data types are incorrect, OpenCV will simply allocate new arrays +# for each on the first loop iteration. The variables will then be re-assigned, +# so this only negatively affects the first loop iteration. +frame = np.zeros((240, 320, 3), dtype=np.uint8) +result_image = np.zeros((240, 320, 3), dtype=np.uint8) # Open the camera camera.open() -# The `read()` method of OpenCV camera drivers can optionally take an output -# image as an argument. When it's not provided, the camera driver must allocate -# a whole new image for the frame, which can be slow and waste memory. If the -# image argument is provided, then the camera driver will write the data to the -# provided image. 
The image must be a NumPy array with the same shape and data -# type as the camera's -success, frame = camera.read() +# Initialize a loop timer to calculate processing speed in FPS +loop_time = time.ticks_us() + +# Initialize a variable to track memory usage +last_mem_free = gc.mem_free() # Prompt the user to press a key to continue print("Press any key to continue") # Loop to continuously read frames from the camera and display them while True: - # Read a frame from the camera - success, frame = camera.read() + # Read a frame from the camera and measure how long it takes. Try running + # this both with and without the preallocated `frame` array to see the + # difference in performance + t0 = time.ticks_us() + success, frame = camera.read(frame) + t1 = time.ticks_us() + print("Read frame: %.2f ms" % ((t1 - t0) / 1_000), end='\t') + + # Check if the frame was read successfully if not success: print("Failed to read frame from camera") break - # Now we'll + # Now we'll do some processing on the frame. Try running this with and + # without the preallocated `result_image` array, and try different OpenCV + # functions to compare performance + t0 = time.ticks_us() + result_image = cv.cvtColor(frame, cv.COLOR_BGR2HSV, result_image) + t1 = time.ticks_us() + print("Processing: %.2f ms" % ((t1 - t0) / 1_000), end='\t') # It's a good idea to measure the frame rate of the main loop to see how # fast the entire pipeline is running. This will include not only the @@ -53,10 +78,38 @@ current_time = time.ticks_us() fps = 1_000_000 / (current_time - loop_time) loop_time = current_time - frame = cv.putText(frame, f"FPS: {fps:.2f}", (10, 30), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) + print("FPS: %.2f" % fps, end='\t') + result_image = cv.putText(result_image, f"FPS: {fps:.2f}", (10, 30), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2) # Display the frame - cv.imshow(display, frame) + cv.imshow(display, result_image) + + # We can also measure memory usage to see how much RAM is being consumed by + # this code. If you remove the output arguments from the functions above, + # you'll see that the memory consumption increases significantly as new + # arrays must be allocated each loop iteration + mem_free = gc.mem_free() + memory_used = last_mem_free - mem_free + last_mem_free = mem_free + print("Memory free: %d KiB" % (mem_free // 1024), end='\t') + print("Memory consumed: %d KiB" % (memory_used // 1024), end='\n') + + # If the memory usage is negative, it means the garbage collector triggered + # and freed some memory. Garbage collection can take some time, so you'll + # notice a drop in FPS when it happens, and you may see a stutter in the + # video stream on the display. This is another reason to preallocate arrays, + # since it mitigates how frequently garbage collection is triggered + if memory_used < 0: + print("Garbage collection triggered!") + + # Something to try is triggering the garbage collector manually each loop + # iteration to immediately free up memory. Garbage collection can be faster + # if less memory has been allocated, so this can help avoid long stutters + # from occasional garbage collection. However garbage collection will always + # take *some* time, so this will lower the average FPS. 
You can choose to do + # this if you prefer a consistent frame rate, or don't if you prefer maximum + # frame rate and are okay with occasional stutters + # gc.collect() # Check for key presses key = cv.waitKey(1) From e2872c20cb77729318433e274da7f2819ed60f3f Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 18 Jul 2025 11:34:27 -0600 Subject: [PATCH 127/158] Add XRP touch drive example --- examples/xrp_examples/ex01_touch_drive.py | 144 ++++++++++++++++++++++ 1 file changed, 144 insertions(+) create mode 100644 examples/xrp_examples/ex01_touch_drive.py diff --git a/examples/xrp_examples/ex01_touch_drive.py b/examples/xrp_examples/ex01_touch_drive.py new file mode 100644 index 0000000..b97c631 --- /dev/null +++ b/examples/xrp_examples/ex01_touch_drive.py @@ -0,0 +1,144 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# ex01_touch_drive.py +# +# This example creates a simple touch screen interface to drive the XRP robot. +# It creates arrow buttons to drive around, and a stop button to exit the +# example. The XRP is available from SparkFun: +# https://www.sparkfun.com/experiential-robotics-platform-xrp-kit.html +#------------------------------------------------------------------------------- + +# Import XRPLib defaults +from XRPLib.defaults import * + +# Import OpenCV and hardware initialization module +import cv2 as cv +from cv2_hardware_init import * + +# Import NumPy +from ulab import numpy as np + +# Initialize arrow button image +btn_arrow_shape = (50, 50, 3) +btn_arrow_cx = btn_arrow_shape[1] // 2 +btn_arrow_cy = btn_arrow_shape[0] // 2 +btn_arrow_length = 30 +btn_arrow_thickness = 5 +btn_arrow_tip_length = 0.5 +btn_arrow_offset = 75 +img_btn_arrow_vertical = np.zeros(btn_arrow_shape, dtype=np.uint8) +img_btn_arrow_vertical[:, :] = (255, 0, 0) +img_btn_arrow_horizontal = img_btn_arrow_vertical.copy() +img_btn_arrow_vertical = cv.arrowedLine( + img_btn_arrow_vertical, + (btn_arrow_cx, btn_arrow_cy + btn_arrow_length // 2), + (btn_arrow_cx, btn_arrow_cy - btn_arrow_length // 2), + (255, 255, 255), + btn_arrow_thickness, + cv.FILLED, + 0, + btn_arrow_tip_length +) +img_btn_arrow_horizontal = cv.arrowedLine( + img_btn_arrow_horizontal, + (btn_arrow_cx - btn_arrow_length // 2, btn_arrow_cy), + (btn_arrow_cx + btn_arrow_length // 2, btn_arrow_cy), + (255, 255, 255), + btn_arrow_thickness, + cv.FILLED, + 0, + btn_arrow_tip_length +) + +# Initialize stop button image +btn_stop_shape = (50, 50, 3) +btn_stop_cx = btn_stop_shape[1] // 2 +btn_stop_cy = btn_stop_shape[0] // 2 +btn_stop_size = 25 +img_btn_stop = np.zeros(btn_stop_shape, dtype=np.uint8) +img_btn_stop[:, :] = (0, 0, 255) # Red color +img_btn_stop = cv.rectangle( + img_btn_stop, + (btn_stop_cx - btn_stop_size // 2, btn_stop_cy - btn_stop_size // 2), + (btn_stop_cx + btn_stop_size // 2, btn_stop_cy + btn_stop_size // 2), + (255, 255, 255), # White border + -1 # Fill the rectangle +) + +# Initialize UI image +ui_img = np.zeros((240, 320, 3), dtype=np.uint8) +# Draw the stop button in the center +center_x = ui_img.shape[1] // 2 +center_y = ui_img.shape[0] // 2 +ui_img[ + center_y-btn_stop_cy:center_y+btn_stop_cy, + center_x-btn_stop_cx:center_x+btn_stop_cx +] = img_btn_stop +# Draw the forward arrow above the stop button +ui_img[ + center_y-btn_arrow_offset-btn_arrow_cy:center_y-btn_arrow_offset+btn_arrow_cy, + 
center_x-btn_arrow_cx:center_x+btn_arrow_cx +] = img_btn_arrow_vertical +# Draw the backward arrow below the stop button +ui_img[ + center_y+btn_arrow_offset-btn_arrow_cy:center_y+btn_arrow_offset+btn_arrow_cy, + center_x-btn_arrow_cx:center_x+btn_arrow_cx +] = img_btn_arrow_vertical[::-1, :, :] # Flip the arrow image vertically +# Draw the right arrow to the right of the stop button +ui_img[ + center_y-btn_arrow_cy:center_y+btn_arrow_cy, + center_x+btn_arrow_offset-btn_arrow_cx:center_x+btn_arrow_offset+btn_arrow_cx +] = img_btn_arrow_horizontal +# Draw the left arrow to the left of the stop button +ui_img[ + center_y-btn_arrow_cy:center_y+btn_arrow_cy, + center_x-btn_arrow_offset-btn_arrow_cx:center_x-btn_arrow_offset+btn_arrow_cx +] = img_btn_arrow_horizontal[:, ::-1, :] # Flip the arrow image horizontally + +# Show the UI image on the display +cv.imshow(display, ui_img) + +# Prompt the user to touch the screen to drive around +print("Touch the screen to drive around. Press any key to exit.") + +# Loop to continuously read touch input and drive around +while True: + # Read touch input + x, y, touch_num = touch_screen.get_touch() + + if touch_num > 0: + # Check if the stop button was pressed + if (center_x - btn_stop_cx <= x <= center_x + btn_stop_cx and + center_y - btn_stop_cy <= y <= center_y + btn_stop_cy): + print("Stop") + break + # Check if the forward arrow was pressed + elif (center_x - btn_arrow_cx <= x <= center_x + btn_arrow_cx and + center_y - btn_arrow_offset - btn_arrow_cy <= y <= center_y - btn_arrow_offset + btn_arrow_cy): + print("Forward") + drivetrain.straight(20, 0.5) + # Check if the backward arrow was pressed + elif (center_x - btn_arrow_cx <= x <= center_x + btn_arrow_cx and + center_y + btn_arrow_offset - btn_arrow_cy <= y <= center_y + btn_arrow_offset + btn_arrow_cy): + print("Backward") + drivetrain.straight(-20, 0.5) + # Check if the right arrow was pressed + elif (center_y - btn_arrow_cy <= y <= center_y + btn_arrow_cy and + center_x + btn_arrow_offset - btn_arrow_cx <= x <= center_x + btn_arrow_offset + btn_arrow_cx): + print("Right") + drivetrain.turn(-90, 0.5) + # Check if the left arrow was pressed + elif (center_y - btn_arrow_cy <= y <= center_y + btn_arrow_cy and + center_x - btn_arrow_offset - btn_arrow_cx <= x <= center_x - btn_arrow_offset + btn_arrow_cx): + print("Left") + drivetrain.turn(90, 0.5) + + if cv.waitKey(1) != -1: + # Exit the loop if any key is pressed + break + +# Clear the display +display.clear() From 471115fd2b69fd6854d7c8587282afafe9d9c92f Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 18 Jul 2025 11:37:03 -0600 Subject: [PATCH 128/158] Change default sm_id values of PIO drivers Camera drivers no longer have a default, must be specified Example driver initialization now uses sm_id 4 and 5 to not conflict with XRP's encoders --- cv2_drivers/cameras/hm01b0_pio.py | 2 +- cv2_drivers/cameras/ov5640_pio.py | 2 +- examples/cv2_hardware_init/camera.py | 1 + examples/cv2_hardware_init/display.py | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cv2_drivers/cameras/hm01b0_pio.py b/cv2_drivers/cameras/hm01b0_pio.py index 3e3c69c..088e220 100644 --- a/cv2_drivers/cameras/hm01b0_pio.py +++ b/cv2_drivers/cameras/hm01b0_pio.py @@ -10,9 +10,9 @@ def __init__( pin_vsync, pin_hsync, pin_pclk, + sm_id, pin_xclk = None, xclk_freq = 25_000_000, - sm_id = 0, num_data_pins = 1, i2c_address = 0x24, ): diff --git a/cv2_drivers/cameras/ov5640_pio.py b/cv2_drivers/cameras/ov5640_pio.py index b6021bd..5e3fa40 100644 --- 
a/cv2_drivers/cameras/ov5640_pio.py +++ b/cv2_drivers/cameras/ov5640_pio.py @@ -10,9 +10,9 @@ def __init__( pin_vsync, pin_hsync, pin_pclk, + sm_id, pin_xclk = None, xclk_freq = 5_000_000, - sm_id = 0, i2c_address = 0x3c ): # Create the frame buffer diff --git a/examples/cv2_hardware_init/camera.py b/examples/cv2_hardware_init/camera.py index e41bc90..2f5d51a 100644 --- a/examples/cv2_hardware_init/camera.py +++ b/examples/cv2_hardware_init/camera.py @@ -19,6 +19,7 @@ pin_vsync = 13, pin_hsync = 14, pin_pclk = 15, + sm_id = 5, pin_xclk = None, # Optional xclock pin, specify if needed num_data_pins = 1 # Number of data pins used by the camera (1, 4, or 8) ) diff --git a/examples/cv2_hardware_init/display.py b/examples/cv2_hardware_init/display.py index 56da285..2d411be 100644 --- a/examples/cv2_hardware_init/display.py +++ b/examples/cv2_hardware_init/display.py @@ -28,7 +28,7 @@ # display = st7789_pio.ST7789_PIO( # width = 240, # height = 320, -# sm_id = 1, +# sm_id = 4, # pin_clk = 18, # pin_tx = 19, # pin_dc = 16, From a2f461f81afc271ae5456644f4dfd3721999e4b1 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 18 Jul 2025 11:47:35 -0600 Subject: [PATCH 129/158] Move display splash screen code to base display class --- cv2_drivers/displays/cv2_display.py | 7 +++++++ examples/cv2_hardware_init/__init__.py | 22 ++++++++++------------ 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/cv2_drivers/displays/cv2_display.py b/cv2_drivers/displays/cv2_display.py index f95213b..800c2f0 100644 --- a/cv2_drivers/displays/cv2_display.py +++ b/cv2_drivers/displays/cv2_display.py @@ -137,3 +137,10 @@ def savePinModeAlt(self, pin): # Return the mode and alt as a tuple return (mode, alt) + + def splash(self): + try: + self.imshow(cv2.imread("splash.png")) + return True + except Exception: + return False diff --git a/examples/cv2_hardware_init/__init__.py b/examples/cv2_hardware_init/__init__.py index d5e0db3..8a72ee4 100644 --- a/examples/cv2_hardware_init/__init__.py +++ b/examples/cv2_hardware_init/__init__.py @@ -3,21 +3,31 @@ # script to automatically initialize the hardware when the board boots up. See: # https://micropython.org/resources/docs/en/latest/reference/reset_boot.html#id4 +# Import the display driver try: from .display import display except: print("Display initialization failed, skipping...") +# Optional - show a splash image on the display if one is available, or clear +# the display of any previous content +if not display.splash(): + if hasattr(display, 'clear'): + display.clear() + +# Import the camera driver try: from .camera import camera except: print("Camera initialization failed, skipping...") +# Import the touch screen driver try: from .touch_screen import touch_screen except: print("Touch screen initialization failed, skipping...") +# Mount the SD card try: # We don't actually need to import anything here, just want to run the # sd_card module so the SD card gets mounted to the filesystem. 
So just @@ -26,15 +36,3 @@ del sdcard except: print("SD card initialization failed, skipping...") - -# Optional - show a splash image on the display if one is available, or clear -# the display of any previous content -try: - # Load and display a splash image, if one is available - import cv2 - splash_image = cv2.imread("splash.png") - cv2.imshow(display, splash_image) -except Exception: - # No splash image, instead clear the display if the driver supports it - if hasattr(display, 'clear'): - display.clear() From 0df676a7bb72d4a5080b9c6e7055048136143e4e Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 18 Jul 2025 11:49:42 -0600 Subject: [PATCH 130/158] Add optional splash image filename --- cv2_drivers/displays/cv2_display.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cv2_drivers/displays/cv2_display.py b/cv2_drivers/displays/cv2_display.py index 800c2f0..34f0029 100644 --- a/cv2_drivers/displays/cv2_display.py +++ b/cv2_drivers/displays/cv2_display.py @@ -138,9 +138,9 @@ def savePinModeAlt(self, pin): # Return the mode and alt as a tuple return (mode, alt) - def splash(self): + def splash(self, filename="splash.png"): try: - self.imshow(cv2.imread("splash.png")) + self.imshow(cv2.imread(filename)) return True except Exception: return False From ad298cf2b9115a5e3802718dc54c1a3a8c5b2e5c Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 21 Jul 2025 13:29:39 -0600 Subject: [PATCH 131/158] Update logo detection example to draw marker instead of circle for center --- examples/ex06_detect_sfe_logo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/ex06_detect_sfe_logo.py b/examples/ex06_detect_sfe_logo.py index 226e23f..28a7d50 100644 --- a/examples/ex06_detect_sfe_logo.py +++ b/examples/ex06_detect_sfe_logo.py @@ -145,7 +145,7 @@ def my_pipeline(frame): # draw the bounding box and center of the logo for visualization, # and add text of the position and size of the logo frame = cv.rectangle(frame, (left, top), (left + width, top + height), (255, 0, 0), 2) - frame = cv.circle(frame, (center_x, center_y), 3, (0, 255, 0), -1) + frame = cv.drawMarker(frame, (center_x, center_y), (0, 255, 0), cv.MARKER_CROSS, 10, 2) frame = cv.putText(frame, f"({center_x}, {center_y})", (center_x - 45, center_y - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) frame = cv.putText(frame, f"{width}x{height}", (left, top - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2) From dc112bae46c5f139784aedd7f933c309815d263d Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Mon, 21 Jul 2025 17:02:26 -0600 Subject: [PATCH 132/158] Add XRP orange ring example --- .../xrp_examples/ex02_grab_orange_ring.py | 193 ++++++++++++++++++ 1 file changed, 193 insertions(+) create mode 100644 examples/xrp_examples/ex02_grab_orange_ring.py diff --git a/examples/xrp_examples/ex02_grab_orange_ring.py b/examples/xrp_examples/ex02_grab_orange_ring.py new file mode 100644 index 0000000..f70eb9d --- /dev/null +++ b/examples/xrp_examples/ex02_grab_orange_ring.py @@ -0,0 +1,193 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# ex02_grab_orange_ring.py +# +# The XRP can act as a bridge to FIRST programs, which includes summer camps +# with FIRST-style games. 
Learn more here: +# https://experientialrobotics.org/bridge-to-first/ +# +# FIRST-style games often include game elements with randomized locations that +# can be detected with a camera. The exact game elements and tasks change every +# year, but this example assumes there is an orange ring in front of the robot +# that needs to be grabbed. This example demonstrates how to detect the ring, +# calculate its distance and position relative to the robot in real-world units, +# then drive the robot to grab it. +#------------------------------------------------------------------------------- + +# Import XRPLib defaults +from XRPLib.defaults import * + +# Import OpenCV and hardware initialization module +import cv2 as cv +from cv2_hardware_init import * + +# Import time for delays +import time + +# Import math for calculations +import math + +# This is the pipeline implementation that attempts to find an orange ring in +# an image, and returns the real-world distance to the object and its left/right +# position relative to the center of the image in centimeters +def my_pipeline(frame): + # Convert the frame to HSV color space, which is often more effective for + # color-based segmentation tasks than RGB or BGR color spaces + hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV) + + # Here we use the `cv.inRange()` function to find all the orange pixels. + # This outputs a binary image where pixels that fall within the specified + # lower and upper bounds are set to 255 (white), and all other pixels are + # set to 0 (black). This is applied to the HSV image, so the lower and upper + # bounds are in HSV color space. The bounds were determined experimentally: + # + # Hue: Orange hue is around 20, so we use a range of 15 to 25 + # Saturation: Anything above 50 is saturated enough + # Value: Anything above 30 is bright enough + lower_bound = (15, 50, 30) + upper_bound = (25, 255, 255) + inRange = cv.inRange(hsv, lower_bound, upper_bound) + + # Noise in the image often causes `cv.inRange()` to return false positives + # and false negatives, meaning there are some incorrect pixels in the binary + # image. These can be cleaned up with morphological operations, which + # effectively grow and shrink regions in the binary image to remove tiny + # blobs of noise + kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3)) + morphOpen = cv.morphologyEx(inRange, cv.MORPH_OPEN, kernel) + morphClose = cv.morphologyEx(morphOpen, cv.MORPH_CLOSE, kernel) + + # Now we use `cv.findContours()` to find the contours in the binary image, + # which are the boundaries of the regions in the binary image + contours, hierarchy = cv.findContours(morphClose, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) + + # It's possible that no contours were found, so first check if any were + # found before proceeding + best_contour = None + if contours: + # It's possible that some tiny blobs of noise are still present in the + # binary image, or other objects entirely, leading to extra contours. A + # proper pipeline would make an effort to filter out unwanted contours + # based on size, shape, or other criteria. This example keeps it simple; + # the contour of a ring is a circle, meaning many points are needed to + # represent it. A contour with only a few points is obviously not a + # circle, so we can ignore it. 
This example assumes the ring is the only
+        # large orange object in the image, so the first contour that's complex
+        # enough is probably the one we're looking for
+        for i in range(len(contours)):
+            if len(contours[i]) < 50:
+                continue
+            best_contour = contours[i]
+            break
+
+    # If no contour was found, return invalid values to indicate that
+    if best_contour is None:
+        return (-1, -1)
+
+    # Calculate the bounding rectangle of the contour, and use that to calculate
+    # the center coordinates of the object
+    left, top, width, height = cv.boundingRect(best_contour)
+    center_x = left + width // 2
+    center_y = top + height // 2
+
+    # Now we can calculate the real-world distance to the object based on its
+    # size. We'll first estimate the diameter of the ring in pixels by taking
+    # the maximum of the width and height of the bounding rectangle. This
+    # compensates for the fact that the ring may be tilted
+    diameter_px = max(width, height)
+
+    # If the camera has a perfect lens, the distance can be calculated with:
+    #
+    # distance_cm = diameter_cm * focal_length_px / diameter_px
+    #
+    # However almost every camera lens has some distortion, so there are
+    # corrections needed to account for that. This example has been tested with
+    # the HM01B0, and the calculation below gives a decent estimate of the
+    # distance in centimeters
+    focal_length_px = 180
+    diameter_cm = 12.7
+    distance_cm = diameter_cm * focal_length_px / diameter_px - 10
+
+    # Now with our distance estimate, we can calculate how far left or right the
+    # object is from the center in the same real-world units. Assuming a perfect
+    # lens, the position can be calculated as:
+    #
+    # position_x_cm = distance_cm * position_x_px / focal_length_px
+    position_x_px = center_x - (frame.shape[1] // 2)
+    position_x_cm = distance_cm * position_x_px / focal_length_px
+
+    # Draw the contour, bounding box, center, and text for visualization
+    frame = cv.drawContours(frame, [best_contour], -1, (0, 0, 255), 2)
+    frame = cv.rectangle(frame, (left, top), (left + width, top + height), (255, 0, 0), 2)
+    frame = cv.drawMarker(frame, (center_x, center_y), (0, 255, 0), cv.MARKER_CROSS, 10, 2)
+    frame = cv.putText(frame, f"({center_x}, {center_y})", (center_x - 45, center_y - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
+    frame = cv.putText(frame, f"{width}x{height}", (left, top - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
+    frame = cv.putText(frame, f"D={distance_cm:.1f}cm", (left, top - 25), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
+    frame = cv.putText(frame, f"X={position_x_cm:.1f}cm", (left, top - 40), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
+
+    # Now we can return the distance and position of the object in cm, since
+    # that's the only data we need from this pipeline
+    return (distance_cm, position_x_cm)
+
+# Move the servo out of the way of the camera
+servo_one.set_angle(90)
+
+# Open the camera and wait a moment for at least one frame to be captured
+camera.open()
+time.sleep(0.1)
+
+# Let the user know that ring detection is starting
+print("Detecting ring...")
+
+# Loop until the object is found or the user presses a key
+while True:
+    # Read a frame from the camera
+    success, frame = camera.read()
+    if not success:
+        print("Error reading frame from camera")
+        break
+
+    # Call the pipeline function to find the object
+    distance_cm, position_x_cm = my_pipeline(frame)
+
+    # Display the frame
+    cv.imshow(display, frame)
+
+    # If the distance is valid, break the loop
+    if distance_cm >= 0:
+        break
+
+    # Check for key presses
+    key = cv.waitKey(1)
+
+    # If any key is pressed, exit the loop
+    if key != -1:
+        break
+
+# Print the distance and position of the object
+print(f"Found object at distance {distance_cm:.1f} cm, position {position_x_cm:.1f} cm from center")
+
+# Release the camera, we're done with it
+camera.release()
+
+# Move the servo to pick up the object
+servo_one.set_angle(45)
+
+# Turn to face the object. We first calculate the angle to turn based on the
+# position of the object
+angle = -math.atan2(position_x_cm, distance_cm) * 180 / math.pi
+drivetrain.turn(angle)
+
+# Drive forwards to the object. Drive a bit further than the distance to the
+# object to ensure the arm goes through the ring
+distance_cm += 10
+drivetrain.straight(distance_cm)
+
+# Rotate the servo to pick up the ring
+servo_one.set_angle(90)
+
+# Drive backwards to pull the ring off the rung
+drivetrain.straight(-10)

From 9c875a04ffb0d0000a54e5cdbebbb236e881b6f8 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Mon, 21 Jul 2025 17:02:42 -0600
Subject: [PATCH 133/158] Clean up touch screen example

Also change CST816 driver to have is_touched() method separate from
get_touch_xy()
---
 cv2_drivers/touch_screens/cst816.py | 18 ++++++++++--
 examples/ex03_touch_screen.py       | 43 +++++++++++++++--------------
 2 files changed, 38 insertions(+), 23 deletions(-)

diff --git a/cv2_drivers/touch_screens/cst816.py b/cv2_drivers/touch_screens/cst816.py
index f854b6e..a6de1df 100644
--- a/cv2_drivers/touch_screens/cst816.py
+++ b/cv2_drivers/touch_screens/cst816.py
@@ -70,10 +70,22 @@ def is_connected(self):
     def getChipID(self):
         return self.read_register_value(self._REG_CHIP_ID)
 
-    def get_touch(self):
+    def is_touched(self):
+        """
+        Check if the touch screen is currently being touched.
+
+        Returns:
+            bool: True if touching, False otherwise
+        """
+        # Read the number of touches
+        touch_num = self.read_register_value(self._REG_FINGER_NUM)
+
+        # If there are any touches, return True
+        return touch_num > 0
+
+    def get_touch_xy(self):
         x = self.read_register_value(self._REG_X_POS_H, 2) & 0x0FFF
         y = self.read_register_value(self._REG_Y_POS_H, 2) & 0x0FFF
-        touch_num = self.read_register_value(self._REG_FINGER_NUM)
 
         # Adjust for the rotation
         if self.rotation == 0:
@@ -85,7 +97,7 @@ def get_touch(self):
         elif self.rotation == 3:
             x,y = self.height - y, x
 
-        return (x, y, touch_num)
+        return (x, y)
 
     def read_register_value(self, reg, num_bytes=1):
         """
diff --git a/examples/ex03_touch_screen.py b/examples/ex03_touch_screen.py
index 3af6645..f3c9ecd 100644
--- a/examples/ex03_touch_screen.py
+++ b/examples/ex03_touch_screen.py
@@ -28,32 +28,35 @@
 
 # Create variables to store touch coordinates and state
 x0, y0, x1, y1 = 0, 0, 0, 0
-touching = False
 
 # Loop to continuously read touch input and draw on the image
 while True:
-    # Read touch input
-    x, y, touch_num = touch_screen.get_touch()
-
-    # Update the touch coordinates and state
-    if touch_num > 0:
-        if not touching:
-            x0 = x
-            y0 = y
-            x1 = x
-            y1 = y
-            touching = True
+touch_input = False
+
+    # Check if there is touch input
+    if touch_screen.is_touched():
+        # Check if this is the first touch or a continuation
+        if not touch_input:
+            # This is the first touch, set both (x0, y0) and (x1, y1) to the
+            # initial touch coordinates.
This will draw a point at the touch + # location if no further touch inputs are made + x0, y0 = touch_screen.get_touch_xy() + x1, y1 = x0, y0 + # Set the state to indicate there is touch input + touch_input = True else: - x0 = x1 - y0 = y1 - x1 = x - y1 = y + # This is a continuation of the touch, set (x0, y0) to the previous + # coordinates and set (x1, y1) to the current touch coordinates so + # we can draw a line between them + x0, y0 = x1, y1 + x1, y1 = touch_screen.get_touch_xy() else: - if touching: - touching = False + # Check if there was touch input before + if touch_input: + # There was touch input before, but not any more + touch_input = False - # Draw a line if touching - if touching: + # Draw a line if there was touch input + if touch_input: img = cv.line(img, (x0, y0), (x1, y1), (255, 255, 255), 2) # Display the frame From 5728bc499c45c980662ae5b880d944136c819fad Mon Sep 17 00:00:00 2001 From: Malcolm McKellips Date: Tue, 22 Jul 2025 09:24:07 -0600 Subject: [PATCH 134/158] Add automatic builds that contain examples on release --- .github/workflows/build.yml | 23 +---- .github/workflows/release.yml | 23 +++++ build.sh | 185 ++++++++++++++++++++++++++++++++++ 3 files changed, 211 insertions(+), 20 deletions(-) create mode 100644 .github/workflows/release.yml create mode 100644 build.sh diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 91ec7d8..fbb9696 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,4 +1,4 @@ -name: Build firmware +name: Build Firmware on: pull_request: @@ -7,7 +7,6 @@ on: push: branches: - features_for_launch - workflow_dispatch: jobs: build: @@ -17,21 +16,5 @@ jobs: uses: actions/checkout@v4 with: submodules: true - - name: Install packages - run: | - sudo apt install cmake python3 build-essential gcc-arm-none-eabi libnewlib-arm-none-eabi libstdc++-arm-none-eabi-newlib - - name: Build MPY Cross - run: make -C micropython/mpy-cross - - name: MicroPython submodules - run: make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER submodules - - name: Set Pico SDK path - run: echo "PICO_SDK_PATH=$GITHUB_WORKSPACE/micropython/lib/pico-sdk" >> "$GITHUB_ENV" - - name: Build OpenCV - run: make -C src/opencv PLATFORM=rp2350 --no-print-directory -j4 - - name: Build firmware - run: make BOARD=SPARKFUN_XRP_CONTROLLER -j4 - - name: Upload UF2 - uses: actions/upload-artifact@v4 - with: - name: firmware.uf2 - path: micropython/ports/rp2/build-SPARKFUN_XRP_CONTROLLER-LARGE_BINARY/firmware.uf2 + - name: Build Firmware + run: source build.sh && build_micropython_opencv diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..cf1e5b0 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,23 @@ +name: Build and Deploy Firmware Release + +on: + release: + types: [created] + +jobs: + build: + runs-on: ubuntu-22.04 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + submodules: true + - name: Build Firmware + run: source build.sh && build_micropython_opencv + - name: Upload Release Assets + uses: shogo82148/actions-upload-release-asset@v1 + with: + asset_path: "micropython/ports/rp2/build-SPARKFUN_XRP_CONTROLLER-LARGE_BINARY/firmware.uf2" + github_token: ${{ secrets.GITHUB_TOKEN }} + upload_url: ${{ github.event.release.upload_url }} + diff --git a/build.sh b/build.sh new file mode 100644 index 0000000..166446c --- /dev/null +++ b/build.sh @@ -0,0 +1,185 @@ +if which nproc > /dev/null; then + MAKEOPTS="-j$(nproc)" +else + 
MAKEOPTS="-j$(sysctl -n hw.ncpu)"
+fi
+
+# TODO: These could also be made into options of the build_micropython_opencv function if needed...
+FROZEN_MODULES_DIR="$(dirname "$0")/frozen_modules"
+FROZEN_EXAMPLES_ARCHIVE_SCRIPT="frozen_examples.py"
+FROZEN_EXAMPLES_UNPACKED_DIR="micropython-opencv-examples"
+
+# Uses freezefs to create a frozen filesystem archive for the provided directory.
+# See https://github.com/bixb922/freezefs for more details on freezefs
+# Options:
+    # $1: The directory to freeze
+    # $2: The name that you want the frozen directory to have once unpacked on the board
+    # $3: The output file name for the frozen archive .py file
+function create_frozen_fs {
+    local DIR_TO_FREEZE=$1
+    local DIR_NAME_ON_BOARD=$2
+    local OUTPUT_FILE=$3
+
+    echo "Creating frozen filesystem for directory: $DIR_TO_FREEZE"
+    echo "The frozen directory will be named: $DIR_NAME_ON_BOARD"
+    echo "The output file will be: $OUTPUT_FILE"
+
+    cp -r $DIR_TO_FREEZE $DIR_NAME_ON_BOARD
+
+    python -m freezefs $DIR_NAME_ON_BOARD $OUTPUT_FILE
+}
+
+# Options:
+# $1: The directory to add to the manifest
+# $2: The port (e.g. rp2)
+# $3: The board (e.g. SPARKFUN_XRP_CONTROLLER)
+# $4: The mpconfigboard file name (e.g. mpconfigboard.cmake or mpconfigboard.mk) Default: mpconfigboard.cmake
+function add_to_manifest {
+    local DIR=$1
+    local PORT=$2
+    local BOARD=$3
+    local MPCONFIG_FILE="${4:-mpconfigboard.cmake}"
+
+    # Add the directory to the manifest file
+    echo "Adding $DIR to the manifest for $PORT on $BOARD using $MPCONFIG_FILE"
+    local BOARD_DIR="micropython/ports/${PORT}/boards/${BOARD}"
+
+    # Create manifest.py if it doesn't exist
+    if [ ! -f ${BOARD_DIR}/manifest.py ]; then
+        echo "include(\"\$(PORT_DIR)/boards/manifest.py\")" > ${BOARD_DIR}/manifest.py
+
+        # also add the necessary frozen manifest line to mpconfigboard.cmake: set(MICROPY_FROZEN_MANIFEST ${MICROPY_BOARD_DIR}/manifest.py)
+        # We will use the optional MPCONFIG_FILE argument to determine if we should add this line
+
+        if [ -n "$MPCONFIG_FILE" ]; then
+            if [[ $MPCONFIG_FILE == *.mk ]]; then
+                # e.g. for TEENSY which uses mpconfigboard.mk instead of mpconfigboard.cmake
+                echo "Adding frozen manifest line to mpconfigboard.mk for $BOARD"
+                printf "\nFROZEN_MANIFEST ?= \$(BOARD_DIR)/manifest.py" >> ${BOARD_DIR}/$MPCONFIG_FILE
+            elif [[ $MPCONFIG_FILE == *.cmake ]]; then
+                echo "Adding frozen manifest line to mpconfigboard.cmake for $BOARD"
+                printf "\nset(MICROPY_FROZEN_MANIFEST \"\${MICROPY_BOARD_DIR}/manifest.py\")" >> ${BOARD_DIR}/$MPCONFIG_FILE
+            fi
+        fi
+    fi
+
+    # Add the freeze line to the manifest.py for the board
+    echo "Adding freeze line to manifest.py for $BOARD"
+    printf "\nfreeze(\"${DIR}\")" >> ${BOARD_DIR}/manifest.py
+
+    # Helpful for debugging during the build process, but can be removed if we'd rather not see this output...
+    echo "Manifest.py for $BOARD:"
+    cat ${BOARD_DIR}/manifest.py
+}
+
+# Adds the frozen data filesystem to the boot.py file for the given port
+# Options:
    # $1: Port name
    # $2: Frozen data file path
    # $3: Copy Source: If copying imported frozen data to a mutable location, this is the directory name of the source (optional)
    # $4: Copy Destination: If copying imported frozen data to a mutable location, this is the directory name of the destination (optional)
    # $5: Add destination to sys.path?
If true, the destination directory will be added to sys.path in _boot.py (optional) + # NOTE: By providing the source and destination, the frozen data filesystem will be copied to a mutable location on the board + # If they are not provided, the frozen data filesystem will still be accessible, but will be read-only. +function add_frozen_data_to_boot_for_port { + local TARGET_PORT_NAME=$1 + local FROZEN_DATA_FILE=$2 + local SOURCE_DIR=$3 + local DESTINATION_DIR=$4 + local ADD_TO_SYSPATH=${5:-false} + + # Remove the ".py" extension from the frozen data file + local FROZEN_DATA_BASENAME=$(basename $FROZEN_DATA_FILE .py) + + # Check if the _boot.py file exists in the port's modules directory and error out if it does not + if [ ! -f micropython/ports/${TARGET_PORT_NAME}/modules/_boot.py ]; then + echo "Error: _boot.py file not found in ports/${TARGET_PORT_NAME}/modules/" + exit 1 + fi + + # Add the frozen data filesystem to the _boot.py file + local BOOT_FILE="micropython/ports/${TARGET_PORT_NAME}/modules/_boot.py" + + echo "Adding frozen data filesystem to ${BOOT_FILE}" + echo "import ${FROZEN_DATA_BASENAME}" >> ${BOOT_FILE} + + # Now, copy the unpacked frozen data filesystem to a mutable location if the source and destination are provided + if [ -n "$SOURCE_DIR" ] && [ -n "$DESTINATION_DIR" ]; then + echo "Copying frozen data from ${SOURCE_DIR} to ${DESTINATION_DIR} in _boot.py" + local BOOT_FILE="micropython/ports/${TARGET_PORT_NAME}/modules/_boot.py" + echo "import os" >> ${BOOT_FILE} + # Simple recursive function to copy the directory tree (since i.e. shutil.copytree is not available on MicroPython) + echo "def copytree(src, dst):" >> ${BOOT_FILE} + echo " try:" >> ${BOOT_FILE} + echo " os.mkdir(dst)" >> ${BOOT_FILE} + echo " except OSError:" >> ${BOOT_FILE} + echo " pass" >> ${BOOT_FILE} + echo " for entry in os.ilistdir(src):" >> ${BOOT_FILE} + echo " fname, typecode, _, _ = entry" >> ${BOOT_FILE} + echo " src_path = src + '/' + fname" >> ${BOOT_FILE} + echo " dst_path = dst + '/' + fname" >> ${BOOT_FILE} + echo " if typecode == 0x4000:" >> ${BOOT_FILE} # typecode == 0x4000 means directory + echo " copytree(src_path, dst_path)" >> ${BOOT_FILE} + echo " else:" >> ${BOOT_FILE} + echo " with open(src_path, 'rb') as fsrc:" >> ${BOOT_FILE} + echo " with open(dst_path, 'wb') as fdst:" >> ${BOOT_FILE} + echo " fdst.write(fsrc.read())" >> ${BOOT_FILE} + echo "copytree('${SOURCE_DIR}', '${DESTINATION_DIR}')" >> ${BOOT_FILE} + fi + + # If the ADD_TO_SYSPATH flag is true, add the destination directory to sys.path + if [ "$ADD_TO_SYSPATH" = true ]; then + echo "Adding ${DESTINATION_DIR} to sys.path in _boot.py" + echo "import sys" >> ${BOOT_FILE} + echo "sys.path.append('/${DESTINATION_DIR}')" >> ${BOOT_FILE} + fi + + # Helpful for debugging during the build process, but can be removed if we'd rather not see this output... 
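+    # (The echo and cat below print the full generated file in the build log.)
+    #
+    # For reference, with the arguments passed by build_micropython_opencv,
+    # the code appended to _boot.py ends up roughly like this (a sketch, not
+    # verbatim output):
+    #
+    #   import frozen_examples
+    #   import os
+    #   def copytree(src, dst):
+    #       ...  # recursive copy helper, generated above
+    #   copytree('.micropython-opencv-examples', 'micropython-opencv-examples')
+    #   import sys
+    #   sys.path.append('/micropython-opencv-examples')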
+ echo "Content of _boot.py after adding frozen data filesystem:" + cat micropython/ports/${TARGET_PORT_NAME}/modules/_boot.py +} + +# Installs necessary dependencies and builds OpenCV and the firmware +# Also freezes the examples directory in a filesystem archive on the board +function build_micropython_opencv { + # Install necessary packages (Could move into an install_dependencies.sh if we want this to be more explicit/modular) + sudo apt-get update + sudo apt install cmake python3 build-essential gcc-arm-none-eabi libnewlib-arm-none-eabi libstdc++-arm-none-eabi-newlib + # Install necessary python packages (could also move this to a requirements.txt file) + pip install freezefs + + # Create a directory for frozen modules, we can add arbitrary .py files to this directory in the future. + # For now it will just contain the archived examples script. + mkdir "$FROZEN_MODULES_DIR" + + # Create our frozen filesystem archive for the examples directory + # Note the "." to make the read-only version of the examples directory hidden in IDEs like Thonny + create_frozen_fs "examples" ".$FROZEN_EXAMPLES_UNPACKED_DIR" "$FROZEN_MODULES_DIR/$FROZEN_EXAMPLES_ARCHIVE_SCRIPT" + + # Add necessary content to the manifest file to freeze the modules in the provided directory + add_to_manifest "$FROZEN_MODULES_DIR" "rp2" "SPARKFUN_XRP_CONTROLLER" "mpconfigvariant_LARGE_BINARY.cmake" + + # Add necessary content to the boot.py file to unpack the frozen data filesystem on boot + # Provide the source and destination directories to copy the frozen data filesystem to a mutable (and non-hidden) location + # Provide "true" as the last argument to add the destination directory to sys.path (since our examples directory contains modules that we want to be importable...) + add_frozen_data_to_boot_for_port "rp2" "$FROZEN_EXAMPLES_ARCHIVE_SCRIPT" ".$FROZEN_EXAMPLES_UNPACKED_DIR" "$FROZEN_EXAMPLES_UNPACKED_DIR" true + + # Set Pico SDK path to $GITHUB_WORKSPACE/micropython/lib/pico-sdk if $GITHUB_WORKSPACE is set, otherwise use the current directory + if [ -n "$GITHUB_WORKSPACE" ]; then + export PICO_SDK_PATH="$GITHUB_WORKSPACE/micropython/lib/pico-sdk" + else + export PICO_SDK_PATH=$(dirname "$0")/micropython/lib/pico-sdk + fi + + # Build MPY Cross compiler + make -C micropython/mpy-cross + + # Update necessary MicroPython submodules + make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER submodules + + # Build OpenCV + make -C src/opencv PLATFORM=rp2350 --no-print-directory ${MAKEOPTS} + + # Build firmware + make BOARD=SPARKFUN_XRP_CONTROLLER ${MAKEOPTS} +} From 5de809f928ed999365f16cc834d8b745b54cd89b Mon Sep 17 00:00:00 2001 From: Malcolm McKellips Date: Tue, 22 Jul 2025 13:05:06 -0600 Subject: [PATCH 135/158] Add unmount of immutable directory and persistent tracking so expanding examples only happens once --- build.sh | 49 +++++++++++++++++++++++++++++++------------------ 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/build.sh b/build.sh index 166446c..a315248 100644 --- a/build.sh +++ b/build.sh @@ -8,6 +8,7 @@ fi FROZEN_MODULES_DIR="$(dirname "$0")/frozen_modules" FROZEN_EXAMPLES_ARCHIVE_SCRIPT="frozen_examples.py" FROZEN_EXAMPLES_UNPACKED_DIR="micropython-opencv-examples" +PERSISTENT_FILE_FOR_UNPACK="/keep_opencv_example_changes" # Uses freezefs to create a frozen filesystem archive for the provided directory. 
# See https://github.com/bixb922/freezefs for more details on freezefs @@ -100,31 +101,43 @@ function add_frozen_data_to_boot_for_port { # Add the frozen data filesystem to the _boot.py file local BOOT_FILE="micropython/ports/${TARGET_PORT_NAME}/modules/_boot.py" + # Create our "persistent file for unpack" that will be used to check if the frozen data filesystem has already been unpacked + # If it has not been unpacked, we will import the frozen data filesystem echo "Adding frozen data filesystem to ${BOOT_FILE}" - echo "import ${FROZEN_DATA_BASENAME}" >> ${BOOT_FILE} + echo "import os" >> ${BOOT_FILE} + echo "try:" >> ${BOOT_FILE} + echo " os.stat('${PERSISTENT_FILE_FOR_UNPACK}')" >> ${BOOT_FILE} + echo "except OSError:" >> ${BOOT_FILE} + echo " import ${FROZEN_DATA_BASENAME}" >> ${BOOT_FILE} + echo " with open('${PERSISTENT_FILE_FOR_UNPACK}', 'w') as f:" >> ${BOOT_FILE} + echo " f.write('Hi! Delete this file to restore the ${FROZEN_EXAMPLES_UNPACKED_DIR} to its default state. WARNING: This will override ALL of your changes to that directory.')" >> ${BOOT_FILE} # Now, copy the unpacked frozen data filesystem to a mutable location if the source and destination are provided + # Simple recursive function to copy the directory tree (since i.e. shutil.copytree is not available on MicroPython) if [ -n "$SOURCE_DIR" ] && [ -n "$DESTINATION_DIR" ]; then echo "Copying frozen data from ${SOURCE_DIR} to ${DESTINATION_DIR} in _boot.py" local BOOT_FILE="micropython/ports/${TARGET_PORT_NAME}/modules/_boot.py" - echo "import os" >> ${BOOT_FILE} - # Simple recursive function to copy the directory tree (since i.e. shutil.copytree is not available on MicroPython) - echo "def copytree(src, dst):" >> ${BOOT_FILE} + echo " def copytree(src, dst):" >> ${BOOT_FILE} + echo " try:" >> ${BOOT_FILE} + echo " os.mkdir(dst)" >> ${BOOT_FILE} + echo " except OSError:" >> ${BOOT_FILE} + echo " pass" >> ${BOOT_FILE} + echo " for entry in os.ilistdir(src):" >> ${BOOT_FILE} + echo " fname, typecode, _, _ = entry" >> ${BOOT_FILE} + echo " src_path = src + '/' + fname" >> ${BOOT_FILE} + echo " dst_path = dst + '/' + fname" >> ${BOOT_FILE} + echo " if typecode == 0x4000:" >> ${BOOT_FILE} # typecode == 0x4000 means directory + echo " copytree(src_path, dst_path)" >> ${BOOT_FILE} + echo " else:" >> ${BOOT_FILE} + echo " with open(src_path, 'rb') as fsrc:" >> ${BOOT_FILE} + echo " with open(dst_path, 'wb') as fdst:" >> ${BOOT_FILE} + echo " fdst.write(fsrc.read())" >> ${BOOT_FILE} + echo " copytree('${SOURCE_DIR}', '${DESTINATION_DIR}')" >> ${BOOT_FILE} + # Finally, unmount the source directory if it is mounted echo " try:" >> ${BOOT_FILE} - echo " os.mkdir(dst)" >> ${BOOT_FILE} - echo " except OSError:" >> ${BOOT_FILE} - echo " pass" >> ${BOOT_FILE} - echo " for entry in os.ilistdir(src):" >> ${BOOT_FILE} - echo " fname, typecode, _, _ = entry" >> ${BOOT_FILE} - echo " src_path = src + '/' + fname" >> ${BOOT_FILE} - echo " dst_path = dst + '/' + fname" >> ${BOOT_FILE} - echo " if typecode == 0x4000:" >> ${BOOT_FILE} # typecode == 0x4000 means directory - echo " copytree(src_path, dst_path)" >> ${BOOT_FILE} - echo " else:" >> ${BOOT_FILE} - echo " with open(src_path, 'rb') as fsrc:" >> ${BOOT_FILE} - echo " with open(dst_path, 'wb') as fdst:" >> ${BOOT_FILE} - echo " fdst.write(fsrc.read())" >> ${BOOT_FILE} - echo "copytree('${SOURCE_DIR}', '${DESTINATION_DIR}')" >> ${BOOT_FILE} + echo " os.umount('/${SOURCE_DIR}')" >> ${BOOT_FILE} + echo " except Exception as e:" >> ${BOOT_FILE} + echo " print('umount failed:', e)" >> 
${BOOT_FILE} fi # If the ADD_TO_SYSPATH flag is true, add the destination directory to sys.path From 3439047a9f24a4a77e4d0179880035d3bac99e85 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 22 Jul 2025 14:16:02 -0600 Subject: [PATCH 136/158] Fix XRP touch drive example Should have been updated in 9c875a0 --- examples/xrp_examples/ex01_touch_drive.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/xrp_examples/ex01_touch_drive.py b/examples/xrp_examples/ex01_touch_drive.py index b97c631..8d7c603 100644 --- a/examples/xrp_examples/ex01_touch_drive.py +++ b/examples/xrp_examples/ex01_touch_drive.py @@ -106,10 +106,10 @@ # Loop to continuously read touch input and drive around while True: - # Read touch input - x, y, touch_num = touch_screen.get_touch() - - if touch_num > 0: + # Check if there is touch input + if touch_screen.is_touched(): + # Read touch coordinates + x, y = touch_screen.get_touch_xy() # Check if the stop button was pressed if (center_x - btn_stop_cx <= x <= center_x + btn_stop_cx and center_y - btn_stop_cy <= y <= center_y + btn_stop_cy): @@ -141,4 +141,4 @@ break # Clear the display -display.clear() +display.splash() From 3d65c02a19a0fbf7a9316c3b3755bb3d58654223 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Tue, 22 Jul 2025 17:44:47 -0600 Subject: [PATCH 137/158] Clean up examples and drivers Safe imports of RP2 drivers Add docstring comments Make driver members private Update comments in examples --- cv2_drivers/cameras/__init__.py | 9 +- cv2_drivers/cameras/cv2_camera.py | 32 +- cv2_drivers/cameras/dvp_camera.py | 45 ++- cv2_drivers/cameras/dvp_rp2_pio.py | 103 +++-- cv2_drivers/cameras/hm01b0.py | 373 ++++++++++-------- cv2_drivers/cameras/hm01b0_pio.py | 38 +- cv2_drivers/cameras/ov5640.py | 239 ++++++----- cv2_drivers/cameras/ov5640_pio.py | 36 +- cv2_drivers/displays/__init__.py | 9 +- cv2_drivers/displays/cv2_display.py | 83 ++-- cv2_drivers/displays/st7789.py | 157 ++++---- cv2_drivers/displays/st7789_pio.py | 161 ++++---- cv2_drivers/displays/st7789_spi.py | 80 ++-- cv2_drivers/touch_screens/__init__.py | 3 +- examples/cv2_hardware_init/__init__.py | 4 +- examples/cv2_hardware_init/camera.py | 3 +- examples/ex01_hello_opencv.py | 6 + examples/ex04_imread_imwrite.py | 2 +- examples/ex05_performance.py | 18 +- examples/ex06_detect_sfe_logo.py | 4 +- examples/xrp_examples/ex01_touch_drive.py | 211 +++++----- .../xrp_examples/ex02_grab_orange_ring.py | 59 +-- 22 files changed, 1010 insertions(+), 665 deletions(-) diff --git a/cv2_drivers/cameras/__init__.py b/cv2_drivers/cameras/__init__.py index f52e57c..b6a2fed 100644 --- a/cv2_drivers/cameras/__init__.py +++ b/cv2_drivers/cameras/__init__.py @@ -1,2 +1,7 @@ -from . import hm01b0_pio -from . import ov5640_pio \ No newline at end of file +# Import sys module to check platform +import sys + +# Import RP2 drivers +if 'rp2' in sys.platform: + from . import hm01b0_pio + from . import ov5640_pio diff --git a/cv2_drivers/cameras/cv2_camera.py b/cv2_drivers/cameras/cv2_camera.py index 990cef5..c4641a7 100644 --- a/cv2_drivers/cameras/cv2_camera.py +++ b/cv2_drivers/cameras/cv2_camera.py @@ -1,5 +1,35 @@ class CV2_Camera(): + """ + Base class for OpenCV camera drivers. + """ def __init__(self): + """ + Initializes the camera. + """ pass - # TODO: Implement common methods for all cameras + def open(self): + """ + Opens the camera and prepares it for capturing images. 
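+
+        Raises:
+            NotImplementedError: Drivers must override this method.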
+ """ + raise NotImplementedError("open() must be implemented by driver") + + def release(self): + """ + Releases the camera and frees any resources. + """ + raise NotImplementedError("release() must be implemented by driver") + + def read(self, image=None): + """ + Reads an image from the camera. + + Args: + image (ndarray, optional): Image to read into + + Returns: + tuple: (success, image) + - success (bool): True if the image was read, otherwise False + - image (ndarray): The captured image, or None if reading failed + """ + raise NotImplementedError("read() must be implemented by driver") diff --git a/cv2_drivers/cameras/dvp_camera.py b/cv2_drivers/cameras/dvp_camera.py index a04a734..93faaaa 100644 --- a/cv2_drivers/cameras/dvp_camera.py +++ b/cv2_drivers/cameras/dvp_camera.py @@ -1,25 +1,50 @@ from .cv2_camera import CV2_Camera -from machine import Pin -from time import sleep_us class DVP_Camera(CV2_Camera): + """ + Base class for OpenCV DVP (Digital Video Port) camera drivers. + """ def __init__( self, i2c, i2c_address ): + """ + Initializes the DVP camera with I2C communication. + + Args: + i2c (I2C): I2C object for communication + i2c_address (int): I2C address of the camera + """ super().__init__() - self.i2c = i2c - self.i2c_address = i2c_address + self._i2c = i2c + self._i2c_address = i2c_address + + def _read_register(self, reg, nbytes=1): + """ + Reads a register from the camera over I2C. + + Args: + reg (int): Register address to read + nbytes (int): Number of bytes to read from the register + + Returns: + bytes: Data read from the register + """ + self._i2c.writeto(self._i2c_address, bytes([reg >> 8, reg & 0xFF])) + return self._i2c.readfrom(self._i2c_address, nbytes) + + def _write_register(self, reg, data): + """ + Writes data to a register on the camera over I2C. - def readRegister(self, reg, nbytes=1): - self.i2c.writeto(self.i2c_address, bytes([reg >> 8, reg & 0xFF])) - return self.i2c.readfrom(self.i2c_address, nbytes) - - def writeRegister(self, reg, data): + Args: + reg (int): Register address to write + data (bytes, int, list, tuple): Data to write to the register + """ if isinstance(data, int): data = bytes([data]) elif isinstance(data, (list, tuple)): data = bytes(data) - self.i2c.writeto(self.i2c_address, bytes([reg >> 8, reg & 0xFF]) + data) + self._i2c.writeto(self._i2c_address, bytes([reg >> 8, reg & 0xFF]) + data) diff --git a/cv2_drivers/cameras/dvp_rp2_pio.py b/cv2_drivers/cameras/dvp_rp2_pio.py index 9a1879b..ec07a52 100644 --- a/cv2_drivers/cameras/dvp_rp2_pio.py +++ b/cv2_drivers/cameras/dvp_rp2_pio.py @@ -1,7 +1,16 @@ import rp2 from machine import Pin, PWM +# This class is derived from: +# https://github.com/adafruit/Adafruit_ImageCapture/blob/main/src/arch/rp2040.cpp +# Released under the MIT license. +# Copyright (c) 2021 Adafruit Industries class DVP_RP2_PIO(): + """ + This class implements a DVP (Digital Video Port) interface using the RP2 PIO + (Programmable Input/Output) interface. This is only available on Raspberry + Pi RP2 processors. + """ def __init__( self, pin_d0, @@ -15,12 +24,27 @@ def __init__( bytes_per_frame, byte_swap ): - self.pin_d0 = pin_d0 - self.pin_vsync = pin_vsync - self.pin_hsync = pin_hsync - self.pin_pclk = pin_pclk - self.pin_xclk = pin_xclk - self.sm_id = sm_id + """ + Initializes the DVP interface with the specified parameters. 
+ + Args: + pin_d0 (int): Data 0 pin number for DVP interface + pin_vsync (int): Vertical sync pin number + pin_hsync (int): Horizontal sync pin number + pin_pclk (int): Pixel clock pin number + pin_xclk (int): External clock pin number + xclk_freq (int): Frequency in Hz for the external clock + sm_id (int): PIO state machine ID + num_data_pins (int): Number of data pins used in DVP interface + bytes_per_frame (int): Number of bytes per frame to capture + byte_swap (bool): Whether to swap bytes in the captured data + """ + self._pin_d0 = pin_d0 + self._pin_vsync = pin_vsync + self._pin_hsync = pin_hsync + self._pin_pclk = pin_pclk + self._pin_xclk = pin_xclk + self._sm_id = sm_id # Initialize DVP pins as inputs for i in range(num_data_pins): @@ -30,86 +54,98 @@ def __init__( Pin(pin_pclk, Pin.IN) # Set up XCLK pin if provided - if self.pin_xclk is not None: - self.xclk = PWM(Pin(pin_xclk)) - self.xclk.freq(xclk_freq) - self.xclk.duty_u16(32768) # 50% duty cycle + if self._pin_xclk is not None: + self._xclk = PWM(Pin(pin_xclk)) + self._xclk.freq(xclk_freq) + self._xclk.duty_u16(32768) # 50% duty cycle # Copy the PIO program program = self._pio_read_dvp # Mask in the GPIO pins - program[0][0] |= self.pin_hsync & 0x1F - program[0][1] |= self.pin_pclk & 0x1F - program[0][3] |= self.pin_pclk & 0x1F + program[0][0] |= self._pin_hsync & 0x1F + program[0][1] |= self._pin_pclk & 0x1F + program[0][3] |= self._pin_pclk & 0x1F # Mask in the number of data pins program[0][2] &= 0xFFFFFFE0 program[0][2] |= num_data_pins # Create PIO state machine to capture DVP data - self.sm = rp2.StateMachine( - self.sm_id, + self._sm = rp2.StateMachine( + self._sm_id, program, in_base = pin_d0 ) # Create DMA controller to transfer data from PIO to buffer - self.dma = rp2.DMA() - req_num = ((self.sm_id // 4) << 3) + (self.sm_id % 4) + 4 + self._dma = rp2.DMA() + req_num = ((self._sm_id // 4) << 3) + (self._sm_id % 4) + 4 bytes_per_transfer = 4 - dma_ctrl = self.dma.pack_ctrl( + dma_ctrl = self._dma.pack_ctrl( # 0 = 1 byte, 1 = 2 bytes, 2 = 4 bytes size = {1:0, 2:1, 4:2}[bytes_per_transfer], inc_read = False, treq_sel = req_num, bswap = byte_swap ) - self.dma.config( - read = self.sm, + self._dma.config( + read = self._sm, count = bytes_per_frame // bytes_per_transfer, ctrl = dma_ctrl ) - def active(self, active = None): + def _active(self, active=None): + """ + Sets or gets the active state of the DVP interface. + + Args: + active (bool, optional): + - True: Activate the DVP interface + - False: Deactivate the DVP interface + - None: Get the current active state + """ # If no argument is provided, return the current active state if active == None: - return self.sm.active() + return self._sm.active() # Disable the DMA, the VSYNC handler will re-enable it when needed - self.dma.active(False) + self._dma.active(False) # Set the active state of the state machine - self.sm.active(active) + self._sm.active(active) # If active, set up the VSYNC interrupt handler if active: - Pin(self.pin_vsync).irq( + Pin(self._pin_vsync).irq( trigger = Pin.IRQ_FALLING, handler = lambda pin: self._vsync_handler() ) # If not active, disable the VSYNC interrupt handler else: - Pin(self.pin_vsync).irq( + Pin(self._pin_vsync).irq( handler = None ) def _vsync_handler(self): + """ + Handles the VSYNC interrupt to capture a frame of data. 
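+
+        Restarts the state machine, drains any stale words from the RX FIFO,
+        resets the DMA write address to the frame buffer, and re-arms the
+        DMA for the incoming frame.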
+ """ # Disable DMA before reconfiguring it - self.dma.active(False) + self._dma.active(False) # Reset state machine to ensure ISR is cleared - self.sm.restart() + self._sm.restart() # Ensure PIO RX FIFO is empty (it's not emptied by `sm.restart()`) - while self.sm.rx_fifo() > 0: - self.sm.get() + while self._sm.rx_fifo() > 0: + self._sm.get() # Reset the DMA write address - self.dma.write = self.buffer + self._dma.write = self._buffer # Start the DMA - self.dma.active(True) + self._dma.active(True) # Here is the PIO program, which is configurable to mask in the GPIO pins # and the number of data pins. It must be configured before the state @@ -121,6 +157,9 @@ def _vsync_handler(self): fifo_join = rp2.PIO.JOIN_RX ) def _pio_read_dvp(): + """ + PIO program to read DVP data from the GPIO pins. + """ wait(1, gpio, 0) # Mask in HSYNC pin wait(1, gpio, 0) # Mask in PCLK pin in_(pins, 1) # Mask in number of pins diff --git a/cv2_drivers/cameras/hm01b0.py b/cv2_drivers/cameras/hm01b0.py index e42713b..3b781fc 100644 --- a/cv2_drivers/cameras/hm01b0.py +++ b/cv2_drivers/cameras/hm01b0.py @@ -2,134 +2,138 @@ from time import sleep_us import cv2 -# Derived from: -# https:#github.com/openmv/openmv/blob/5acf5baf92b4314a549bdd068138e5df6cc0bac7/drivers/sensors/hm01b0.c +# This class is derived from: +# https://github.com/openmv/openmv/blob/5acf5baf92b4314a549bdd068138e5df6cc0bac7/drivers/sensors/hm01b0.c +# Released under the MIT license. +# Copyright (C) 2013-2024 OpenMV, LLC. class HM01B0(DVP_Camera): - + """ + Base class for OpenCV HM01B0 camera drivers. + """ # Read only registers - MODEL_ID_H = 0x0000 - MODEL_ID_L = 0x0001 - FRAME_COUNT = 0x0005 - PIXEL_ORDER = 0x0006 + _MODEL_ID_H = 0x0000 + _MODEL_ID_L = 0x0001 + _FRAME_COUNT = 0x0005 + _PIXEL_ORDER = 0x0006 # Sensor mode control - MODE_SELECT = 0x0100 - IMG_ORIENTATION = 0x0101 - SW_RESET = 0x0103 - GRP_PARAM_HOLD = 0x0104 + _MODE_SELECT = 0x0100 + _IMG_ORIENTATION = 0x0101 + _SW_RESET = 0x0103 + _GRP_PARAM_HOLD = 0x0104 # Sensor exposure gain control - INTEGRATION_H = 0x0202 - INTEGRATION_L = 0x0203 - ANALOG_GAIN = 0x0205 - DIGITAL_GAIN_H = 0x020E - DIGITAL_GAIN_L = 0x020F + _INTEGRATION_H = 0x0202 + _INTEGRATION_L = 0x0203 + _ANALOG_GAIN = 0x0205 + _DIGITAL_GAIN_H = 0x020E + _DIGITAL_GAIN_L = 0x020F # Frame timing control - FRAME_LEN_LINES_H = 0x0340 - FRAME_LEN_LINES_L = 0x0341 - LINE_LEN_PCK_H = 0x0342 - LINE_LEN_PCK_L = 0x0343 + _FRAME_LEN_LINES_H = 0x0340 + _FRAME_LEN_LINES_L = 0x0341 + _LINE_LEN_PCK_H = 0x0342 + _LINE_LEN_PCK_L = 0x0343 # Binning mode control - READOUT_X = 0x0383 - READOUT_Y = 0x0387 - BINNING_MODE = 0x0390 + _READOUT_X = 0x0383 + _READOUT_Y = 0x0387 + _BINNING_MODE = 0x0390 # Test pattern control - TEST_PATTERN_MODE = 0x0601 + _TEST_PATTERN_MODE = 0x0601 # Black level control - BLC_CFG = 0x1000 - BLC_TGT = 0x1003 - BLI_EN = 0x1006 - BLC2_TGT = 0x1007 + _BLC_CFG = 0x1000 + _BLC_TGT = 0x1003 + _BLI_EN = 0x1006 + _BLC2_TGT = 0x1007 # Sensor reserved - DPC_CTRL = 0x1008 - SINGLE_THR_HOT = 0x100B - SINGLE_THR_COLD = 0x100C + _DPC_CTRL = 0x1008 + _SINGLE_THR_HOT = 0x100B + _SINGLE_THR_COLD = 0x100C # VSYNC,HSYNC and pixel shift register - VSYNC_HSYNC_PIXEL_SHIFT_EN = 0x1012 + _VSYNC_HSYNC_PIXEL_SHIFT_EN = 0x1012 # Automatic exposure gain control - AE_CTRL = 0x2100 - AE_TARGET_MEAN = 0x2101 - AE_MIN_MEAN = 0x2102 - CONVERGE_IN_TH = 0x2103 - CONVERGE_OUT_TH = 0x2104 - MAX_INTG_H = 0x2105 - MAX_INTG_L = 0x2106 - MIN_INTG = 0x2107 - MAX_AGAIN_FULL = 0x2108 - MAX_AGAIN_BIN2 = 0x2109 - MIN_AGAIN = 0x210A - MAX_DGAIN = 
0x210B - MIN_DGAIN = 0x210C - DAMPING_FACTOR = 0x210D - FS_CTRL = 0x210E - FS_60HZ_H = 0x210F - FS_60HZ_L = 0x2110 - FS_50HZ_H = 0x2111 - FS_50HZ_L = 0x2112 - FS_HYST_TH = 0x2113 + _AE_CTRL = 0x2100 + _AE_TARGET_MEAN = 0x2101 + _AE_MIN_MEAN = 0x2102 + _CONVERGE_IN_TH = 0x2103 + _CONVERGE_OUT_TH = 0x2104 + _MAX_INTG_H = 0x2105 + _MAX_INTG_L = 0x2106 + _MIN_INTG = 0x2107 + _MAX_AGAIN_FULL = 0x2108 + _MAX_AGAIN_BIN2 = 0x2109 + _MIN_AGAIN = 0x210A + _MAX_DGAIN = 0x210B + _MIN_DGAIN = 0x210C + _DAMPING_FACTOR = 0x210D + _FS_CTRL = 0x210E + _FS_60HZ_H = 0x210F + _FS_60HZ_L = 0x2110 + _FS_50HZ_H = 0x2111 + _FS_50HZ_L = 0x2112 + _FS_HYST_TH = 0x2113 # Motion detection control - MD_CTRL = 0x2150 - I2C_CLEAR = 0x2153 - WMEAN_DIFF_TH_H = 0x2155 - WMEAN_DIFF_TH_M = 0x2156 - WMEAN_DIFF_TH_L = 0x2157 - MD_THH = 0x2158 - MD_THM1 = 0x2159 - MD_THM2 = 0x215A - MD_THL = 0x215B - STATISTIC_CTRL = 0x2000 - MD_LROI_X_START_H = 0x2011 - MD_LROI_X_START_L = 0x2012 - MD_LROI_Y_START_H = 0x2013 - MD_LROI_Y_START_L = 0x2014 - MD_LROI_X_END_H = 0x2015 - MD_LROI_X_END_L = 0x2016 - MD_LROI_Y_END_H = 0x2017 - MD_LROI_Y_END_L = 0x2018 - MD_INTERRUPT = 0x2160 + _MD_CTRL = 0x2150 + _I2C_CLEAR = 0x2153 + _WMEAN_DIFF_TH_H = 0x2155 + _WMEAN_DIFF_TH_M = 0x2156 + _WMEAN_DIFF_TH_L = 0x2157 + _MD_THH = 0x2158 + _MD_THM1 = 0x2159 + _MD_THM2 = 0x215A + _MD_THL = 0x215B + _STATISTIC_CTRL = 0x2000 + _MD_LROI_X_START_H = 0x2011 + _MD_LROI_X_START_L = 0x2012 + _MD_LROI_Y_START_H = 0x2013 + _MD_LROI_Y_START_L = 0x2014 + _MD_LROI_X_END_H = 0x2015 + _MD_LROI_X_END_L = 0x2016 + _MD_LROI_Y_END_H = 0x2017 + _MD_LROI_Y_END_L = 0x2018 + _MD_INTERRUPT = 0x2160 # Sensor timing control - QVGA_WIN_EN = 0x3010 - SIX_BIT_MODE_EN = 0x3011 - PMU_AUTOSLEEP_FRAMECNT = 0x3020 - ADVANCE_VSYNC = 0x3022 - ADVANCE_HSYNC = 0x3023 - EARLY_GAIN = 0x3035 + _QVGA_WIN_EN = 0x3010 + _SIX_BIT_MODE_EN = 0x3011 + _PMU_AUTOSLEEP_FRAMECNT = 0x3020 + _ADVANCE_VSYNC = 0x3022 + _ADVANCE_HSYNC = 0x3023 + _EARLY_GAIN = 0x3035 # IO and clock control - BIT_CONTROL = 0x3059 - OSC_CLK_DIV = 0x3060 - ANA_Register_11 = 0x3061 - IO_DRIVE_STR = 0x3062 - IO_DRIVE_STR2 = 0x3063 - ANA_Register_14 = 0x3064 - OUTPUT_PIN_STATUS_CONTROL = 0x3065 - ANA_Register_17 = 0x3067 - PCLK_POLARITY = 0x3068 + _BIT_CONTROL = 0x3059 + _OSC_CLK_DIV = 0x3060 + _ANA_Register_11 = 0x3061 + _IO_DRIVE_STR = 0x3062 + _IO_DRIVE_STR2 = 0x3063 + _ANA_Register_14 = 0x3064 + _OUTPUT_PIN_STATUS_CONTROL = 0x3065 + _ANA_Register_17 = 0x3067 + _PCLK_POLARITY = 0x3068 # Useful values of Himax registers - HIMAX_RESET = 0x01 - HIMAX_MODE_STANDBY = 0x00 - HIMAX_MODE_STREAMING = 0x01 # I2C triggered streaming enable - HIMAX_MODE_STREAMING_NFRAMES = 0x03 # Output N frames - HIMAX_MODE_STREAMING_TRIG = 0x05 # Hardware Trigger - # HIMAX_SET_HMIRROR (r, x) ((r & 0xFE) | ((x & 1) << 0)) - # HIMAX_SET_VMIRROR (r, x) ((r & 0xFD) | ((x & 1) << 1)) + _HIMAX_RESET = 0x01 + _HIMAX_MODE_STANDBY = 0x00 + _HIMAX_MODE_STREAMING = 0x01 # I2C triggered streaming enable + _HIMAX_MODE_STREAMING_NFRAMES = 0x03 # Output N frames + _HIMAX_MODE_STREAMING_TRIG = 0x05 # Hardware Trigger + # _HIMAX_SET_HMIRROR (r, x) ((r & 0xFE) | ((x & 1) << 0)) + # _HIMAX_SET_VMIRROR (r, x) ((r & 0xFD) | ((x & 1) << 1)) - PCLK_RISING_EDGE = 0x00 - PCLK_FALLING_EDGE = 0x01 - AE_CTRL_ENABLE = 0x00 - AE_CTRL_DISABLE = 0x01 - - HIMAX_BOOT_RETRY = 10 - HIMAX_LINE_LEN_PCK_FULL = 0x178 - HIMAX_FRAME_LENGTH_FULL = 0x109 + _PCLK_RISING_EDGE = 0x00 + _PCLK_FALLING_EDGE = 0x01 + _AE_CTRL_ENABLE = 0x00 + _AE_CTRL_DISABLE = 0x01 - HIMAX_LINE_LEN_PCK_QVGA = 0x178 - 
HIMAX_FRAME_LENGTH_QVGA = 0x104 + _HIMAX_BOOT_RETRY = 10 + _HIMAX_LINE_LEN_PCK_FULL = 0x178 + _HIMAX_FRAME_LENGTH_FULL = 0x109 - HIMAX_LINE_LEN_PCK_QQVGA = 0x178 - HIMAX_FRAME_LENGTH_QQVGA = 0x084 + _HIMAX_LINE_LEN_PCK_QVGA = 0x178 + _HIMAX_FRAME_LENGTH_QVGA = 0x104 - INIT_COMMANDS = ( - (BLC_TGT, 0x08), # BLC target :8 at 8 bit mode - (BLC2_TGT, 0x08), # BLI target :8 at 8 bit mode + _HIMAX_LINE_LEN_PCK_QQVGA = 0x178 + _HIMAX_FRAME_LENGTH_QQVGA = 0x084 + + _INIT_COMMANDS = ( + (_BLC_TGT, 0x08), # BLC target :8 at 8 bit mode + (_BLC2_TGT, 0x08), # BLI target :8 at 8 bit mode (0x3044, 0x0A), # Increase CDS time for settling (0x3045, 0x00), # Make symmetric for cds_tg and rst_tg (0x3047, 0x0A), # Increase CDS time for settling @@ -145,23 +149,23 @@ class HM01B0(DVP_Camera): (0x3059, 0x1E), (0x3064, 0x00), (0x3065, 0x04), # pad pull 0 - (ANA_Register_17, 0x00), # Disable internal oscillator + (_ANA_Register_17, 0x00), # Disable internal oscillator - (BLC_CFG, 0x43), # BLC_on, IIR + (_BLC_CFG, 0x43), # BLC_on, IIR (0x1001, 0x43), # BLC dithering en (0x1002, 0x43), # blc_darkpixel_thd (0x0350, 0x7F), # Dgain Control - (BLI_EN, 0x01), # BLI enable + (_BLI_EN, 0x01), # BLI enable (0x1003, 0x00), # BLI Target [Def: 0x20] - (DPC_CTRL, 0x01), # DPC option 0: DPC off 1 : mono 3 : bayer1 5 : bayer2 + (_DPC_CTRL, 0x01), # DPC option 0: DPC off 1 : mono 3 : bayer1 5 : bayer2 (0x1009, 0xA0), # cluster hot pixel th (0x100A, 0x60), # cluster cold pixel th - (SINGLE_THR_HOT, 0x90), # single hot pixel th - (SINGLE_THR_COLD, 0x40), # single cold pixel th + (_SINGLE_THR_HOT, 0x90), # single hot pixel th + (_SINGLE_THR_COLD, 0x40), # single cold pixel th (0x1012, 0x00), # Sync. shift disable - (STATISTIC_CTRL, 0x07), # AE stat en | MD LROI stat en | magic + (_STATISTIC_CTRL, 0x07), # AE stat en | MD LROI stat en | magic (0x2003, 0x00), (0x2004, 0x1C), (0x2007, 0x00), @@ -175,46 +179,46 @@ class HM01B0(DVP_Camera): (0x2017, 0x00), (0x2018, 0x9B), - (AE_CTRL, 0x01), #Automatic Exposure - (AE_TARGET_MEAN, 0x64), #AE target mean [Def: 0x3C] - (AE_MIN_MEAN, 0x0A), #AE min target mean [Def: 0x0A] - (CONVERGE_IN_TH, 0x03), #Converge in threshold [Def: 0x03] - (CONVERGE_OUT_TH, 0x05), #Converge out threshold [Def: 0x05] - (MAX_INTG_H, (HIMAX_FRAME_LENGTH_QVGA - 2) >> 8), #Maximum INTG High Byte [Def: 0x01] - (MAX_INTG_L, (HIMAX_FRAME_LENGTH_QVGA - 2) & 0xFF), #Maximum INTG Low Byte [Def: 0x54] - (MAX_AGAIN_FULL, 0x04), #Maximum Analog gain in full frame mode [Def: 0x03] - (MAX_AGAIN_BIN2, 0x04), #Maximum Analog gain in bin2 mode [Def: 0x04] - (MAX_DGAIN, 0xC0), + (_AE_CTRL, 0x01), #Automatic Exposure + (_AE_TARGET_MEAN, 0x64), #AE target mean [Def: 0x3C] + (_AE_MIN_MEAN, 0x0A), #AE min target mean [Def: 0x0A] + (_CONVERGE_IN_TH, 0x03), #Converge in threshold [Def: 0x03] + (_CONVERGE_OUT_TH, 0x05), #Converge out threshold [Def: 0x05] + (_MAX_INTG_H, (_HIMAX_FRAME_LENGTH_QVGA - 2) >> 8), #Maximum INTG High Byte [Def: 0x01] + (_MAX_INTG_L, (_HIMAX_FRAME_LENGTH_QVGA - 2) & 0xFF), #Maximum INTG Low Byte [Def: 0x54] + (_MAX_AGAIN_FULL, 0x04), #Maximum Analog gain in full frame mode [Def: 0x03] + (_MAX_AGAIN_BIN2, 0x04), #Maximum Analog gain in bin2 mode [Def: 0x04] + (_MAX_DGAIN, 0xC0), - (INTEGRATION_H, 0x01), #Integration H [Def: 0x01] - (INTEGRATION_L, 0x08), #Integration L [Def: 0x08] - (ANALOG_GAIN, 0x00), #Analog Global Gain [Def: 0x00] - (DAMPING_FACTOR, 0x20), #Damping Factor [Def: 0x20] - (DIGITAL_GAIN_H, 0x01), #Digital Gain High [Def: 0x01] - (DIGITAL_GAIN_L, 0x00), #Digital Gain Low [Def: 0x00] + (_INTEGRATION_H, 
0x01), #Integration H [Def: 0x01] + (_INTEGRATION_L, 0x08), #Integration L [Def: 0x08] + (_ANALOG_GAIN, 0x00), #Analog Global Gain [Def: 0x00] + (_DAMPING_FACTOR, 0x20), #Damping Factor [Def: 0x20] + (_DIGITAL_GAIN_H, 0x01), #Digital Gain High [Def: 0x01] + (_DIGITAL_GAIN_L, 0x00), #Digital Gain Low [Def: 0x00] - (FS_CTRL, 0x00), #Flicker Control + (_FS_CTRL, 0x00), #Flicker Control - (FS_60HZ_H, 0x00), - (FS_60HZ_L, 0x3C), - (FS_50HZ_H, 0x00), - (FS_50HZ_L, 0x32), + (_FS_60HZ_H, 0x00), + (_FS_60HZ_L, 0x3C), + (_FS_50HZ_H, 0x00), + (_FS_50HZ_L, 0x32), - (MD_CTRL, 0x00), - (FRAME_LEN_LINES_H, HIMAX_FRAME_LENGTH_QVGA >> 8), - (FRAME_LEN_LINES_L, HIMAX_FRAME_LENGTH_QVGA & 0xFF), - (LINE_LEN_PCK_H, HIMAX_LINE_LEN_PCK_QVGA >> 8), - (LINE_LEN_PCK_L, HIMAX_LINE_LEN_PCK_QVGA & 0xFF), - (QVGA_WIN_EN, 0x01), # Enable QVGA window readout + (_MD_CTRL, 0x00), + (_FRAME_LEN_LINES_H, _HIMAX_FRAME_LENGTH_QVGA >> 8), + (_FRAME_LEN_LINES_L, _HIMAX_FRAME_LENGTH_QVGA & 0xFF), + (_LINE_LEN_PCK_H, _HIMAX_LINE_LEN_PCK_QVGA >> 8), + (_LINE_LEN_PCK_L, _HIMAX_LINE_LEN_PCK_QVGA & 0xFF), + (_QVGA_WIN_EN, 0x01), # Enable QVGA window readout (0x0383, 0x01), (0x0387, 0x01), (0x0390, 0x00), (0x3011, 0x70), (0x3059, 0x22), # 1-bit mode - (OSC_CLK_DIV, 0x14), - (IMG_ORIENTATION, 0x00), # change the orientation + (_OSC_CLK_DIV, 0x14), + (_IMG_ORIENTATION, 0x00), # change the orientation (0x0104, 0x01), - (MODE_SELECT, 0x01), # Streaming mode + (_MODE_SELECT, 0x01), # Streaming mode ) def __init__( @@ -223,16 +227,34 @@ def __init__( i2c_address = 0x24, num_data_pins = 1 ): + """ + Initializes the HM01B0 camera with default settings. + + Args: + i2c (I2C): I2C object for communication + i2c_address (int, optional): I2C address (default: 0x24) + num_data_pins (int, optional): Number of data pins + - 1 (Default) + - 4 + - 8 + """ super().__init__(i2c, i2c_address) - self.soft_reset() - self.send_init(num_data_pins) + self._soft_reset() + self._send_init(num_data_pins) - def is_connected(self): + def _is_connected(self): + """ + Checks if the camera is connected by reading the chip ID. + + Returns: + bool: True if the camera is connected and the chip ID is correct, + otherwise False. + """ try: # Try to read the chip ID # If it throws an I/O error - the device isn't connected - id = self.getChipID() + id = self._get_chip_id() # Confirm the chip ID is correct if id == 0x01B0: @@ -242,49 +264,50 @@ def is_connected(self): except: return False - def getChipID(self): + def _get_chip_id(self): """ - Reads the chip ID from the HM01B0 sensor. + Reads the chip ID. + Returns: - int: The chip ID as a 16-bit integer. + int: The chip ID of the HM01B0 (should be 0x01B0). """ - data = self.readRegister(self.MODEL_ID_H, 2) + data = self._read_register(self._MODEL_ID_H, 2) return (data[0] << 8) | data[1] - def soft_reset(self): + def _soft_reset(self): """ Performs a software reset of the HM01B0 sensor. This resets the sensor to its default state. """ # HM01B0 can require multiple attempts to reset properly - for i in range(self.HIMAX_BOOT_RETRY): - self.writeRegister(self.SW_RESET, self.HIMAX_RESET) + for i in range(self._HIMAX_BOOT_RETRY): + self._write_register(self._SW_RESET, self._HIMAX_RESET) sleep_us(1000) - mode = self.readRegister(self.MODE_SELECT) - if mode[0] == self.HIMAX_MODE_STANDBY: + mode = self._read_register(self._MODE_SELECT) + if mode[0] == self._HIMAX_MODE_STANDBY: break sleep_us(10000) - def setMode(self, mode): + def _set_mode(self, mode): """ Sets the operating mode of the HM01B0 sensor. 
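
        Valid modes are the _HIMAX_MODE_* constants defined above, such as
        _HIMAX_MODE_STANDBY or _HIMAX_MODE_STREAMING.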
Args: mode (int): The mode to set, e.g., MODE_STREAMING. """ - self.writeRegister(self.MODE_SELECT, mode) + self._write_register(self._MODE_SELECT, mode) - def trigger(self): - self.writeRegister(self.MODE_SELECT, self.HIMAX_MODE_STREAMING_NFRAMES) + def _trigger(self): + self._write_register(self._MODE_SELECT, self._HIMAX_MODE_STREAMING_NFRAMES) - def set_n_frames(self, n_frames): - self.writeRegister(self.PMU_AUTOSLEEP_FRAMECNT, n_frames) + def _set_n_frames(self, n_frames): + self._write_register(self._PMU_AUTOSLEEP_FRAMECNT, n_frames) - def send_init(self, num_data_pins): + def _send_init(self, num_data_pins): """ Initializes the HM01B0 sensor with default settings. This includes setting up exposure, gain, and frame timing. """ - for reg, value in self.INIT_COMMANDS: + for reg, value in self._INIT_COMMANDS: if reg == 0x3059: # Set the data pin mode based on the number of data pins if num_data_pins == 1: @@ -293,13 +316,19 @@ def send_init(self, num_data_pins): value = 0x42 else: value = 0x02 - self.writeRegister(reg, value) + self._write_register(reg, value) sleep_us(1000) - - def read(self, image = None): + + def read(self, image=None): """ - Reads a frame from the camera. + Reads an image from the camera. + + Args: + image (ndarray, optional): Image to read into + Returns: - tuple: (success, frame) + tuple: (success, image) + - success (bool): True if the image was read, otherwise False + - image (ndarray): The captured image, or None if reading failed """ - return (True, cv2.cvtColor(self.buffer, cv2.COLOR_BayerRG2BGR, image)) + return (True, cv2.cvtColor(self._buffer, cv2.COLOR_BayerRG2BGR, image)) diff --git a/cv2_drivers/cameras/hm01b0_pio.py b/cv2_drivers/cameras/hm01b0_pio.py index 088e220..8ed6b01 100644 --- a/cv2_drivers/cameras/hm01b0_pio.py +++ b/cv2_drivers/cameras/hm01b0_pio.py @@ -3,21 +3,43 @@ from ulab import numpy as np class HM01B0_PIO(HM01B0, DVP_RP2_PIO): + """ + OpenCV HM01B0 camera driver using a PIO interface. Only available on + Raspberry Pi RP2 processors. + """ def __init__( self, i2c, + sm_id, pin_d0, pin_vsync, pin_hsync, pin_pclk, - sm_id, pin_xclk = None, xclk_freq = 25_000_000, num_data_pins = 1, i2c_address = 0x24, ): + """ + Initializes the HM01B0 PIO camera driver. + + Args: + i2c (I2C): I2C object for communication + sm_id (int): PIO state machine ID + pin_d0 (int): Data 0 pin number for DVP interface + pin_vsync (int): Vertical sync pin number + pin_hsync (int): Horizontal sync pin number + pin_pclk (int): Pixel clock pin number + pin_xclk (int, optional): External clock pin number + xclk_freq (int, optional): Frequency in Hz for the external clock + Default is 25 MHz + num_data_pins (int, optional): Number of data pins used in DVP interface + Default is 1 + i2c_address (int, optional): I2C address of the camera + Default is 0x24 + """ # Create the frame buffer - self.buffer = np.zeros((244, 324), dtype=np.uint8) + self._buffer = np.zeros((244, 324), dtype=np.uint8) # Call both parent constructors DVP_RP2_PIO.__init__( @@ -30,7 +52,7 @@ def __init__( xclk_freq, sm_id, num_data_pins, - bytes_per_frame = self.buffer.size, + bytes_per_frame = self._buffer.size, byte_swap = True ) HM01B0.__init__( @@ -41,7 +63,13 @@ def __init__( ) def open(self): - self.active(True) + """ + Opens the camera and prepares it for capturing images. + """ + self._active(True) def release(self): - self.active(False) + """ + Releases the camera and frees any resources. 
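+
+        Stops the PIO state machine and detaches the VSYNC interrupt handler.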
+ """ + self._active(False) diff --git a/cv2_drivers/cameras/ov5640.py b/cv2_drivers/cameras/ov5640.py index 140ddda..3a26132 100644 --- a/cv2_drivers/cameras/ov5640.py +++ b/cv2_drivers/cameras/ov5640.py @@ -1,16 +1,19 @@ from .dvp_camera import DVP_Camera from time import sleep_us import cv2 -# from micropython import const -# Derived from: +# This class is derived from: # https://github.com/adafruit/Adafruit_CircuitPython_OV5640 +# Released under the MIT license. +# Copyright (c) 2021 Jeff Epler for Adafruit Industries class OV5640(DVP_Camera): - - OV5640_COLOR_RGB = 0 - OV5640_COLOR_YUV = 1 - OV5640_COLOR_GRAYSCALE = 2 - OV5640_COLOR_JPEG = 3 + """ + Base class for OpenCV OV5640 camera drivers. + """ + _OV5640_COLOR_RGB = 0 + _OV5640_COLOR_YUV = 1 + _OV5640_COLOR_GRAYSCALE = 2 + _OV5640_COLOR_JPEG = 3 # fmt: off @@ -301,24 +304,24 @@ class OV5640(DVP_Camera): _TIMING_TC_REG21_HMIRROR = 0x06 # Horizontal mirror enable - OV5640_SIZE_96X96 = 0 # 96x96 - OV5640_SIZE_QQVGA = 1 # 160x120 - OV5640_SIZE_QCIF = 2 # 176x144 - OV5640_SIZE_HQVGA = 3 # 240x176 - OV5640_SIZE_240X240 = 4 # 240x240 - OV5640_SIZE_QVGA = 5 # 320x240 - OV5640_SIZE_CIF = 6 # 400x296 - OV5640_SIZE_HVGA = 7 # 480x320 - OV5640_SIZE_VGA = 8 # 640x480 - OV5640_SIZE_SVGA = 9 # 800x600 - OV5640_SIZE_XGA = 10 # 1024x768 - OV5640_SIZE_HD = 11 # 1280x720 - OV5640_SIZE_SXGA = 12 # 1280x1024 - OV5640_SIZE_UXGA = 13 # 1600x1200 - OV5640_SIZE_QHDA = 14 # 2560x1440 - OV5640_SIZE_WQXGA = 15 # 2560x1600 - OV5640_SIZE_PFHD = 16 # 1088x1920 - OV5640_SIZE_QSXGA = 17 # 2560x1920 + _OV5640_SIZE_96X96 = 0 # 96x96 + _OV5640_SIZE_QQVGA = 1 # 160x120 + _OV5640_SIZE_QCIF = 2 # 176x144 + _OV5640_SIZE_HQVGA = 3 # 240x176 + _OV5640_SIZE_240X240 = 4 # 240x240 + _OV5640_SIZE_QVGA = 5 # 320x240 + _OV5640_SIZE_CIF = 6 # 400x296 + _OV5640_SIZE_HVGA = 7 # 480x320 + _OV5640_SIZE_VGA = 8 # 640x480 + _OV5640_SIZE_SVGA = 9 # 800x600 + _OV5640_SIZE_XGA = 10 # 1024x768 + _OV5640_SIZE_HD = 11 # 1280x720 + _OV5640_SIZE_SXGA = 12 # 1280x1024 + _OV5640_SIZE_UXGA = 13 # 1600x1200 + _OV5640_SIZE_QHDA = 14 # 2560x1440 + _OV5640_SIZE_WQXGA = 15 # 2560x1600 + _OV5640_SIZE_PFHD = 16 # 1088x1920 + _OV5640_SIZE_QSXGA = 17 # 2560x1920 _ASPECT_RATIO_4X3 = 0 _ASPECT_RATIO_3X2 = 1 @@ -725,10 +728,10 @@ class OV5640(DVP_Camera): ] _ov5640_color_settings = { - OV5640_COLOR_RGB: _sensor_format_rgb565, - OV5640_COLOR_YUV: _sensor_format_yuv422, - OV5640_COLOR_GRAYSCALE: _sensor_format_grayscale, - OV5640_COLOR_JPEG: _sensor_format_jpeg, + _OV5640_COLOR_RGB: _sensor_format_rgb565, + _OV5640_COLOR_YUV: _sensor_format_yuv422, + _OV5640_COLOR_GRAYSCALE: _sensor_format_grayscale, + _OV5640_COLOR_JPEG: _sensor_format_jpeg, } _contrast_settings = [ @@ -763,11 +766,11 @@ class OV5640(DVP_Camera): [0x30, 0x28, 0x61, 0x30, 0x28, 0x10], # -1 ] - OV5640_WHITE_BALANCE_AUTO = 0 - OV5640_WHITE_BALANCE_SUNNY = 1 - OV5640_WHITE_BALANCE_FLUORESCENT = 2 - OV5640_WHITE_BALANCE_CLOUDY = 3 - OV5640_WHITE_BALANCE_INCANDESCENT = 4 + _OV5640_WHITE_BALANCE_AUTO = 0 + _OV5640_WHITE_BALANCE_SUNNY = 1 + _OV5640_WHITE_BALANCE_FLUORESCENT = 2 + _OV5640_WHITE_BALANCE_CLOUDY = 3 + _OV5640_WHITE_BALANCE_INCANDESCENT = 4 _light_registers = [0x3406, 0x3400, 0x3401, 0x3402, 0x3403, 0x3404, 0x3405] _light_modes = [ @@ -779,13 +782,13 @@ class OV5640(DVP_Camera): ] - OV5640_SPECIAL_EFFECT_NONE = 0 - OV5640_SPECIAL_EFFECT_NEGATIVE = 1 - OV5640_SPECIAL_EFFECT_GRAYSCALE = 2 - OV5640_SPECIAL_EFFECT_RED_TINT = 3 - OV5640_SPECIAL_EFFECT_GREEN_TINT = 4 - OV5640_SPECIAL_EFFECT_BLUE_TINT = 5 - OV5640_SPECIAL_EFFECT_SEPIA = 6 
+ _OV5640_SPECIAL_EFFECT_NONE = 0 + _OV5640_SPECIAL_EFFECT_NEGATIVE = 1 + _OV5640_SPECIAL_EFFECT_GRAYSCALE = 2 + _OV5640_SPECIAL_EFFECT_RED_TINT = 3 + _OV5640_SPECIAL_EFFECT_GREEN_TINT = 4 + _OV5640_SPECIAL_EFFECT_BLUE_TINT = 5 + _OV5640_SPECIAL_EFFECT_SEPIA = 6 _sensor_special_effects = [ [0x06, 0x40, 0x10, 0x08], # Normal @@ -817,7 +820,7 @@ class OV5640(DVP_Camera): 0x5490, 0x1D, ] - sensor_regs_gamma1 = [ + _sensor_regs_gamma1 = [ 0x5480, 0x1, 0x5481, 0x0, 0x5482, 0x1E, @@ -837,7 +840,7 @@ class OV5640(DVP_Camera): 0x5490, 0x1D, ] - sensor_regs_awb0 = [ + _sensor_regs_awb0 = [ 0x5180, 0xFF, 0x5181, 0xF2, 0x5182, 0x00, @@ -877,16 +880,23 @@ def __init__( i2c, i2c_address = 0x3C ): + """ + Initializes the OV5640 camera sensor with default settings. + + Args: + i2c (I2C): I2C object for communication + i2c_address (int, optional): I2C address (default: 0x3C) + """ super().__init__(i2c, i2c_address) - self.write_list(self._sensor_default_regs) + self._write_list(self._sensor_default_regs) - self._colorspace = self.OV5640_COLOR_RGB + self._colorspace = self._OV5640_COLOR_RGB self._flip_x = False self._flip_y = False self._w = None self._h = None - self._size = self.OV5640_SIZE_QVGA + self._size = self._OV5640_SIZE_QVGA self._test_pattern = False self._binning = False self._scale = False @@ -895,11 +905,18 @@ def __init__( self._set_size_and_colorspace() - def is_connected(self): + def _is_connected(self): + """ + Checks if the camera is connected by reading the chip ID. + + Returns: + bool: True if the camera is connected and the chip ID is correct, + otherwise False. + """ try: # Try to read the chip ID # If it throws an I/O error - the device isn't connected - id = self.getChipID() + id = self._get_chip_id() # Confirm the chip ID is correct if id == 0x5640: @@ -909,39 +926,26 @@ def is_connected(self): except: return False - def getChipID(self): + def _get_chip_id(self): """ - Reads the chip ID from the HM01B0 sensor. + Reads the chip ID. + Returns: - int: The chip ID as a 16-bit integer. + int: The chip ID of the OV5640 (should be 0x5640). """ - data = self.readRegister(self._CHIP_ID_HIGH, 2) + data = self._read_register(self._CHIP_ID_HIGH, 2) return (data[0] << 8) | data[1] - def soft_reset(self): + def _soft_reset(self): """ - Performs a software reset of the HM01B0 sensor. + Performs a software reset of the OV5640 sensor. This resets the sensor to its default state. """ - self.writeRegister(self._SYSTEM_CTROL0, 0x82) - - # def setMode(self, mode): - # """ - # Sets the operating mode of the HM01B0 sensor. - # Args: - # mode (int): The mode to set, e.g., MODE_STREAMING. - # """ - # self.writeRegister(self.MODE_SELECT, mode) + self._write_register(self._SYSTEM_CTROL0, 0x82) - # def trigger(self): - # self.writeRegister(self.MODE_SELECT, self.HIMAX_MODE_STREAMING_NFRAMES) - - # def set_n_frames(self, n_frames): - # self.writeRegister(self.PMU_AUTOSLEEP_FRAMECNT, n_frames) - - def write_list(self, data): + def _write_list(self, data): """ - Initializes the HM01B0 sensor with default settings. + Initializes the OV5640 sensor with default settings. This includes setting up exposure, gain, and frame timing. """ for i in range(len(data) // 2): @@ -950,10 +954,13 @@ def write_list(self, data): if reg == self._REG_DLY: sleep_us(value) else: - self.writeRegister(reg, value) + self._write_register(reg, value) sleep_us(1000) def _set_size_and_colorspace(self) -> None: + """ + Sets the camera resolution and colorspace based on the current size. 
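# (Illustrative note on the register-list format consumed by _write_list():
# a flat [reg, value, reg, value, ...] sequence, where a reg equal to
# _REG_DLY means "sleep for value microseconds" instead of a register
# write. The concrete values below are hypothetical:
#     [0x3008, 0x42, _REG_DLY, 5000, 0x3008, 0x02]
# writes 0x42 to 0x3008, waits 5 ms, then writes 0x02 to 0x3008.)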
+ """ size = self._size width, height, ratio = self._resolution_info[size] self._w = width @@ -995,11 +1002,11 @@ def _set_size_and_colorspace(self) -> None: self._set_image_options() - if self._colorspace == self.OV5640_COLOR_JPEG: + if self._colorspace == self._OV5640_COLOR_JPEG: sys_mul = 200 - if size < self.OV5640_SIZE_QVGA: + if size < self._OV5640_SIZE_QVGA: sys_mul = 160 - if size < self.OV5640_SIZE_XGA: + if size < self._OV5640_SIZE_XGA: sys_mul = 180 self._set_pll(False, sys_mul, 4, 2, False, 2, True, 4) else: @@ -1018,6 +1025,9 @@ def _set_pll( pclk_manual: bool, pclk_div: int, ) -> None: + """ + Sets the PLL (Phase-Locked Loop) configuration for the OV5640 camera. + """ if ( multiplier > 252 or multiplier < 4 @@ -1028,25 +1038,32 @@ def _set_pll( ): raise ValueError("Invalid argument to internal function") - self.writeRegister(0x3039, 0x80 if bypass else 0) - self.writeRegister(0x3034, 0x1A) - self.writeRegister(0x3035, 1 | ((sys_div & 0xF) << 4)) - self.writeRegister(0x3036, multiplier & 0xFF) - self.writeRegister(0x3037, (pre_div & 0xF) | (0x10 if root_2x else 0)) - self.writeRegister(0x3108, (pclk_root_div & 3) << 4 | 0x06) - self.writeRegister(0x3824, pclk_div & 0x1F) - self.writeRegister(0x460C, 0x22 if pclk_manual else 0x22) - self.writeRegister(0x3103, 0x13) + self._write_register(0x3039, 0x80 if bypass else 0) + self._write_register(0x3034, 0x1A) + self._write_register(0x3035, 1 | ((sys_div & 0xF) << 4)) + self._write_register(0x3036, multiplier & 0xFF) + self._write_register(0x3037, (pre_div & 0xF) | (0x10 if root_2x else 0)) + self._write_register(0x3108, (pclk_root_div & 3) << 4 | 0x06) + self._write_register(0x3824, pclk_div & 0x1F) + self._write_register(0x460C, 0x22 if pclk_manual else 0x22) + self._write_register(0x3103, 0x13) def _set_colorspace(self) -> None: + """ + Sets the colorspace of the OV5640 camera based on the current colorspace + setting. + """ colorspace = self._colorspace settings = self._ov5640_color_settings[colorspace] - self.write_list(settings) + self._write_list(settings) def _set_image_options(self) -> None: + """ + Sets the image options such as binning, flipping, and colorspace. + """ reg20 = reg21 = reg4514 = reg4514_test = 0 - if self._colorspace == self.OV5640_COLOR_JPEG: + if self._colorspace == self._OV5640_COLOR_JPEG: reg21 |= 0x20 if self._binning: @@ -1081,21 +1098,24 @@ def _set_image_options(self) -> None: elif reg4514_test == 7: reg4514 = 0xAA - self.writeRegister(self._TIMING_TC_REG20, reg20) - self.writeRegister(self._TIMING_TC_REG21, reg21) - self.writeRegister(0x4514, reg4514) + self._write_register(self._TIMING_TC_REG20, reg20) + self._write_register(self._TIMING_TC_REG21, reg21) + self._write_register(0x4514, reg4514) if self._binning: - self.writeRegister(0x4520, 0x0B) - self.writeRegister(self._X_INCREMENT, 0x31) - self.writeRegister(self._Y_INCREMENT, 0x31) + self._write_register(0x4520, 0x0B) + self._write_register(self._X_INCREMENT, 0x31) + self._write_register(self._Y_INCREMENT, 0x31) else: - self.writeRegister(0x4520, 0x10) - self.writeRegister(self._X_INCREMENT, 0x11) - self.writeRegister(self._Y_INCREMENT, 0x11) + self._write_register(0x4520, 0x10) + self._write_register(self._X_INCREMENT, 0x11) + self._write_register(self._Y_INCREMENT, 0x11) def _write_addr_reg(self, reg: int, x_value: int, y_value: int) -> None: - self.writeRegister(reg, [ + """ + Writes 2 16-bit values to 4 8-bit registers. 
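# (Worked example of the 16-bit to 2x8-bit split performed by
# _write_addr_reg(): x_value=320 (0x0140) and y_value=240 (0x00F0) are
# written as the four bytes 0x01, 0x40, 0x00, 0xF0 starting at reg.)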
+ """ + self._write_register(reg, [ (x_value >> 8) & 0xFF, x_value & 0xFF, (y_value >> 8) & 0xFF, @@ -1103,24 +1123,33 @@ def _write_addr_reg(self, reg: int, x_value: int, y_value: int) -> None: ]) def _write_reg_bits(self, reg: int, mask: int, enable: bool) -> None: - val = self.readRegister(reg)[0] + """ + Writes a bitmask to a register, enabling or disabling specific bits. + """ + val = self._read_register(reg)[0] if enable: val |= mask else: val &= ~mask - self.writeRegister(reg, val) + self._write_register(reg, val) def read(self, image = None): """ - Reads a frame from the camera. + Reads an image from the camera. + + Args: + image (ndarray, optional): Image to read into + Returns: - tuple: (success, frame) + tuple: (success, image) + - success (bool): True if the image was read, otherwise False + - image (ndarray): The captured image, or None if reading failed """ - if self._colorspace == self.OV5640_COLOR_RGB: - return (True, cv2.cvtColor(self.buffer, cv2.COLOR_BGR5652BGR, image)) - elif self._colorspace == self.OV5640_COLOR_GRAYSCALE: - return (True, cv2.cvtColor(self.buffer, cv2.COLOR_GRAY2BGR, image)) + if self._colorspace == self._OV5640_COLOR_RGB: + return (True, cv2.cvtColor(self._buffer, cv2.COLOR_BGR5652BGR, image)) + elif self._colorspace == self._OV5640_COLOR_GRAYSCALE: + return (True, cv2.cvtColor(self._buffer, cv2.COLOR_GRAY2BGR, image)) else: NotImplementedError( - f"OV5640:Reading images in colorspace {self._colorspace} is not yet implemented." + f"OV5640: Reading images in colorspace {self._colorspace} is not yet implemented." ) diff --git a/cv2_drivers/cameras/ov5640_pio.py b/cv2_drivers/cameras/ov5640_pio.py index 5e3fa40..1b7bd88 100644 --- a/cv2_drivers/cameras/ov5640_pio.py +++ b/cv2_drivers/cameras/ov5640_pio.py @@ -3,20 +3,40 @@ from ulab import numpy as np class OV5640_PIO(OV5640, DVP_RP2_PIO): + """ + OpenCV OV5640 camera driver using a PIO interface. Only available on + Raspberry Pi RP2 processors. + """ def __init__( self, i2c, + sm_id, pin_d0, pin_vsync, pin_hsync, pin_pclk, - sm_id, pin_xclk = None, xclk_freq = 5_000_000, i2c_address = 0x3c ): + """ + Initializes the OV5640 PIO camera driver. + + Args: + i2c (I2C): I2C object for communication + sm_id (int): PIO state machine ID + pin_d0 (int): Data 0 pin number for DVP interface + pin_vsync (int): Vertical sync pin number + pin_hsync (int): Horizontal sync pin number + pin_pclk (int): Pixel clock pin number + pin_xclk (int, optional): External clock pin number + xclk_freq (int, optional): Frequency in Hz for the external clock + Default is 5 MHz + i2c_address (int, optional): I2C address of the camera + Default is 0x3c + """ # Create the frame buffer - self.buffer = np.zeros((240, 320, 2), dtype=np.uint8) + self._buffer = np.zeros((240, 320, 2), dtype=np.uint8) # Call both parent constructors DVP_RP2_PIO.__init__( @@ -29,7 +49,7 @@ def __init__( xclk_freq, sm_id, num_data_pins = 8, - bytes_per_frame = self.buffer.size, + bytes_per_frame = self._buffer.size, byte_swap = False ) OV5640.__init__( @@ -39,7 +59,13 @@ def __init__( ) def open(self): - self.active(True) + """ + Opens the camera and prepares it for capturing images. + """ + self._active(True) def release(self): - self.active(False) + """ + Releases the camera and frees any resources. 
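# (Usage sketch for OV5640_PIO; the wiring matches the commented example in
# examples/cv2_hardware_init/camera.py later in this series, but remains an
# assumption for any particular board.)
# from machine import I2C
# import cv2_drivers.cameras.ov5640_pio as ov5640_pio
# i2c = I2C(0)
# camera = ov5640_pio.OV5640_PIO(i2c, sm_id=5, pin_d0=8, pin_vsync=22,
#                                pin_hsync=21, pin_pclk=20, pin_xclk=3)
# camera.open()
# ok, frame = camera.read()  # BGR, converted from the RGB565 frame buffer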
+ """ + self._active(False) diff --git a/cv2_drivers/displays/__init__.py b/cv2_drivers/displays/__init__.py index 8028cdc..dbbadf7 100644 --- a/cv2_drivers/displays/__init__.py +++ b/cv2_drivers/displays/__init__.py @@ -1,2 +1,9 @@ +# Import platform agnostic drivers from . import st7789_spi -from . import st7789_pio \ No newline at end of file + +# Import sys module to check platform +import sys + +# Import RP2 drivers +if 'rp2' in sys.platform: + from . import st7789_pio diff --git a/cv2_drivers/displays/cv2_display.py b/cv2_drivers/displays/cv2_display.py index 34f0029..745d0a8 100644 --- a/cv2_drivers/displays/cv2_display.py +++ b/cv2_drivers/displays/cv2_display.py @@ -3,13 +3,37 @@ from machine import Pin class CV2_Display(): - def __init__(self, buffer_size): + """ + Base class for OpenCV display drivers. + """ + def __init__(self, buffer_shape): + """ + Initializes the display. + + Args: + buffer_shape (tuple): Shape of the buffer as (rows, cols, channels) + """ # Create the frame buffer - self.buffer = np.zeros(buffer_size, dtype=np.uint8) + self._buffer = np.zeros(buffer_shape, dtype=np.uint8) + + def imshow(self, image): + """ + Shows a NumPy image on the display. + + Args: + image (ndarray): Image to show + """ + raise NotImplementedError("imshow() must be implemented by driver") + + def clear(self): + """ + Clears the display by filling it with black color. + """ + raise NotImplementedError("clear() must be implemented by driver") def _get_common_roi_with_buffer(self, image): """ - Get the common region of interest (ROI) between the image and the + Gets the common region of interest (ROI) between the image and the display's internal buffer. Args: @@ -17,6 +41,8 @@ def _get_common_roi_with_buffer(self, image): Returns: tuple: (image_roi, buffer_roi) + - image_roi (ndarray): ROI of the image + - buffer_roi (ndarray): ROI of the display's buffer """ # Ensure image is a NumPy ndarray if type(image) is not np.ndarray: @@ -30,15 +56,15 @@ def _get_common_roi_with_buffer(self, image): image_cols = image.shape[1] # Get the common ROI between the image and the buffer - row_max = min(image_rows, self.height) - col_max = min(image_cols, self.width) + row_max = min(image_rows, self._buffer.shape[0]) + col_max = min(image_cols, self._buffer.shape[1]) img_roi = image[:row_max, :col_max] - buffer_roi = self.buffer[:row_max, :col_max] + buffer_roi = self._buffer[:row_max, :col_max] return img_roi, buffer_roi - def _convert_image_to_uint8(self, image): + def _convert_to_uint8(self, image): """ - Convert the image to uint8 format if necessary. + Converts the image to uint8 format if necessary. Args: image (ndarray): Image to convert @@ -65,37 +91,39 @@ def _convert_image_to_uint8(self, image): else: raise ValueError(f"Unsupported image dtype: {image.dtype}") - def _write_image_to_buffer_bgr565(self, image_roi, buffer_roi): + def _convert_to_bgr565(self, src, dst): """ - Convert the image ROI to BGR565 format and write it to the buffer ROI. + Converts an image to BGR565 format. 
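# (Worked example of the ROI clipping above: with the ST7789's 240x320
# buffer, a 300x400 image clips to image[:240, :320]; a 200x200 image maps
# to buffer[:200, :200], leaving the rest of the display untouched.)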
Args: - image_roi (ndarray): Image region of interest - buffer_roi (ndarray): Buffer region of interest + src (ndarray): Input image + dst (ndarray): Output BGR565 buffer """ # Determine the number of channels in the image - if image_roi.ndim < 3: + if src.ndim < 3: ch = 1 else: - ch = image_roi.shape[2] + ch = src.shape[2] + # Convert the image to BGR565 format based on the number of channels if ch == 1: # Grayscale - buffer_roi = cv2.cvtColor(image_roi, cv2.COLOR_GRAY2BGR565, buffer_roi) + dst = cv2.cvtColor(src, cv2.COLOR_GRAY2BGR565, dst) elif ch == 2: # Already in BGR565 format # For some reason, this is relatively slow and creates a new buffer: # https://github.com/v923z/micropython-ulab/issues/726 - buffer_roi[:] = image_roi + dst[:] = src elif ch == 3: # BGR - buffer_roi = cv2.cvtColor(image_roi, cv2.COLOR_BGR2BGR565, buffer_roi) + dst = cv2.cvtColor(src, cv2.COLOR_BGR2BGR565, dst) else: raise ValueError("Image must be 1, 2 or 3 channels (grayscale, BGR565, or BGR)") - def savePinModeAlt(self, pin): + def _save_pin_mode_alt(self, pin): """ Saves the current `mode` and `alt` of the pin so it can be restored - later. Mostly used to restore the SPI mode (MISO) of the DC pin after - communication with the display in case another device is using the same - SPI bus. + later. Mostly used for SPI displays on a shared SPI bus with a driver + that needs non-SPI pin modes, such as the RP2 PIO driver. This allows + other devices on the bus to continue using the SPI interface after the + display driver finishes communicating with the display. Returns: tuple: (mode, alt) @@ -139,8 +167,17 @@ def savePinModeAlt(self, pin): return (mode, alt) def splash(self, filename="splash.png"): + """ + Shows a splash image on the display if one is available, otherwise + clears the display of any previous content. + + Args: + filename (str, optional): Path to a splash image file. Defaults to + "splash.png" + """ try: + # Attempt to load and show the splash image self.imshow(cv2.imread(filename)) - return True except Exception: - return False + # Couldn't load the image, just clear the display as a fallback + self.clear() diff --git a/cv2_drivers/displays/st7789.py b/cv2_drivers/displays/st7789.py index ca3a01f..99a531f 100644 --- a/cv2_drivers/displays/st7789.py +++ b/cv2_drivers/displays/st7789.py @@ -2,27 +2,17 @@ from time import sleep_ms import struct -# Derived from: +# This class is derived from: # https://github.com/easytarget/st7789-framebuffer/blob/main/st7789_purefb.py +# Released under the MIT license. +# Copyright (c) 2024 Owen Carter +# Copyright (c) 2024 Ethan Lacasse +# Copyright (c) 2020-2023 Russ Hughes +# Copyright (c) 2019 Ivan Belokobylskiy class ST7789(CV2_Display): """ - OpenCV driver for ST7789 displays - - Args: - width (int): display width **Required** - height (int): display height **Required** - rotation (int): Orientation of display - - 0-Portrait, default - - 1-Landscape - - 2-Inverted Portrait - - 3-Inverted Landscape - color_order (int): - - RGB: Red, Green Blue, default - - BGR: Blue, Green, Red - reverse_bytes_in_word (bool): - - Enable if the display uses LSB byte order for color words + Base class for OpenCV ST7789 display drivers. 
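# (Usage note: because splash() now falls back to clear() internally, a
# caller only needs the single line below; the filename argument is
# optional and defaults to "splash.png".)
# display.splash()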
""" - # ST7789 commands _ST7789_SWRESET = b"\x01" _ST7789_SLPIN = b"\x10" @@ -50,19 +40,6 @@ class ST7789(CV2_Display): _ST7789_MADCTL_MH = 0x04 _ST7789_MADCTL_RGB = 0x00 - RGB = 0x00 - BGR = 0x08 - - # 8 basic color definitions - BLACK = 0x0000 - BLUE = 0x001F - RED = 0xF800 - GREEN = 0x07E0 - CYAN = 0x07FF - MAGENTA = 0xF81F - YELLOW = 0xFFE0 - WHITE = 0xFFFF - _ENCODE_POS = ">HH" # Rotation tables @@ -134,123 +111,151 @@ def __init__( width, height, rotation=0, - color_order=BGR, + bgr_order=True, reverse_bytes_in_word=True, ): + """ + Initializes the ST7789 display driver. + + Args: + width (int): Display width in pixels + height (int): Display height in pixels + rotation (int, optional): Orientation of display + - 0: Portrait (default) + - 1: Landscape + - 2: Inverted portrait + - 3: Inverted landscape + bgr_order (bool, optional): Color order + - True: BGR (default) + - False: RGB + reverse_bytes_in_word (bool, optional): + - Enable if the display uses LSB byte order for color words + """ # Initial dimensions and offsets; will be overridden when rotation applied - self.width = width - self.height = height - self.xstart = 0 - self.ystart = 0 + self._width = width + self._height = height + self._xstart = 0 + self._ystart = 0 # Check display is known and get rotation table - self.rotations = self._find_rotations(width, height) - if not self.rotations: + self._rotations = self._find_rotations(width, height) + if not self._rotations: supported_displays = ", ".join( [f"{display[0]}x{display[1]}" for display in self._SUPPORTED_DISPLAYS]) raise ValueError( f"Unsupported {width}x{height} display. Supported displays: {supported_displays}") # Colors - self.color_order = color_order - self.needs_swap = reverse_bytes_in_word + self._bgr_order = bgr_order + self._needs_swap = reverse_bytes_in_word # Reset the display - self.soft_reset() + self._soft_reset() # Yes, send init twice, once is not always enough - self.send_init(self._ST7789_INIT_CMDS) - self.send_init(self._ST7789_INIT_CMDS) + self._send_init(self._ST7789_INIT_CMDS) + self._send_init(self._ST7789_INIT_CMDS) # Initial rotation self._rotation = rotation % 4 # Apply rotation - self.rotation(self._rotation) + self._set_rotation(self._rotation) # Create the framebuffer for the correct rotation - super().__init__((self.height, self.width, 2)) + super().__init__((self._height, self._width, 2)) - def send_init(self, commands): + def _send_init(self, commands): """ - Send initialisation commands to display. + Sends initialization commands to display. + + Args: + commands (list): List of tuples (command, data, delay_ms) """ - for command, data, delay in commands: + for command, data, delay_ms in commands: self._write(command, data) - sleep_ms(delay) + sleep_ms(delay_ms) - def soft_reset(self): + def _soft_reset(self): """ - Soft reset display. + Sends a software reset command to the display. """ self._write(self._ST7789_SWRESET) sleep_ms(150) def _find_rotations(self, width, height): - """ Find the correct rotation for our display or return None """ + """ + Find the correct rotation for our display or returns None. + + Args: + width (int): Display width in pixels + height (int): Display height in pixels + Returns: + list: Rotation table for the display or None if not found + """ for display in self._SUPPORTED_DISPLAYS: if display[0] == width and display[1] == height: return display[2] return None - def rotation(self, rotation): + def _set_rotation(self, rotation): """ - Set display rotation. + Sets display rotation. 
Args: rotation (int): - - 0-Portrait - - 1-Landscape - - 2-Inverted Portrait - - 3-Inverted Landscape + - 0: Portrait + - 1: Landscape + - 2: Inverted portrait + - 3: Inverted landscape """ - if ((rotation % 2) != (self._rotation % 2)) and (self.width != self.height): + if ((rotation % 2) != (self._rotation % 2)) and (self._width != self._height): # non-square displays can currently only be rotated by 180 degrees # TODO: can framebuffer of super class be destroyed and re-created # to match the new dimensions? or it's width/height changed? return # find rotation parameters and send command - rotation %= len(self.rotations) + rotation %= len(self._rotations) ( madctl, - self.width, - self.height, - self.xstart, - self.ystart, ) = self.rotations[rotation] - if self.color_order == self.BGR: + self._width, + self._height, + self._xstart, + self._ystart, ) = self._rotations[rotation] + if self._bgr_order: madctl |= self._ST7789_MADCTL_BGR else: madctl &= ~self._ST7789_MADCTL_BGR self._write(self._ST7789_MADCTL, bytes([madctl])) # Set window for writing into self._write(self._ST7789_CASET, - struct.pack(self._ENCODE_POS, self.xstart, self.width + self.xstart - 1)) + struct.pack(self._ENCODE_POS, self._xstart, self._width + self._xstart - 1)) self._write(self._ST7789_RASET, - struct.pack(self._ENCODE_POS, self.ystart, self.height + self.ystart - 1)) + struct.pack(self._ENCODE_POS, self._ystart, self._height + self._ystart - 1)) self._write(self._ST7789_RAMWR) # TODO: Can we swap (modify) framebuffer width/height in the super() class? self._rotation = rotation def imshow(self, image): """ - Display a NumPy image on the screen. + Shows a NumPy image on the display. Args: - image (ndarray): Image to display + image (ndarray): Image to show """ # Get the common ROI between the image and internal display buffer image_roi, buffer_roi = self._get_common_roi_with_buffer(image) # Ensure the image is in uint8 format - image_roi = self._convert_image_to_uint8(image_roi) + image_roi = self._convert_to_uint8(image_roi) # Convert the image to BGR565 format and write it to the buffer - self._write_image_to_buffer_bgr565(image_roi, buffer_roi) + self._convert_to_bgr565(image_roi, buffer_roi) # Write buffer to display. Swap bytes if needed - if self.needs_swap: - self._write(None, self.buffer[:, :, ::-1]) + if self._needs_swap: + self._write(None, self._buffer[:, :, ::-1]) else: - self._write(None, self.buffer) + self._write(None, self._buffer) def clear(self): """ - Clear the display by filling it with black color. + Clears the display by filling it with black color. 
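# (Worked example of the window set above, assuming a portrait 240x320
# panel with zero offsets: CASET packs (0, 239) and RASET packs (0, 319),
# both big-endian per _ENCODE_POS = ">HH", i.e. the end coordinates are
# inclusive.)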
""" # Clear the buffer by filling it with zeros (black) - self.buffer[:] = 0 + self._buffer[:] = 0 # Write the buffer to the display - self._write(None, self.buffer) + self._write(None, self._buffer) diff --git a/cv2_drivers/displays/st7789_pio.py b/cv2_drivers/displays/st7789_pio.py index 34dba5d..4e439ff 100644 --- a/cv2_drivers/displays/st7789_pio.py +++ b/cv2_drivers/displays/st7789_pio.py @@ -1,33 +1,13 @@ from .st7789 import ST7789 from machine import Pin import rp2 -# import time # Derived from: # https://github.com/raspberrypi/pico-examples/tree/master/pio/st7789_lcd class ST7789_PIO(ST7789): """ - OpenCV PIO driver for ST7789 displays - - Args: - width (int): display width **Required** - height (int): display height **Required** - sm_id (int): State Machine ID for PIO **Required** - pin_clk (Pin): Clock pin number **Required** - pin_tx (Pin): Transmit pin number **Required** - pin_dc (Pin): Data/Command pin number **Required** - pin_cs (Pin): Chip Select pin number - freq (int): State machine frequency in Hz, default -1 (system clock) - rotation (int): Orientation of display - - 0-Portrait, default - - 1-Landscape - - 2-Inverted Portrait - - 3-Inverted Landscape - color_order (int): - - RGB: Red, Green Blue, default - - BGR: Blue, Green, Red - reverse_bytes_in_word (bool): - - Enable if the display uses LSB byte order for color words + OpenCV ST7789 display driver using a PIO interface. Only available on + Raspberry Pi RP2 processors. """ def __init__( self, @@ -40,42 +20,72 @@ def __init__( pin_cs=None, freq=-1, rotation=0, - color_order=ST7789.BGR, + bgr_order=True, reverse_bytes_in_word=True, ): + """ + Initializes the ST7789 PIO display driver. + + Args: + width (int): Display width in pixels + height (int): Display height in pixels + sm_id (int): PIO state machine ID + pin_clk (int): Clock pin number + pin_tx (int): Data pin number + pin_dc (int): Data/Command pin number + pin_cs (int, optional): Chip Select pin number + freq (int, optional): Frequency in Hz for the PIO state machine + Default is -1, which uses the default frequency of 125MHz + rotation (int, optional): Orientation of display + - 0: Portrait (default) + - 1: Landscape + - 2: Inverted portrait + - 3: Inverted landscape + bgr_order (bool, optional): Color order + - True: BGR (default) + - False: RGB + reverse_bytes_in_word (bool, optional): + - Enable if the display uses LSB byte order for color words + """ # Store PIO arguments - self.sm_id = sm_id - self.clk = Pin(pin_clk) # Don't change mode/alt - self.tx = Pin(pin_tx) # Don't change mode/alt - self.dc = Pin(pin_dc) # Don't change mode/alt - self.cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs else None - self.freq = freq + self._sm_id = sm_id + self._clk = Pin(pin_clk) # Don't change mode/alt + self._tx = Pin(pin_tx) # Don't change mode/alt + self._dc = Pin(pin_dc) # Don't change mode/alt + self._cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs else None + self._freq = freq # Start the PIO state machine and DMA with 1 bytes per transfer self._setup_sm_and_dma(1) # Call the parent class constructor - super().__init__(width, height, rotation, color_order, reverse_bytes_in_word) + super().__init__(width, height, rotation, bgr_order, reverse_bytes_in_word) # Change the transfer size to 2 bytes for faster throughput. Can't do 4 # bytes, because then pairs of pixels get swapped self._setup_sm_and_dma(2) def _setup_sm_and_dma(self, bytes_per_transfer): + """ + Sets up the PIO state machine and DMA for writing to the display. 
+ + Args: + bytes_per_transfer (int): Number of bytes to transfer in each write + """ # Store the bytes per transfer for later use - self.bytes_per_transfer = bytes_per_transfer + self._bytes_per_transfer = bytes_per_transfer # Get the current mode and alt of the pins so they can be restored - txMode, txAlt = self.savePinModeAlt(self.tx) - clkMode, clkAlt = self.savePinModeAlt(self.clk) + txMode, txAlt = self._save_pin_mode_alt(self._tx) + clkMode, clkAlt = self._save_pin_mode_alt(self._clk) # Initialize the PIO state machine - self.sm = rp2.StateMachine( - self.sm_id, + self._sm = rp2.StateMachine( + self._sm_id, self._pio_write_spi, - freq = self.freq, - out_base = self.tx, - sideset_base = self.clk, + freq = self._freq, + out_base = self._tx, + sideset_base = self._clk, pull_thresh = bytes_per_transfer * 8 ) @@ -83,75 +93,83 @@ def _setup_sm_and_dma(self, bytes_per_transfer): # We need to save them again to restore later when _write() is called, # if we haven't already if not hasattr(self, 'txMode'): - self.txMode, self.txAlt = self.savePinModeAlt(self.tx) - self.clkMode, self.clkAlt = self.savePinModeAlt(self.clk) + self._txMode, self._txAlt = self._save_pin_mode_alt(self._tx) + self._clkMode, self._clkAlt = self._save_pin_mode_alt(self._clk) # Now restore the original mode and alt of the pins - self.tx.init(mode=txMode, alt=txAlt) - self.clk.init(mode=clkMode, alt=clkAlt) + self._tx.init(mode=txMode, alt=txAlt) + self._clk.init(mode=clkMode, alt=clkAlt) # Instantiate a DMA controller if not already done if not hasattr(self, 'dma'): - self.dma = rp2.DMA() + self._dma = rp2.DMA() # Configure up DMA to write to the PIO state machine - req_num = ((self.sm_id // 4) << 3) + (self.sm_id % 4) - dma_ctrl = self.dma.pack_ctrl( + req_num = ((self._sm_id // 4) << 3) + (self._sm_id % 4) + dma_ctrl = self._dma.pack_ctrl( size = {1:0, 2:1, 4:2}[bytes_per_transfer], # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit inc_write = False, treq_sel = req_num, bswap = False ) - self.dma.config( - write = self.sm, + self._dma.config( + write = self._sm, ctrl = dma_ctrl ) def _write(self, command=None, data=None): - """SPI write to the device: commands and data.""" + """ + Writes commands and data to the display. 
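# (Worked example of the DREQ index computed above: sm_id=5 selects PIO1,
# state machine 1, so req_num = ((5 // 4) << 3) + (5 % 4) = 8 + 1 = 9, the
# TX DREQ for that state machine in the RP2 DREQ numbering.)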
+ + Args: + command (bytes, optional): Command to send to the display + data (bytes, optional): Data to send to the display + """ # Save the current mode and alt of the spi pins in case they're used by # another device on the same SPI bus - dcMode, dcAlt = self.savePinModeAlt(self.dc) - txMode, txAlt = self.savePinModeAlt(self.tx) - clkMode, clkAlt = self.savePinModeAlt(self.clk) + dcMode, dcAlt = self._save_pin_mode_alt(self._dc) + txMode, txAlt = self._save_pin_mode_alt(self._tx) + clkMode, clkAlt = self._save_pin_mode_alt(self._clk) # Temporarily set the SPI pins to the correct mode and alt for PIO - self.dc.init(mode=Pin.OUT) - self.tx.init(mode=self.txMode, alt=self.txAlt) - self.clk.init(mode=self.clkMode, alt=self.clkAlt) + self._dc.init(mode=Pin.OUT) + self._tx.init(mode=self._txMode, alt=self._txAlt) + self._clk.init(mode=self._clkMode, alt=self._clkAlt) # Write to the display - if self.cs: - self.cs.off() + if self._cs: + self._cs.off() if command is not None: - self.dc.off() + self._dc.off() self._pio_write(command) if data is not None: - self.dc.on() + self._dc.on() self._pio_write(data) - if self.cs: - self.cs.on() + if self._cs: + self._cs.on() # Restore the SPI pins to their original mode and alt - self.dc.init(mode=dcMode, alt=dcAlt) - self.tx.init(mode=txMode, alt=txAlt) - self.clk.init(mode=clkMode, alt=clkAlt) + self._dc.init(mode=dcMode, alt=dcAlt) + self._tx.init(mode=txMode, alt=txAlt) + self._clk.init(mode=clkMode, alt=clkAlt) def _pio_write(self, data): - """Write data to the display using PIO.""" + """ + Writes data to the display using the PIO. + """ # Configure the DMA transfer count and read address count = len(data) if isinstance(data, (bytes, bytearray)) else data.size - self.dma.count = count // self.bytes_per_transfer - self.dma.read = data + self._dma.count = count // self._bytes_per_transfer + self._dma.read = data # Start the state machine and DMA transfer, and wait for it to finish - self.sm.active(1) - self.dma.active(True) - while self.dma.active(): + self._sm.active(1) + self._dma.active(True) + while self._dma.active(): pass # Stop the state machine - self.sm.active(0) + self._sm.active(0) @rp2.asm_pio( out_init = rp2.PIO.OUT_LOW, @@ -160,5 +178,8 @@ def _pio_write(self, data): autopull = True ) def _pio_write_spi(): + """ + PIO program to write data to the display. + """ out(pins, 1).side(0) nop().side(1) diff --git a/cv2_drivers/displays/st7789_spi.py b/cv2_drivers/displays/st7789_spi.py index 11b64d3..6a5cebf 100644 --- a/cv2_drivers/displays/st7789_spi.py +++ b/cv2_drivers/displays/st7789_spi.py @@ -1,28 +1,16 @@ from .st7789 import ST7789 from machine import Pin -# Derived from: +# This class is derived from: # https://github.com/easytarget/st7789-framebuffer/blob/main/st7789_purefb.py +# Released under the MIT license. 
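# (Illustrative note on the two-instruction PIO program above,
# out(pins, 1).side(0) then nop().side(1): one bit is shifted out with the
# clock low, then held while the clock goes high, so each bit costs two
# state machine cycles and the effective SPI clock is the state machine
# frequency divided by 2.)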
+# Copyright (c) 2024 Owen Carter +# Copyright (c) 2024 Ethan Lacasse +# Copyright (c) 2020-2023 Russ Hughes +# Copyright (c) 2019 Ivan Belokobylskiy class ST7789_SPI(ST7789): """ - OpenCV SPI driver for ST7789 displays - - Args: - width (int): display width **Required** - height (int): display height **Required** - spi (SPI): SPI bus **Required** - pin_dc (Pin): Data/Command pin number **Required** - pin_cs (Pin): Chip Select pin number - rotation (int): Orientation of display - - 0-Portrait, default - - 1-Landscape - - 2-Inverted Portrait - - 3-Inverted Landscape - color_order (int): - - RGB: Red, Green Blue, default - - BGR: Blue, Green, Red - reverse_bytes_in_word (bool): - - Enable if the display uses LSB byte order for color words + OpenCV ST7789 display driver using a SPI interface. """ def __init__( self, @@ -32,36 +20,56 @@ def __init__( pin_dc, pin_cs=None, rotation=0, - color_order=ST7789.BGR, + bgr_order=True, reverse_bytes_in_word=True, ): + """ + Initializes the ST7789 SPI display driver. + + Args: + width (int): Display width in pixels + height (int): Display height in pixels + spi (SPI): SPI bus object + pin_dc (int): Data/Command pin number + pin_cs (int, optional): Chip Select pin number + rotation (int, optional): Orientation of display + - 0: Portrait (default) + - 1: Landscape + - 2: Inverted portrait + - 3: Inverted landscape + bgr_order (bool, optional): Color order + - True: BGR (default) + - False: RGB + reverse_bytes_in_word (bool, optional): + - Enable if the display uses LSB byte order for color words + """ # Store SPI arguments - self.spi = spi - self.dc = Pin(pin_dc) # Don't change mode/alt - self.cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs else None - - super().__init__(width, height, rotation, color_order, reverse_bytes_in_word) + self._spi = spi + self._dc = Pin(pin_dc) # Don't change mode/alt + self._cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs else None + + super().__init__(width, height, rotation, bgr_order, reverse_bytes_in_word) def _write(self, command=None, data=None): """SPI write to the device: commands and data.""" # Save the current mode and alt of the DC pin in case it's used by # another device on the same SPI bus - dcMode, dcAlt = self.savePinModeAlt(self.dc) + dcMode, dcAlt = self._save_pin_mode_alt(self._dc) # Temporarily set the DC pin to output mode - self.dc.init(mode=Pin.OUT) + self._dc.init(mode=Pin.OUT) # Write to the display - if self.cs: - self.cs.off() + if self._cs: + self._cs.off() if command is not None: - self.dc.off() - self.spi.write(command) + self._dc.off() + self._spi.write(command) if data is not None: - self.dc.on() - self.spi.write(data) - if self.cs: - self.cs.on() + self._dc.on() + self._spi.write(data) + if self._cs: + self._cs.on() # Restore the DC pin to its original mode and alt - self.dc.init(mode=dcMode, alt=dcAlt) + self._dc.init(mode=dcMode, alt=dcAlt) diff --git a/cv2_drivers/touch_screens/__init__.py b/cv2_drivers/touch_screens/__init__.py index 780188a..f6931c3 100644 --- a/cv2_drivers/touch_screens/__init__.py +++ b/cv2_drivers/touch_screens/__init__.py @@ -1 +1,2 @@ -from . import cst816 \ No newline at end of file +# Import platform agnostic drivers +from . 
import cst816 diff --git a/examples/cv2_hardware_init/__init__.py b/examples/cv2_hardware_init/__init__.py index 8a72ee4..7ca8a21 100644 --- a/examples/cv2_hardware_init/__init__.py +++ b/examples/cv2_hardware_init/__init__.py @@ -11,9 +11,7 @@ # Optional - show a splash image on the display if one is available, or clear # the display of any previous content -if not display.splash(): - if hasattr(display, 'clear'): - display.clear() +display.splash() # Import the camera driver try: diff --git a/examples/cv2_hardware_init/camera.py b/examples/cv2_hardware_init/camera.py index 2f5d51a..1856709 100644 --- a/examples/cv2_hardware_init/camera.py +++ b/examples/cv2_hardware_init/camera.py @@ -31,9 +31,10 @@ # PIO interface, only available on Raspberry Pi RP2 processors # camera = ov5640_pio.OV5640_PIO( # i2c, +# sm_id = 5, # pin_d0 = 8, # pin_vsync = 22, # pin_hsync = 21, # pin_pclk = 20, -# pin_xclk = None # Optional xclock pin, specify if needed +# pin_xclk = 3 # Optional xclock pin, specify if needed # ) diff --git a/examples/ex01_hello_opencv.py b/examples/ex01_hello_opencv.py index e544b81..d8f7bac 100644 --- a/examples/ex01_hello_opencv.py +++ b/examples/ex01_hello_opencv.py @@ -75,3 +75,9 @@ # Print the key pressed print("Key pressed:", chr(key)) + +# Normally at the end of OpenCV scripts, you would call `cv.destroyAllWindows()` +# to close all OpenCV windows. That function doesn't exist in the MicroPython +# port of OpenCV, but you can instead call `display.clear()` to set the display +# to a blank state, or `display.splash()` to show the splash screen +display.clear() # Can instead call `display.splash()` with optional filename diff --git a/examples/ex04_imread_imwrite.py b/examples/ex04_imread_imwrite.py index ce3b62e..3400347 100644 --- a/examples/ex04_imread_imwrite.py +++ b/examples/ex04_imread_imwrite.py @@ -26,7 +26,7 @@ print("Loading image...") img = cv.imread("test_images/sparkfun_logo.png") -# Show the image for 1 second +# Show the image # # Note - If the image is larger or smaller than the display, the behavior will # depend on the display driver. For example, the default ST7789 display driver diff --git a/examples/ex05_performance.py b/examples/ex05_performance.py index 4bbf37b..d46cc58 100644 --- a/examples/ex05_performance.py +++ b/examples/ex05_performance.py @@ -5,7 +5,13 @@ #------------------------------------------------------------------------------- # ex05_performance.py # -# This example +# This example demonstrates some performance optimization techniques, and ways +# to measure performance in the MicroPython port of OpenCV. Read through the +# comments in this example to learn more! +# +# Note that most examples do not include these optimizations for simplicity, but +# if maximum performance is needed for your application, use the techniques +# shown here. #------------------------------------------------------------------------------- # Import OpenCV and hardware initialization module @@ -88,6 +94,9 @@ # this code. 
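# (Illustrative sketch of the preallocated-output pattern this comment
# describes; the QVGA buffer shape and grayscale conversion are assumptions
# for the sake of the example.)
# from ulab import numpy as np
# gray = np.zeros((240, 320), dtype=np.uint8)  # allocated once, before the loop
# while True:
#     ok, frame = camera.read()
#     gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY, gray)  # reuses gray each pass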
If you remove the output arguments from the functions above, # you'll see that the memory consumption increases significantly as new # arrays must be allocated each loop iteration + # + # Note that calling `gc.mem_free()` actually takes a relatively long time to + # execute, so it should only be used for debugging, not in production code mem_free = gc.mem_free() memory_used = last_mem_free - mem_free last_mem_free = mem_free @@ -111,6 +120,13 @@ # frame rate and are okay with occasional stutters # gc.collect() + # For advanced users, you can use the internal buffers of the camera and + # display drivers: `camera._buffer` and `display._buffer`. Using these + # buffers directly can avoid the colorspace conversions implemented in + # `camera.read()` and `display.imshow()`, which can improve performance if + # your application can make use of the native colorspaces and improve + # overall performance + # Check for key presses key = cv.waitKey(1) diff --git a/examples/ex06_detect_sfe_logo.py b/examples/ex06_detect_sfe_logo.py index 28a7d50..b15abee 100644 --- a/examples/ex06_detect_sfe_logo.py +++ b/examples/ex06_detect_sfe_logo.py @@ -71,7 +71,7 @@ # This is the pipeline implementation. This gets called for each frame captured # by the camera in the main loop -def my_pipeline(frame): +def sfe_logo_detection_pipeline(frame): # Here we binarize the image. There are many ways to do this, but here we # simply convert the image to grayscale and then apply Otsu's thresholding # method to create a binary image. This means it will only detect a dark @@ -167,7 +167,7 @@ def my_pipeline(frame): break # Call the pipeline function to process the frame - my_pipeline(frame) + sfe_logo_detection_pipeline(frame) # All processing is done! Calculate the frame rate and display it current_time = time.ticks_us() diff --git a/examples/xrp_examples/ex01_touch_drive.py b/examples/xrp_examples/ex01_touch_drive.py index 8d7c603..fb409c3 100644 --- a/examples/xrp_examples/ex01_touch_drive.py +++ b/examples/xrp_examples/ex01_touch_drive.py @@ -21,85 +21,102 @@ # Import NumPy from ulab import numpy as np -# Initialize arrow button image -btn_arrow_shape = (50, 50, 3) -btn_arrow_cx = btn_arrow_shape[1] // 2 -btn_arrow_cy = btn_arrow_shape[0] // 2 -btn_arrow_length = 30 -btn_arrow_thickness = 5 -btn_arrow_tip_length = 0.5 -btn_arrow_offset = 75 -img_btn_arrow_vertical = np.zeros(btn_arrow_shape, dtype=np.uint8) -img_btn_arrow_vertical[:, :] = (255, 0, 0) -img_btn_arrow_horizontal = img_btn_arrow_vertical.copy() -img_btn_arrow_vertical = cv.arrowedLine( - img_btn_arrow_vertical, - (btn_arrow_cx, btn_arrow_cy + btn_arrow_length // 2), - (btn_arrow_cx, btn_arrow_cy - btn_arrow_length // 2), - (255, 255, 255), - btn_arrow_thickness, - cv.FILLED, - 0, - btn_arrow_tip_length -) -img_btn_arrow_horizontal = cv.arrowedLine( - img_btn_arrow_horizontal, - (btn_arrow_cx - btn_arrow_length // 2, btn_arrow_cy), - (btn_arrow_cx + btn_arrow_length // 2, btn_arrow_cy), - (255, 255, 255), - btn_arrow_thickness, - cv.FILLED, - 0, - btn_arrow_tip_length -) +# Dimensions and properties for the UI elements +ui_shape = (240, 320, 3) +ui_cx = ui_shape[1] // 2 +ui_cy = ui_shape[0] // 2 +button_size = 50 +button_cx = button_size // 2 +button_cy = button_size // 2 +button_spacing = 75 +button_shape = (button_size, button_size, 3) +button_color = (255, 255, 255) +arrow_length = 30 +arrow_thickness = 5 +arrow_tip_length = 0.5 +arrow_background_color = (255, 0, 0) +stop_size = 25 +stop_background_color = (0, 0, 255) -# Initialize stop button image 
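# (Worked example of the button placement arithmetic used below: with
# ui_shape = (240, 320, 3), ui_cx = 160 and ui_cy = 120; a 50x50 button
# with button_cx = button_cy = 25 and button_spacing = 75 puts the forward
# arrow at rows 20:70 and cols 135:185 of the UI image.)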
-btn_stop_shape = (50, 50, 3) -btn_stop_cx = btn_stop_shape[1] // 2 -btn_stop_cy = btn_stop_shape[0] // 2 -btn_stop_size = 25 -img_btn_stop = np.zeros(btn_stop_shape, dtype=np.uint8) -img_btn_stop[:, :] = (0, 0, 255) # Red color -img_btn_stop = cv.rectangle( - img_btn_stop, - (btn_stop_cx - btn_stop_size // 2, btn_stop_cy - btn_stop_size // 2), - (btn_stop_cx + btn_stop_size // 2, btn_stop_cy + btn_stop_size // 2), - (255, 255, 255), # White border - -1 # Fill the rectangle -) +def create_ui_image(): + # Initialize arrow button images. This could be done with a single image + # that gets transposed and flipped, but ulab's transpose() doesn't support + # the axes argument: + # https://github.com/v923z/micropython-ulab/issues/731 + # So we instead create separate images for vertical and horizontal arrows + img_arrow_vertical = np.zeros(button_shape, dtype=np.uint8) + img_arrow_vertical[:, :] = arrow_background_color + img_arrow_horizontal = img_arrow_vertical.copy() + img_arrow_vertical = cv.arrowedLine( + img_arrow_vertical, + (button_cx, button_cy + arrow_length // 2), + (button_cx, button_cy - arrow_length // 2), + button_color, + arrow_thickness, + cv.FILLED, + 0, + arrow_tip_length + ) + img_arrow_horizontal = cv.arrowedLine( + img_arrow_horizontal, + (button_cx - arrow_length // 2, button_cy), + (button_cx + arrow_length // 2, button_cy), + button_color, + arrow_thickness, + cv.FILLED, + 0, + arrow_tip_length + ) -# Initialize UI image -ui_img = np.zeros((240, 320, 3), dtype=np.uint8) -# Draw the stop button in the center -center_x = ui_img.shape[1] // 2 -center_y = ui_img.shape[0] // 2 -ui_img[ - center_y-btn_stop_cy:center_y+btn_stop_cy, - center_x-btn_stop_cx:center_x+btn_stop_cx -] = img_btn_stop -# Draw the forward arrow above the stop button -ui_img[ - center_y-btn_arrow_offset-btn_arrow_cy:center_y-btn_arrow_offset+btn_arrow_cy, - center_x-btn_arrow_cx:center_x+btn_arrow_cx -] = img_btn_arrow_vertical -# Draw the backward arrow below the stop button -ui_img[ - center_y+btn_arrow_offset-btn_arrow_cy:center_y+btn_arrow_offset+btn_arrow_cy, - center_x-btn_arrow_cx:center_x+btn_arrow_cx -] = img_btn_arrow_vertical[::-1, :, :] # Flip the arrow image vertically -# Draw the right arrow to the right of the stop button -ui_img[ - center_y-btn_arrow_cy:center_y+btn_arrow_cy, - center_x+btn_arrow_offset-btn_arrow_cx:center_x+btn_arrow_offset+btn_arrow_cx -] = img_btn_arrow_horizontal -# Draw the left arrow to the left of the stop button -ui_img[ - center_y-btn_arrow_cy:center_y+btn_arrow_cy, - center_x-btn_arrow_offset-btn_arrow_cx:center_x-btn_arrow_offset+btn_arrow_cx -] = img_btn_arrow_horizontal[:, ::-1, :] # Flip the arrow image horizontally + # Initialize stop button image + img_button_stop = np.zeros(button_shape, dtype=np.uint8) + img_button_stop[:, :] = stop_background_color + img_button_stop = cv.rectangle( + img_button_stop, + (button_cx - stop_size // 2, button_cy - stop_size // 2), + (button_cx + stop_size // 2, button_cy + stop_size // 2), + button_color, + cv.FILLED + ) -# Show the UI image on the display -cv.imshow(display, ui_img) + # Initialize UI image + img_ui = np.zeros(ui_shape, dtype=np.uint8) + + # Draw the stop button in the center + img_ui[ + ui_cy-button_cy:ui_cy+button_cy, + ui_cx-button_cx:ui_cx+button_cx + ] = img_button_stop + + # Draw the forward arrow above the stop button + img_ui[ + ui_cy-button_spacing-button_cy:ui_cy-button_spacing+button_cy, + ui_cx-button_cx:ui_cx+button_cx + ] = img_arrow_vertical + + # Draw the backward arrow below the stop 
button + img_ui[ + ui_cy+button_spacing-button_cy:ui_cy+button_spacing+button_cy, + ui_cx-button_cx:ui_cx+button_cx + ] = img_arrow_vertical[::-1, :] # Flip the arrow image vertically + + # Draw the right arrow to the right of the stop button + img_ui[ + ui_cy-button_cy:ui_cy+button_cy, + ui_cx+button_spacing-button_cx:ui_cx+button_spacing+button_cx + ] = img_arrow_horizontal + + # Draw the left arrow to the left of the stop button + img_ui[ + ui_cy-button_cy:ui_cy+button_cy, + ui_cx-button_spacing-button_cx:ui_cx-button_spacing+button_cx + ] = img_arrow_horizontal[:, ::-1] # Flip the arrow image horizontally + + # Return the UI image + return img_ui + +# Create the UI image and show it on the display +cv.imshow(display, create_ui_image()) # Prompt the user to touch the screen to drive around print("Touch the screen to drive around. Press any key to exit.") @@ -110,35 +127,43 @@ if touch_screen.is_touched(): # Read touch coordinates x, y = touch_screen.get_touch_xy() + # Check if the stop button was pressed - if (center_x - btn_stop_cx <= x <= center_x + btn_stop_cx and - center_y - btn_stop_cy <= y <= center_y + btn_stop_cy): + if (ui_cx - button_cx <= x <= ui_cx + button_cx and + ui_cy - button_cy <= y <= ui_cy + button_cy): print("Stop") break + # Check if the forward arrow was pressed - elif (center_x - btn_arrow_cx <= x <= center_x + btn_arrow_cx and - center_y - btn_arrow_offset - btn_arrow_cy <= y <= center_y - btn_arrow_offset + btn_arrow_cy): + elif (ui_cx - button_cx <= x <= ui_cx + button_cx and + ui_cy - button_spacing - button_cy <= y <= ui_cy - button_spacing + button_cy): print("Forward") - drivetrain.straight(20, 0.5) + # drivetrain.straight(20, 0.5) + # Check if the backward arrow was pressed - elif (center_x - btn_arrow_cx <= x <= center_x + btn_arrow_cx and - center_y + btn_arrow_offset - btn_arrow_cy <= y <= center_y + btn_arrow_offset + btn_arrow_cy): + elif (ui_cx - button_cx <= x <= ui_cx + button_cx and + ui_cy + button_spacing - button_cy <= y <= ui_cy + button_spacing + button_cy): print("Backward") - drivetrain.straight(-20, 0.5) + # drivetrain.straight(-20, 0.5) + # Check if the right arrow was pressed - elif (center_y - btn_arrow_cy <= y <= center_y + btn_arrow_cy and - center_x + btn_arrow_offset - btn_arrow_cx <= x <= center_x + btn_arrow_offset + btn_arrow_cx): + elif (ui_cy - button_cy <= y <= ui_cy + button_cy and + ui_cx + button_spacing - button_cx <= x <= ui_cx + button_spacing + button_cx): print("Right") - drivetrain.turn(-90, 0.5) + # drivetrain.turn(-90, 0.5) + # Check if the left arrow was pressed - elif (center_y - btn_arrow_cy <= y <= center_y + btn_arrow_cy and - center_x - btn_arrow_offset - btn_arrow_cx <= x <= center_x - btn_arrow_offset + btn_arrow_cx): + elif (ui_cy - button_cy <= y <= ui_cy + button_cy and + ui_cx - button_spacing - button_cx <= x <= ui_cx - button_spacing + button_cx): print("Left") - drivetrain.turn(90, 0.5) + # drivetrain.turn(90, 0.5) + + # Check for key presses + key = cv.waitKey(1) - if cv.waitKey(1) != -1: - # Exit the loop if any key is pressed + # If any key is pressed, exit the loop + if key != -1: break -# Clear the display +# Clear the display to remove the UI display.splash() diff --git a/examples/xrp_examples/ex02_grab_orange_ring.py b/examples/xrp_examples/ex02_grab_orange_ring.py index f70eb9d..10e4bf6 100644 --- a/examples/xrp_examples/ex02_grab_orange_ring.py +++ b/examples/xrp_examples/ex02_grab_orange_ring.py @@ -14,7 +14,13 @@ # year, but this example assumes there is an orange ring in front of the 
robot # that needs to be grabbed. This example demonstrates how to detect the ring, # calculate its distance and position relative to the robot in real-world units, -# then drive the robot to grab it. +# then drive the robot to grab it. This requires the servo arm to be mounted to +# the front of the chassis right next to the camera, so it can reach through the +# ring to grab it. +# +# The ring used in this example is from the 2020-2021 FIRST Tech Challenge game +# Ultimate Goal, and can be purchased here: +# https://andymark.com/products/5-in-foam-ring #------------------------------------------------------------------------------- # Import XRPLib defaults @@ -31,9 +37,9 @@ import math # This is the pipeline implementation that attempts to find an orange ring in -# an image, and returns the real-world distance to the object and its left/right +# an image, and returns the real-world distance to the ring and its left/right # position relative to the center of the image in centimeters -def my_pipeline(frame): +def find_orange_ring_pipeline(frame): # Convert the frame to HSV color space, which is often more effective for # color-based segmentation tasks than RGB or BGR color spaces hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV) @@ -88,12 +94,12 @@ def my_pipeline(frame): return (-1, -1) # Calculate the bounding rectangle of the contour, and use that to calculate - # the center coordinates of the object + # the center coordinates of the ring left, top, width, height = cv.boundingRect(best_contour) center_x = left + width // 2 center_y = top + height // 2 - # Now we can calculate the real-world distance to the object based on its + # Now we can calculate the real-world distance to the ring based on its # size. We'll first estimate the diameter of the ring in pixels by taking # the maximum of the width and height of the bounding rectangle. This # compensates for the fact that the ring may be tilted @@ -103,16 +109,21 @@ def my_pipeline(frame): # # distance_cm = diameter_cm * focal_length_px / diameter_px # - # However almost every camera lens has some distortion, so there are - # corrections needed to account for that. This example has been tested with - # the HM01B0, and the calculation below gives a decent estimate of the - # distance in centimeters - focal_length_px = 180 + # Almost every camera lens has some distortion, so this may not be perfect, + # but testing with the HM01B0 has shown it to be good enough. Note that this + # distance is measured from the camera lens + # + # The focal length depends on the exact camera being used. This example + # assumes the HM01B0 camera board sold by SparkFun, which has an effective + # focal length (EFL) of 0.66mm, and a pixel size of 3.6um. We can calculate + # the focal length in pixels from these, which were found in the datasheet: + # https://mm.digikey.com/Volume0/opasdata/d220001/medias/docus/5458/HM01B0-ANA-00FT870.pdf + focal_length_px = 660 / 3.6 diameter_cm = 12.7 - distance_cm = diameter_cm * focal_length_px / diameter_px - 10 + distance_cm = diameter_cm * focal_length_px / diameter_px # Now with our distance estimate, we can calculate how far left or right the - # object is from the center in the same real-world units. Assuming a perfect + # ring is from the center in the same real-world units. 
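# (Worked example of the distance estimate above: focal_length_px =
# 660 / 3.6, about 183.3, so a ring of diameter_cm = 12.7 spanning a
# hypothetical diameter_px = 60 pixels is estimated at
# 12.7 * 183.3 / 60, roughly 38.8 cm from the lens.)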
Assuming a perfect # lens, the position can be calculated as: # # position_x_cm = distance_cm * position_x_px / focal_length_px @@ -128,7 +139,7 @@ def my_pipeline(frame): frame = cv.putText(frame, f"D={distance_cm:.1f}cm", (left, top - 25), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2) frame = cv.putText(frame, f"X={position_x_cm:.1f}cm", (left, top - 40), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2) - # Now we can return the distance and position of the object in cm, since + # Now we can return the distance and position of the ring in cm, since # that's the only data we need from this pipeline return (distance_cm, position_x_cm) @@ -142,7 +153,7 @@ def my_pipeline(frame): # Prompt the user to press a key to continue print("Detecting ring...") -# Loop until the object is found or the user presses a key +# Loop until the ring is found or the user presses a key while True: # Read a frame from the camera success, frame = camera.read() @@ -150,8 +161,8 @@ def my_pipeline(frame): print("Error reading frame from camera") break - # Call the pipeline function to find the object - distance_cm, position_x_cm = my_pipeline(frame) + # Call the pipeline function to find the ring + distance_cm, position_x_cm = find_orange_ring_pipeline(frame) # Display the frame cv.imshow(display, frame) @@ -167,27 +178,25 @@ def my_pipeline(frame): if key != -1: break -# Print the distance and position of the object -print(f"Found object at distance {distance_cm:.1f} cm, position {position_x_cm:.1f} cm from center") +# Print the distance and position of the ring +print(f"Found ring at distance {distance_cm:.1f} cm, X position {position_x_cm:.1f} cm from center") # Release the camera, we're done with it camera.release() -# Move the servo to pick up the object +# Move the servo to go through the center of the ring servo_one.set_angle(45) -# Turn to face the object. We first calculate the angle to turn based on the -# position of the object +# Turn to face the ring. We first calculate the angle to turn based on the +# position of the ring angle = -math.atan2(position_x_cm, distance_cm) * 180 / math.pi drivetrain.turn(angle) -# Drive forwards to the object. 
Drive a bit further than the distance to the
-# object to ensure the arm goes through the ring
-distance_cm += 10
+# Drive forwards to put the arm through the ring
 drivetrain.straight(distance_cm)
 
 # Rotate the servo to pick up the ring
 servo_one.set_angle(90)
 
-# Drive backwards to pull the ring off the rung
+# Drive backwards to grab the ring
 drivetrain.straight(-10)

From 10eed928555b1e547b393841aba2028e3432813d Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 23 Jul 2025 19:05:53 -0600
Subject: [PATCH 138/158] Add header comments to files in src

Fixes #9
---
 src/alloc.c          | 14 ++++++++++++++
 src/convert.cpp      | 13 +++++++++++++
 src/convert.h        | 13 +++++++++++++
 src/core.cpp         | 12 ++++++++++++
 src/core.h           | 12 ++++++++++++
 src/highgui.cpp      | 12 ++++++++++++
 src/highgui.h        | 12 ++++++++++++
 src/imgcodecs.cpp    | 12 ++++++++++++
 src/imgcodecs.h      | 12 ++++++++++++
 src/imgproc.cpp      | 12 ++++++++++++
 src/imgproc.h        | 12 ++++++++++++
 src/numpy.cpp        | 27 +++++++++++++++++++++++++--
 src/numpy.h          | 25 +++++++++++++++++++++++++
 src/opencv_upy.c     | 12 ++++++++++++
 src/opencv_upy.cmake | 10 ++++++++++
 15 files changed, 208 insertions(+), 2 deletions(-)

diff --git a/src/alloc.c b/src/alloc.c
index 6bf07b5..c8fc5ec 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -1,3 +1,17 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * alloc.c
+ *
+ * Wrapper functions for malloc(), free(), calloc(), and realloc(). These ensure
+ * memory gets allocated on the C heap before the MicroPython garbage collector
+ * has been initialized, and in the GC pool afterwards.
+ *------------------------------------------------------------------------------
+ */
+
 // C headers
 #include "py/runtime.h"

diff --git a/src/convert.cpp b/src/convert.cpp
index e317079..1e425ba 100644
--- a/src/convert.cpp
+++ b/src/convert.cpp
@@ -1,3 +1,16 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * convert.cpp
+ *
+ * Helper functions to convert between various data types from MicroPython, ulab
+ * NumPy, and OpenCV
+ *------------------------------------------------------------------------------
+ */
+
 // C++ headers
 #include "convert.h"
 #include "numpy.h"

diff --git a/src/convert.h b/src/convert.h
index 0458b43..7310dfd 100644
--- a/src/convert.h
+++ b/src/convert.h
@@ -1,3 +1,16 @@
+/*
+ *------------------------------------------------------------------------------
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025 SparkFun Electronics
+ *------------------------------------------------------------------------------
+ * convert.h
+ *
+ * Helper functions to convert between various data types from MicroPython, ulab
+ * NumPy, and OpenCV.
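+ *
+ * As an aside, the alloc.c wrappers described above presumably reduce to a
+ * pattern like this sketch, where `gc_ready` is a hypothetical flag and not
+ * necessarily how the real code decides which heap to use:
+ *
+ *     static bool gc_ready = false;   // set once the MicroPython GC is up
+ *
+ *     void *cv2_malloc(size_t size) {
+ *         // C heap before the GC exists, GC pool afterwards
+ *         return gc_ready ? m_malloc(size) : malloc(size);
+ *     }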
+ *------------------------------------------------------------------------------ + */ + // C++ headers #include "opencv2/core.hpp" diff --git a/src/core.cpp b/src/core.cpp index 2160273..56065ed 100644 --- a/src/core.cpp +++ b/src/core.cpp @@ -1,3 +1,15 @@ +/* + *------------------------------------------------------------------------------ + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2025 SparkFun Electronics + *------------------------------------------------------------------------------ + * core.cpp + * + * MicroPython wrappers for functions from the OpenCV core module. + *------------------------------------------------------------------------------ + */ + // C++ headers #include "opencv2/core.hpp" #include "opencv2/imgcodecs.hpp" diff --git a/src/core.h b/src/core.h index 9aabf89..81a525a 100644 --- a/src/core.h +++ b/src/core.h @@ -1,3 +1,15 @@ +/* + *------------------------------------------------------------------------------ + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2025 SparkFun Electronics + *------------------------------------------------------------------------------ + * core.h + * + * MicroPython wrappers for functions from the OpenCV core module. + *------------------------------------------------------------------------------ + */ + // C headers #include "py/runtime.h" diff --git a/src/highgui.cpp b/src/highgui.cpp index 11743b9..06945f8 100644 --- a/src/highgui.cpp +++ b/src/highgui.cpp @@ -1,3 +1,15 @@ +/* + *------------------------------------------------------------------------------ + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2025 SparkFun Electronics + *------------------------------------------------------------------------------ + * highgui.cpp + * + * MicroPython wrappers for functions from the OpenCV highgui module. + *------------------------------------------------------------------------------ + */ + // C++ headers #include "opencv2/core.hpp" #include "convert.h" diff --git a/src/highgui.h b/src/highgui.h index ab17f90..e99c9a8 100644 --- a/src/highgui.h +++ b/src/highgui.h @@ -1,3 +1,15 @@ +/* + *------------------------------------------------------------------------------ + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2025 SparkFun Electronics + *------------------------------------------------------------------------------ + * highgui.h + * + * MicroPython wrappers for functions from the OpenCV highgui module. + *------------------------------------------------------------------------------ + */ + // C headers #include "py/runtime.h" diff --git a/src/imgcodecs.cpp b/src/imgcodecs.cpp index 710364e..4f15851 100644 --- a/src/imgcodecs.cpp +++ b/src/imgcodecs.cpp @@ -1,3 +1,15 @@ +/* + *------------------------------------------------------------------------------ + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2025 SparkFun Electronics + *------------------------------------------------------------------------------ + * imgcodecs.cpp + * + * MicroPython wrappers for functions from the OpenCV imgcodecs module. 
+ *------------------------------------------------------------------------------ + */ + // C++ headers #include "opencv2/core.hpp" #include "opencv2/imgcodecs.hpp" diff --git a/src/imgcodecs.h b/src/imgcodecs.h index f7e64f3..d07b777 100644 --- a/src/imgcodecs.h +++ b/src/imgcodecs.h @@ -1,3 +1,15 @@ +/* + *------------------------------------------------------------------------------ + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2025 SparkFun Electronics + *------------------------------------------------------------------------------ + * imgcodecs.h + * + * MicroPython wrappers for functions from the OpenCV imgcodecs module. + *------------------------------------------------------------------------------ + */ + // C headers #include "py/runtime.h" diff --git a/src/imgproc.cpp b/src/imgproc.cpp index 9620c85..2cbf30e 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -1,3 +1,15 @@ +/* + *------------------------------------------------------------------------------ + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2025 SparkFun Electronics + *------------------------------------------------------------------------------ + * imgproc.cpp + * + * MicroPython wrappers for functions from the OpenCV imgproc module. + *------------------------------------------------------------------------------ + */ + // C++ headers #include "opencv2/core.hpp" #include "opencv2/imgproc.hpp" diff --git a/src/imgproc.h b/src/imgproc.h index 6ef3dce..0847117 100644 --- a/src/imgproc.h +++ b/src/imgproc.h @@ -1,3 +1,15 @@ +/* + *------------------------------------------------------------------------------ + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2025 SparkFun Electronics + *------------------------------------------------------------------------------ + * imgproc.h + * + * MicroPython wrappers for functions from the OpenCV imgproc module. + *------------------------------------------------------------------------------ + */ + // C headers #include "py/runtime.h" diff --git a/src/numpy.cpp b/src/numpy.cpp index ca84850..aeee8d7 100644 --- a/src/numpy.cpp +++ b/src/numpy.cpp @@ -1,9 +1,32 @@ +/* + *------------------------------------------------------------------------------ + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2025 SparkFun Electronics + *------------------------------------------------------------------------------ + * numpy.cpp + * + * OpenCV Mat allocator implementation for ulab NumPy. Derived from: + * https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_numpy.cpp + * Licensed under the Apache License, Version 2.0 + * Copyright (C) 2000-2022, Intel Corporation, all rights reserved. + * Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. + * Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved. + * Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. + * Copyright (C) 2015-2023, OpenCV Foundation, all rights reserved. + * Copyright (C) 2008-2016, Itseez Inc., all rights reserved. + * Copyright (C) 2019-2023, Xperience AI, all rights reserved. + * Copyright (C) 2019-2022, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. + * Copyright (C) 2022-2023, Southern University of Science And Technology, all rights reserved. + * + * Third party copyrights are property of their respective owners. 
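+ *
+ * For orientation, a custom Mat allocator like this one is typically
+ * installed globally through OpenCV's public API, roughly as in this
+ * illustrative sketch (the registration in this port may be done elsewhere):
+ *
+ *     static NumpyAllocator g_numpyAllocator;
+ *     cv::Mat::setDefaultAllocator(&g_numpyAllocator);
+ *
+ * after which every Mat the library allocates can be backed by ulab ndarray
+ * storage.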
+ *------------------------------------------------------------------------------ + */ + // C++ headers #include "numpy.h" #include "convert.h" -// Derived from: -// https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_numpy.cpp#L11-L22 UMatData* NumpyAllocator::allocate(ndarray_obj_t* ndarray, int dims, const int* sizes, int type, size_t* step) const { UMatData* u = new UMatData(this); diff --git a/src/numpy.h b/src/numpy.h index 89f0dd2..9f58248 100644 --- a/src/numpy.h +++ b/src/numpy.h @@ -1,3 +1,28 @@ +/* + *------------------------------------------------------------------------------ + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2025 SparkFun Electronics + *------------------------------------------------------------------------------ + * numpy.h + * + * OpenCV Mat allocator implementation for ulab NumPy. Derived from: + * https://github.com/opencv/opencv/blob/aee828ac6ed3e45d7ca359d125349a570ca4e098/modules/python/src2/cv2_numpy.hpp + * Licensed under the Apache License, Version 2.0 + * Copyright (C) 2000-2022, Intel Corporation, all rights reserved. + * Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. + * Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved. + * Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. + * Copyright (C) 2015-2023, OpenCV Foundation, all rights reserved. + * Copyright (C) 2008-2016, Itseez Inc., all rights reserved. + * Copyright (C) 2019-2023, Xperience AI, all rights reserved. + * Copyright (C) 2019-2022, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved. + * Copyright (C) 2022-2023, Southern University of Science And Technology, all rights reserved. + * + * Third party copyrights are property of their respective owners. + *------------------------------------------------------------------------------ + */ + // C++ headers #include "opencv2/core.hpp" diff --git a/src/opencv_upy.c b/src/opencv_upy.c index e2cbb59..beffda4 100644 --- a/src/opencv_upy.c +++ b/src/opencv_upy.c @@ -1,3 +1,15 @@ +/* + *------------------------------------------------------------------------------ + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2025 SparkFun Electronics + *------------------------------------------------------------------------------ + * opencv_upy.c + * + * OpenCV module registration. + *------------------------------------------------------------------------------ + */ + #include "core.h" #include "highgui.h" #include "imgcodecs.h" diff --git a/src/opencv_upy.cmake b/src/opencv_upy.cmake index 3f39c54..84bf549 100644 --- a/src/opencv_upy.cmake +++ b/src/opencv_upy.cmake @@ -1,3 +1,13 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# opencv_upy.cmake +# +# CMake file for the MicroPython port of OpenCV. +#------------------------------------------------------------------------------- + # Create an INTERFACE library for our CPP module. 
add_library(usermod_cv2 INTERFACE)

From 72887c73364872eba447e587928a51c743e4a2ba Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 23 Jul 2025 19:27:51 -0600
Subject: [PATCH 139/158] Add header comments to files in cv2_drivers

---
 cv2_drivers/__init__.py                       | 12 ++++++++++-
 cv2_drivers/cameras/__init__.py               | 10 +++++++++
 cv2_drivers/cameras/cv2_camera.py             | 10 +++++++++
 cv2_drivers/cameras/dvp_camera.py             | 10 +++++++++
 cv2_drivers/cameras/dvp_rp2_pio.py            | 19 ++++++++++++++---
 cv2_drivers/cameras/hm01b0.py                 | 19 +++++++++++++----
 cv2_drivers/cameras/hm01b0_pio.py             | 11 ++++++++++
 cv2_drivers/cameras/ov5640.py                 | 19 +++++++++++++----
 cv2_drivers/cameras/ov5640_pio.py             | 11 ++++++++++
 cv2_drivers/displays/__init__.py              | 10 +++++++++
 cv2_drivers/displays/cv2_display.py           | 10 +++++++++
 cv2_drivers/displays/st7789.py                | 19 +++++++++++++----
 cv2_drivers/displays/st7789_pio.py            | 21 +++++++++++++++++--
 cv2_drivers/displays/st7789_spi.py            | 17 ++++++++++++---
 cv2_drivers/touch_screens/__init__.py         | 10 +++++++++
 cv2_drivers/touch_screens/cst816.py           | 17 +++++++++++++--
 cv2_drivers/touch_screens/cv2_touch_screen.py | 10 +++++++++
 17 files changed, 212 insertions(+), 23 deletions(-)

diff --git a/cv2_drivers/__init__.py b/cv2_drivers/__init__.py
index 7de84a0..9db91e7 100644
--- a/cv2_drivers/__init__.py
+++ b/cv2_drivers/__init__.py
@@ -1,3 +1,13 @@
+#-------------------------------------------------------------------------------
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025 SparkFun Electronics
+#-------------------------------------------------------------------------------
+# cv2_drivers/__init__.py
+#
+# Imports all available drivers for MicroPython OpenCV.
+#-------------------------------------------------------------------------------
+
 from . import displays
 from . import cameras
-from . import touch_screens
\ No newline at end of file
+from . import touch_screens

diff --git a/cv2_drivers/cameras/__init__.py b/cv2_drivers/cameras/__init__.py
index b6a2fed..cc46d38 100644
--- a/cv2_drivers/cameras/__init__.py
+++ b/cv2_drivers/cameras/__init__.py
@@ -1,3 +1,13 @@
+#-------------------------------------------------------------------------------
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025 SparkFun Electronics
+#-------------------------------------------------------------------------------
+# cv2_drivers/cameras/__init__.py
+#
+# Imports all available camera drivers for MicroPython OpenCV.
+#-------------------------------------------------------------------------------
+
 # Import sys module to check platform
 import sys

diff --git a/cv2_drivers/cameras/cv2_camera.py b/cv2_drivers/cameras/cv2_camera.py
index c4641a7..4cd362b 100644
--- a/cv2_drivers/cameras/cv2_camera.py
+++ b/cv2_drivers/cameras/cv2_camera.py
@@ -1,3 +1,13 @@
+#-------------------------------------------------------------------------------
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025 SparkFun Electronics
+#-------------------------------------------------------------------------------
+# cv2_camera.py
+#
+# Base class for OpenCV camera drivers.
+#-------------------------------------------------------------------------------
+
 class CV2_Camera():
     """
     Base class for OpenCV camera drivers.
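(Aside: the platform check noted in cameras/__init__.py above presumably
gates the RP2-only PIO drivers with something like this sketch; the actual
logic in the module may differ.)

    import sys
    if sys.platform == "rp2":
        from . import hm01b0_pio
        from . import ov5640_pio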
diff --git a/cv2_drivers/cameras/dvp_camera.py b/cv2_drivers/cameras/dvp_camera.py index 93faaaa..0799498 100644 --- a/cv2_drivers/cameras/dvp_camera.py +++ b/cv2_drivers/cameras/dvp_camera.py @@ -1,3 +1,13 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# dvp_camera.py +# +# Base class for OpenCV DVP (Digital Video Port) camera drivers. +#------------------------------------------------------------------------------- + from .cv2_camera import CV2_Camera class DVP_Camera(CV2_Camera): diff --git a/cv2_drivers/cameras/dvp_rp2_pio.py b/cv2_drivers/cameras/dvp_rp2_pio.py index ec07a52..1549645 100644 --- a/cv2_drivers/cameras/dvp_rp2_pio.py +++ b/cv2_drivers/cameras/dvp_rp2_pio.py @@ -1,10 +1,23 @@ -import rp2 -from machine import Pin, PWM - +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# dvp_rp2_pio.py +# +# This class implements a DVP (Digital Video Port) interface using the RP2 PIO +# (Programmable Input/Output) interface. This is only available on Raspberry Pi +# RP2 processors. +# # This class is derived from: # https://github.com/adafruit/Adafruit_ImageCapture/blob/main/src/arch/rp2040.cpp # Released under the MIT license. # Copyright (c) 2021 Adafruit Industries +#------------------------------------------------------------------------------- + +import rp2 +from machine import Pin, PWM + class DVP_RP2_PIO(): """ This class implements a DVP (Digital Video Port) interface using the RP2 PIO diff --git a/cv2_drivers/cameras/hm01b0.py b/cv2_drivers/cameras/hm01b0.py index 3b781fc..e89b6ba 100644 --- a/cv2_drivers/cameras/hm01b0.py +++ b/cv2_drivers/cameras/hm01b0.py @@ -1,11 +1,22 @@ -from .dvp_camera import DVP_Camera -from time import sleep_us -import cv2 - +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# hm01b0.py +# +# Base class for OpenCV HM01B0 camera drivers. +# # This class is derived from: # https://github.com/openmv/openmv/blob/5acf5baf92b4314a549bdd068138e5df6cc0bac7/drivers/sensors/hm01b0.c # Released under the MIT license. # Copyright (C) 2013-2024 OpenMV, LLC. +#------------------------------------------------------------------------------- + +from .dvp_camera import DVP_Camera +from time import sleep_us +import cv2 + class HM01B0(DVP_Camera): """ Base class for OpenCV HM01B0 camera drivers. diff --git a/cv2_drivers/cameras/hm01b0_pio.py b/cv2_drivers/cameras/hm01b0_pio.py index 8ed6b01..e07dc31 100644 --- a/cv2_drivers/cameras/hm01b0_pio.py +++ b/cv2_drivers/cameras/hm01b0_pio.py @@ -1,3 +1,14 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# hm01b0_pio.py +# +# OpenCV HM01B0 camera driver using a PIO interface. Only available on +# Raspberry Pi RP2 processors. 
+#------------------------------------------------------------------------------- + from .hm01b0 import HM01B0 from .dvp_rp2_pio import DVP_RP2_PIO from ulab import numpy as np diff --git a/cv2_drivers/cameras/ov5640.py b/cv2_drivers/cameras/ov5640.py index 3a26132..ce18c9b 100644 --- a/cv2_drivers/cameras/ov5640.py +++ b/cv2_drivers/cameras/ov5640.py @@ -1,11 +1,22 @@ -from .dvp_camera import DVP_Camera -from time import sleep_us -import cv2 - +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# ov5640.py +# +# Base class for OpenCV OV5640 camera drivers. +# # This class is derived from: # https://github.com/adafruit/Adafruit_CircuitPython_OV5640 # Released under the MIT license. # Copyright (c) 2021 Jeff Epler for Adafruit Industries +#------------------------------------------------------------------------------- + +from .dvp_camera import DVP_Camera +from time import sleep_us +import cv2 + class OV5640(DVP_Camera): """ Base class for OpenCV OV5640 camera drivers. diff --git a/cv2_drivers/cameras/ov5640_pio.py b/cv2_drivers/cameras/ov5640_pio.py index 1b7bd88..dd35f05 100644 --- a/cv2_drivers/cameras/ov5640_pio.py +++ b/cv2_drivers/cameras/ov5640_pio.py @@ -1,3 +1,14 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# ov5640_pio.py +# +# OpenCV OV5640 camera driver using a PIO interface. Only available on +# Raspberry Pi RP2 processors. +#------------------------------------------------------------------------------- + from .ov5640 import OV5640 from .dvp_rp2_pio import DVP_RP2_PIO from ulab import numpy as np diff --git a/cv2_drivers/displays/__init__.py b/cv2_drivers/displays/__init__.py index dbbadf7..a4b35ed 100644 --- a/cv2_drivers/displays/__init__.py +++ b/cv2_drivers/displays/__init__.py @@ -1,3 +1,13 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# cv2_drivers/displays/__init__.py +# +# Imports all available display drivers for MicroPython OpenCV. +#------------------------------------------------------------------------------- + # Import platform agnostic drivers from . import st7789_spi diff --git a/cv2_drivers/displays/cv2_display.py b/cv2_drivers/displays/cv2_display.py index 745d0a8..15bf249 100644 --- a/cv2_drivers/displays/cv2_display.py +++ b/cv2_drivers/displays/cv2_display.py @@ -1,3 +1,13 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# cv2_display.py +# +# Base class for OpenCV display drivers. 
+#------------------------------------------------------------------------------- + import cv2 from ulab import numpy as np from machine import Pin diff --git a/cv2_drivers/displays/st7789.py b/cv2_drivers/displays/st7789.py index 99a531f..80d47bf 100644 --- a/cv2_drivers/displays/st7789.py +++ b/cv2_drivers/displays/st7789.py @@ -1,7 +1,12 @@ -from .cv2_display import CV2_Display -from time import sleep_ms -import struct - +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# st7789.py +# +# Base class for OpenCV ST7789 display drivers. +# # This class is derived from: # https://github.com/easytarget/st7789-framebuffer/blob/main/st7789_purefb.py # Released under the MIT license. @@ -9,6 +14,12 @@ # Copyright (c) 2024 Ethan Lacasse # Copyright (c) 2020-2023 Russ Hughes # Copyright (c) 2019 Ivan Belokobylskiy +#------------------------------------------------------------------------------- + +from .cv2_display import CV2_Display +from time import sleep_ms +import struct + class ST7789(CV2_Display): """ Base class for OpenCV ST7789 display drivers. diff --git a/cv2_drivers/displays/st7789_pio.py b/cv2_drivers/displays/st7789_pio.py index 4e439ff..9bdca53 100644 --- a/cv2_drivers/displays/st7789_pio.py +++ b/cv2_drivers/displays/st7789_pio.py @@ -1,9 +1,26 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# st7789_pio.py +# +# OpenCV ST7789 display driver using a PIO interface. Only available on +# Raspberry Pi RP2 processors. +# +# This class is derived from: +# https://github.com/raspberrypi/pico-examples/tree/master/pio/st7789_lcd +# Released under the MIT license. +# Copyright (c) 2024 Owen Carter +# Copyright (c) 2024 Ethan Lacasse +# Copyright (c) 2020-2023 Russ Hughes +# Copyright (c) 2019 Ivan Belokobylskiy +#------------------------------------------------------------------------------- + from .st7789 import ST7789 from machine import Pin import rp2 -# Derived from: -# https://github.com/raspberrypi/pico-examples/tree/master/pio/st7789_lcd class ST7789_PIO(ST7789): """ OpenCV ST7789 display driver using a PIO interface. Only available on diff --git a/cv2_drivers/displays/st7789_spi.py b/cv2_drivers/displays/st7789_spi.py index 6a5cebf..0485575 100644 --- a/cv2_drivers/displays/st7789_spi.py +++ b/cv2_drivers/displays/st7789_spi.py @@ -1,6 +1,12 @@ -from .st7789 import ST7789 -from machine import Pin - +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics +#------------------------------------------------------------------------------- +# st7789_spi.py +# +# OpenCV ST7789 display driver using a SPI interface. +# # This class is derived from: # https://github.com/easytarget/st7789-framebuffer/blob/main/st7789_purefb.py # Released under the MIT license. 
@@ -8,6 +14,11 @@
 # Copyright (c) 2024 Ethan Lacasse
 # Copyright (c) 2020-2023 Russ Hughes
 # Copyright (c) 2019 Ivan Belokobylskiy
+#-------------------------------------------------------------------------------
+
+from .st7789 import ST7789
+from machine import Pin
+
 class ST7789_SPI(ST7789):
     """
     OpenCV ST7789 display driver using a SPI interface.

diff --git a/cv2_drivers/touch_screens/__init__.py b/cv2_drivers/touch_screens/__init__.py
index f6931c3..48e2e4e 100644
--- a/cv2_drivers/touch_screens/__init__.py
+++ b/cv2_drivers/touch_screens/__init__.py
@@ -1,2 +1,12 @@
+#-------------------------------------------------------------------------------
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025 SparkFun Electronics
+#-------------------------------------------------------------------------------
+# cv2_drivers/touch_screens/__init__.py
+#
+# Imports all available touch screen drivers for MicroPython OpenCV.
+#-------------------------------------------------------------------------------
+
 # Import platform agnostic drivers
 from . import cst816

diff --git a/cv2_drivers/touch_screens/cst816.py b/cv2_drivers/touch_screens/cst816.py
index a6de1df..0819250 100644
--- a/cv2_drivers/touch_screens/cst816.py
+++ b/cv2_drivers/touch_screens/cst816.py
@@ -1,7 +1,20 @@
+#-------------------------------------------------------------------------------
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025 SparkFun Electronics
+#-------------------------------------------------------------------------------
+# cst816.py
+#
+# OpenCV CST816 touch screen driver using an I2C interface.
+#
+# This class is derived from:
+# https://github.com/fbiego/CST816S
+# Released under the MIT license.
+# Copyright (c) 2021 Felix Biego
+#-------------------------------------------------------------------------------
+
 from .cv2_touch_screen import CV2_Touch_Screen
 
-# Derived from:
-# https://github.com/fbiego/CST816S
 class CST816(CV2_Touch_Screen):
     _I2C_ADDRESS = 0x15
     _CHIP_ID = 0xB6

diff --git a/cv2_drivers/touch_screens/cv2_touch_screen.py b/cv2_drivers/touch_screens/cv2_touch_screen.py
index 3940dd3..094ea80 100644
--- a/cv2_drivers/touch_screens/cv2_touch_screen.py
+++ b/cv2_drivers/touch_screens/cv2_touch_screen.py
@@ -1,3 +1,13 @@
+#-------------------------------------------------------------------------------
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025 SparkFun Electronics
+#-------------------------------------------------------------------------------
+# cv2_touch_screen.py
+#
+# Base class for OpenCV touch screen drivers.
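+#
+# Typical use of a driver built on this base class, based on the CST816
+# methods shown in this patch (a sketch, not code from the repo):
+#
+#     touch = CST816(i2c)
+#     if touch.is_touched():
+#         x, y = touch.get_touch_xy()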
+#------------------------------------------------------------------------------- + class CV2_Touch_Screen(): def __init__(self): pass From 94beeac2f880e17e9d67c96e5f7608dc461d74d8 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 23 Jul 2025 19:51:10 -0600 Subject: [PATCH 140/158] Add missing docstring comments in drivers --- cv2_drivers/cameras/dvp_rp2_pio.py | 3 ++ cv2_drivers/cameras/hm01b0.py | 9 +++- cv2_drivers/cameras/ov5640.py | 24 ++++++++++ cv2_drivers/displays/st7789_pio.py | 3 ++ cv2_drivers/displays/st7789_spi.py | 8 +++- cv2_drivers/touch_screens/cst816.py | 48 ++++++++++++++++--- cv2_drivers/touch_screens/cv2_touch_screen.py | 6 +++ 7 files changed, 93 insertions(+), 8 deletions(-) diff --git a/cv2_drivers/cameras/dvp_rp2_pio.py b/cv2_drivers/cameras/dvp_rp2_pio.py index 1549645..81d2840 100644 --- a/cv2_drivers/cameras/dvp_rp2_pio.py +++ b/cv2_drivers/cameras/dvp_rp2_pio.py @@ -117,6 +117,9 @@ def _active(self, active=None): - True: Activate the DVP interface - False: Deactivate the DVP interface - None: Get the current active state + + Returns: + bool: Current active state if no argument is provided """ # If no argument is provided, return the current active state if active == None: diff --git a/cv2_drivers/cameras/hm01b0.py b/cv2_drivers/cameras/hm01b0.py index e89b6ba..b847a6b 100644 --- a/cv2_drivers/cameras/hm01b0.py +++ b/cv2_drivers/cameras/hm01b0.py @@ -303,14 +303,21 @@ def _set_mode(self, mode): """ Sets the operating mode of the HM01B0 sensor. Args: - mode (int): The mode to set, e.g., MODE_STREAMING. + mode (int): The mode to set, e.g., _HIMAX_MODE_STREAMING. """ self._write_register(self._MODE_SELECT, mode) def _trigger(self): + """ + Triggers the HM01B0 sensor to capture a number of images. See + _set_n_frames(). + """ self._write_register(self._MODE_SELECT, self._HIMAX_MODE_STREAMING_NFRAMES) def _set_n_frames(self, n_frames): + """ + Sets the number of frames to capture before stopping. See _trigger(). + """ self._write_register(self._PMU_AUTOSLEEP_FRAMECNT, n_frames) def _send_init(self, num_data_pins): diff --git a/cv2_drivers/cameras/ov5640.py b/cv2_drivers/cameras/ov5640.py index ce18c9b..2539942 100644 --- a/cv2_drivers/cameras/ov5640.py +++ b/cv2_drivers/cameras/ov5640.py @@ -958,6 +958,9 @@ def _write_list(self, data): """ Initializes the OV5640 sensor with default settings. This includes setting up exposure, gain, and frame timing. + + Args: + data (list): List of register-value pairs to write to the sensor. """ for i in range(len(data) // 2): reg = data[i * 2] @@ -1038,6 +1041,16 @@ def _set_pll( ) -> None: """ Sets the PLL (Phase-Locked Loop) configuration for the OV5640 camera. + + Args: + bypass (bool): Whether to bypass the PLL. + multiplier (int): PLL multiplier. + sys_div (int): System divider. + pre_div (int): Pre-divider. + root_2x (bool): Whether to use 2x root clock. + pclk_root_div (int): PCLK root divider. + pclk_manual (bool): Whether to use manual PCLK. + pclk_div (int): PCLK divider. """ if ( multiplier > 252 @@ -1125,6 +1138,11 @@ def _set_image_options(self) -> None: def _write_addr_reg(self, reg: int, x_value: int, y_value: int) -> None: """ Writes 2 16-bit values to 4 8-bit registers. + + Args: + reg (int): The base register address to write to. + x_value (int): The first 16-bit value to write. + y_value (int): The second 16-bit value to write. 
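+
+        For example (illustrative values): x_value=0x0140 and y_value=0x00F0
+        are written out as the four bytes [0x01, 0x40, 0x00, 0xF0].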
""" self._write_register(reg, [ (x_value >> 8) & 0xFF, @@ -1136,6 +1154,12 @@ def _write_addr_reg(self, reg: int, x_value: int, y_value: int) -> None: def _write_reg_bits(self, reg: int, mask: int, enable: bool) -> None: """ Writes a bitmask to a register, enabling or disabling specific bits. + + Args: + reg (int): The register address to write to. + mask (int): The bitmask to apply. + enable (bool): If True, enables the bits in the mask; if False, + disables them. """ val = self._read_register(reg)[0] if enable: diff --git a/cv2_drivers/displays/st7789_pio.py b/cv2_drivers/displays/st7789_pio.py index 9bdca53..5ef57af 100644 --- a/cv2_drivers/displays/st7789_pio.py +++ b/cv2_drivers/displays/st7789_pio.py @@ -173,6 +173,9 @@ def _write(self, command=None, data=None): def _pio_write(self, data): """ Writes data to the display using the PIO. + + Args: + data (bytes, bytearray, or ndarray): Data to write to the display """ # Configure the DMA transfer count and read address count = len(data) if isinstance(data, (bytes, bytearray)) else data.size diff --git a/cv2_drivers/displays/st7789_spi.py b/cv2_drivers/displays/st7789_spi.py index 0485575..13c34f9 100644 --- a/cv2_drivers/displays/st7789_spi.py +++ b/cv2_drivers/displays/st7789_spi.py @@ -62,7 +62,13 @@ def __init__( super().__init__(width, height, rotation, bgr_order, reverse_bytes_in_word) def _write(self, command=None, data=None): - """SPI write to the device: commands and data.""" + """ + Writes commands and data to the display. + + Args: + command (bytes, optional): Command to send to the display + data (bytes, optional): Data to send to the display + """ # Save the current mode and alt of the DC pin in case it's used by # another device on the same SPI bus dcMode, dcAlt = self._save_pin_mode_alt(self._dc) diff --git a/cv2_drivers/touch_screens/cst816.py b/cv2_drivers/touch_screens/cst816.py index 0819250..8b56448 100644 --- a/cv2_drivers/touch_screens/cst816.py +++ b/cv2_drivers/touch_screens/cst816.py @@ -16,6 +16,9 @@ from .cv2_touch_screen import CV2_Touch_Screen class CST816(CV2_Touch_Screen): + """ + OpenCV CST816 touch screen driver using an I2C interface. + """ _I2C_ADDRESS = 0x15 _CHIP_ID = 0xB6 @@ -53,24 +56,42 @@ class CST816(CV2_Touch_Screen): _REG_IO_CTL = 0xFD _REG_DIS_AUTO_SLEEP = 0xFE - def __init__(self, i2c, address=_I2C_ADDRESS, width=240, height=320, rotation=1): + def __init__(self, i2c, width=240, height=320, rotation=1, address=_I2C_ADDRESS): + """ + Initializes the CST816 driver. + + Args: + i2c (I2C): I2C object for communication + width (int, optional): Touch screen width in pixels. + Default is 240 + height (int, optional): Touch screen height in pixels. + Default is 320 + rotation (int, optional): Orientation of touch screen + - 0: Portrait (default) + - 1: Landscape + - 2: Inverted portrait + - 3: Inverted landscape + address (int, optional): I2C address of the camera. + Default is 0x15 + """ self.i2c = i2c self.address = address self.width = width self.height = height self.rotation = rotation - def is_connected(self): + def _is_connected(self): """ - Check if the CST816 touch screen is connected by reading the chip ID. + Checks if the touch screen is connected by reading the chip ID. Returns: - bool: True if connected, False otherwise + bool: True if the touch screen is connected and the chip ID is + correct, otherwise False. 
""" try: # Try to read the chip ID # If it throws an I/O error - the device isn't connected - chip_id = self.read_register_value(self._REG_CHIP_ID) + chip_id = self._get_chip_id() # Confirm the chip ID is correct if chip_id == self._CHIP_ID: @@ -80,7 +101,13 @@ def is_connected(self): except: return False - def getChipID(self): + def _get_chip_id(self): + """ + Reads the chip ID. + + Returns: + int: The chip ID of the HM01B0 (should be 0xB6). + """ return self.read_register_value(self._REG_CHIP_ID) def is_touched(self): @@ -97,6 +124,13 @@ def is_touched(self): return touch_num > 0 def get_touch_xy(self): + """ + Get the X and Y coordinates of the touch point. Will return the last + touch point if no touch is currently detected. + + Returns: + tuple: (x, y) coordinates of the touch point + """ x = self.read_register_value(self._REG_X_POS_H, 2) & 0x0FFF y = self.read_register_value(self._REG_Y_POS_H, 2) & 0x0FFF @@ -118,6 +152,8 @@ def read_register_value(self, reg, num_bytes=1): Args: reg (int): Register address to read from + num_bytes (int, optional): Number of bytes to read from the register. + Default is 1 Returns: int: Value read from the register diff --git a/cv2_drivers/touch_screens/cv2_touch_screen.py b/cv2_drivers/touch_screens/cv2_touch_screen.py index 094ea80..439a314 100644 --- a/cv2_drivers/touch_screens/cv2_touch_screen.py +++ b/cv2_drivers/touch_screens/cv2_touch_screen.py @@ -9,7 +9,13 @@ #------------------------------------------------------------------------------- class CV2_Touch_Screen(): + """ + Base class for OpenCV touch screen drivers. + """ def __init__(self): + """ + Initializes the touch screen. + """ pass # TODO: Implement common methods for all touch screens From 0885da71d72cbedc8bafdd90723eb9bd03ba3be8 Mon Sep 17 00:00:00 2001 From: Malcolm McKellips Date: Thu, 24 Jul 2025 10:18:04 -0600 Subject: [PATCH 141/158] update names for frozen examples --- build.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.sh b/build.sh index a315248..ff0e607 100644 --- a/build.sh +++ b/build.sh @@ -7,8 +7,8 @@ fi # TODO: Could also make these opts into the build_micropython_opencv function if we care... FROZEN_MODULES_DIR="$(dirname "$0")/frozen_modules" FROZEN_EXAMPLES_ARCHIVE_SCRIPT="frozen_examples.py" -FROZEN_EXAMPLES_UNPACKED_DIR="micropython-opencv-examples" -PERSISTENT_FILE_FOR_UNPACK="/keep_opencv_example_changes" +FROZEN_EXAMPLES_UNPACKED_DIR="opencv-examples" +PERSISTENT_FILE_FOR_UNPACK="/${FROZEN_EXAMPLES_UNPACKED_DIR}/reset_examples.txt" # Uses freezefs to create a frozen filesystem archive for the provided directory. # See https://github.com/bixb922/freezefs for more details on freezefs From fa2d97aeb949cd471f786824ac3746bd76ce884d Mon Sep 17 00:00:00 2001 From: Malcolm McKellips Date: Thu, 24 Jul 2025 10:23:25 -0600 Subject: [PATCH 142/158] pedantic fixes --- build.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build.sh b/build.sh index ff0e607..be4161f 100644 --- a/build.sh +++ b/build.sh @@ -53,12 +53,12 @@ function add_to_manifest { # We will use the optional MPCONFIG_FILE argument to determine if we should add this line if [ -n "$MPCONFIG_FILE" ]; then + echo "Attempting to add frozen manifest line to $MPCONFIG_FILE for $BOARD" + if [[ $MPCONFIG_FILE == *.mk ]]; then # e.g. 
for TEENSY which uses mpconfigboard.mk instead of mpconfigboard.cmake - echo "Adding frozen manifest line to mpconfigboard.mk for $BOARD" printf "\nFROZEN_MANIFEST ?= \$(BOARD_DIR)/manifest.py" >> ${BOARD_DIR}/$MPCONFIG_FILE elif [[ $MPCONFIG_FILE == *.cmake ]]; then - echo "Adding frozen manifest line to mpconfigboard.cmake for $BOARD" printf "\nset(MICROPY_FROZEN_MANIFEST \"\${MICROPY_BOARD_DIR}/manifest.py\")" >> ${BOARD_DIR}/$MPCONFIG_FILE fi fi @@ -73,7 +73,7 @@ function add_to_manifest { cat ${BOARD_DIR}/manifest.py } -# Adds the frozen data filesystem to the boot.py file for the given port +# Adds the frozen data filesystem to the _boot.py file for the given port # Options: # $1: Port name # $2: Frozen data file path From fe4cc0d233d08f75ca2e8c6a99f1453f9716b855 Mon Sep 17 00:00:00 2001 From: Malcolm McKellips Date: Thu, 24 Jul 2025 11:35:55 -0600 Subject: [PATCH 143/158] Use built in extraction of freezefs to simplify build script --- build.sh | 62 ++++++++++++++++---------------------------------------- 1 file changed, 17 insertions(+), 45 deletions(-) diff --git a/build.sh b/build.sh index be4161f..7bf2d5f 100644 --- a/build.sh +++ b/build.sh @@ -27,9 +27,14 @@ function create_frozen_fs { cp -r $DIR_TO_FREEZE $DIR_NAME_ON_BOARD - python -m freezefs $DIR_NAME_ON_BOARD $OUTPUT_FILE + # Use on-import=extract so our frozen filesystem is unpacked to '/' in flash on import + # Use --compress to compress the frozen filesystem archive + # Use --overwrite always to ensure that the frozen filesystem is returned to factory state if the persistent file is deleted + + python -m freezefs $DIR_NAME_ON_BOARD $OUTPUT_FILE --on-import=extract --compress --overwrite always } +# Adds the provided directory to the manifest file for the specified port and board. # Options: # $1: The directory to add to the manifest # $2: The port (e.g. rp2) @@ -77,17 +82,11 @@ function add_to_manifest { # Options: # $1: Port name # $2: Frozen data file path - # $3: Copy Source: If copying imported frozen data to a mutable location, this is the directory name of the source (optional) - # $4: Copy Destination: If copying imported frozen data to a mutable location, this is the directory name of the destination (optional) - # $5: Add destination to sys.path? If true, the destination directory will be added to sys.path in _boot.py (optional) - # NOTE: By providing the source and destination, the frozen data filesystem will be copied to a mutable location on the board - # If they are not provided, the frozen data filesystem will still be accessible, but will be read-only. + # $3: Unpacked directory name on the board (optional). If provided, the modules in this directory will be made importable function add_frozen_data_to_boot_for_port { local TARGET_PORT_NAME=$1 local FROZEN_DATA_FILE=$2 - local SOURCE_DIR=$3 - local DESTINATION_DIR=$4 - local ADD_TO_SYSPATH=${5:-false} + local UNPACKED_DIR=$3 # Remove the ".py" extension from the frozen data file local FROZEN_DATA_BASENAME=$(basename $FROZEN_DATA_FILE .py) @@ -110,41 +109,13 @@ function add_frozen_data_to_boot_for_port { echo "except OSError:" >> ${BOOT_FILE} echo " import ${FROZEN_DATA_BASENAME}" >> ${BOOT_FILE} echo " with open('${PERSISTENT_FILE_FOR_UNPACK}', 'w') as f:" >> ${BOOT_FILE} - echo " f.write('Hi! Delete this file to restore the ${FROZEN_EXAMPLES_UNPACKED_DIR} to its default state. 
WARNING: This will override ALL of your changes to that directory.')" >> ${BOOT_FILE} - - # Now, copy the unpacked frozen data filesystem to a mutable location if the source and destination are provided - # Simple recursive function to copy the directory tree (since i.e. shutil.copytree is not available on MicroPython) - if [ -n "$SOURCE_DIR" ] && [ -n "$DESTINATION_DIR" ]; then - echo "Copying frozen data from ${SOURCE_DIR} to ${DESTINATION_DIR} in _boot.py" - local BOOT_FILE="micropython/ports/${TARGET_PORT_NAME}/modules/_boot.py" - echo " def copytree(src, dst):" >> ${BOOT_FILE} - echo " try:" >> ${BOOT_FILE} - echo " os.mkdir(dst)" >> ${BOOT_FILE} - echo " except OSError:" >> ${BOOT_FILE} - echo " pass" >> ${BOOT_FILE} - echo " for entry in os.ilistdir(src):" >> ${BOOT_FILE} - echo " fname, typecode, _, _ = entry" >> ${BOOT_FILE} - echo " src_path = src + '/' + fname" >> ${BOOT_FILE} - echo " dst_path = dst + '/' + fname" >> ${BOOT_FILE} - echo " if typecode == 0x4000:" >> ${BOOT_FILE} # typecode == 0x4000 means directory - echo " copytree(src_path, dst_path)" >> ${BOOT_FILE} - echo " else:" >> ${BOOT_FILE} - echo " with open(src_path, 'rb') as fsrc:" >> ${BOOT_FILE} - echo " with open(dst_path, 'wb') as fdst:" >> ${BOOT_FILE} - echo " fdst.write(fsrc.read())" >> ${BOOT_FILE} - echo " copytree('${SOURCE_DIR}', '${DESTINATION_DIR}')" >> ${BOOT_FILE} - # Finally, unmount the source directory if it is mounted - echo " try:" >> ${BOOT_FILE} - echo " os.umount('/${SOURCE_DIR}')" >> ${BOOT_FILE} - echo " except Exception as e:" >> ${BOOT_FILE} - echo " print('umount failed:', e)" >> ${BOOT_FILE} - fi + echo " f.write('Hi! Delete this file and reset your board to restore the ${FROZEN_EXAMPLES_UNPACKED_DIR} directory to its default state. WARNING: This will override ALL of your changes to that directory.')" >> ${BOOT_FILE} - # If the ADD_TO_SYSPATH flag is true, add the destination directory to sys.path - if [ "$ADD_TO_SYSPATH" = true ]; then - echo "Adding ${DESTINATION_DIR} to sys.path in _boot.py" + # If a destination directory is provided, we will add it to the sys.path so that the modules in the unpacked directory can be imported + if [ -n "$UNPACKED_DIR" ]; then + echo "Adding ${UNPACKED_DIR} to sys.path in _boot.py" echo "import sys" >> ${BOOT_FILE} - echo "sys.path.append('/${DESTINATION_DIR}')" >> ${BOOT_FILE} + echo "sys.path.append('/${UNPACKED_DIR}')" >> ${BOOT_FILE} fi # Helpful for debugging during the build process, but can be removed if we'd rather not see this output... @@ -156,7 +127,7 @@ function add_frozen_data_to_boot_for_port { # Also freezes the examples directory in a filesystem archive on the board function build_micropython_opencv { # Install necessary packages (Could move into an install_dependencies.sh if we want this to be more explicit/modular) - sudo apt-get update + sudo apt update sudo apt install cmake python3 build-essential gcc-arm-none-eabi libnewlib-arm-none-eabi libstdc++-arm-none-eabi-newlib # Install necessary python packages (could also move this to a requirements.txt file) pip install freezefs @@ -167,7 +138,7 @@ function build_micropython_opencv { # Create our frozen filesystem archive for the examples directory # Note the "." 
to make the read-only version of the examples directory hidden in IDEs like Thonny - create_frozen_fs "examples" ".$FROZEN_EXAMPLES_UNPACKED_DIR" "$FROZEN_MODULES_DIR/$FROZEN_EXAMPLES_ARCHIVE_SCRIPT" + create_frozen_fs "examples" "$FROZEN_EXAMPLES_UNPACKED_DIR" "$FROZEN_MODULES_DIR/$FROZEN_EXAMPLES_ARCHIVE_SCRIPT" # Add necessary content to the manifest file to freeze the modules in the provided directory add_to_manifest "$FROZEN_MODULES_DIR" "rp2" "SPARKFUN_XRP_CONTROLLER" "mpconfigvariant_LARGE_BINARY.cmake" @@ -175,7 +146,8 @@ function build_micropython_opencv { # Add necessary content to the boot.py file to unpack the frozen data filesystem on boot # Provide the source and destination directories to copy the frozen data filesystem to a mutable (and non-hidden) location # Provide "true" as the last argument to add the destination directory to sys.path (since our examples directory contains modules that we want to be importable...) - add_frozen_data_to_boot_for_port "rp2" "$FROZEN_EXAMPLES_ARCHIVE_SCRIPT" ".$FROZEN_EXAMPLES_UNPACKED_DIR" "$FROZEN_EXAMPLES_UNPACKED_DIR" true + # add_frozen_data_to_boot_for_port "rp2" "$FROZEN_EXAMPLES_ARCHIVE_SCRIPT" ".$FROZEN_EXAMPLES_UNPACKED_DIR" "$FROZEN_EXAMPLES_UNPACKED_DIR" true + add_frozen_data_to_boot_for_port "rp2" "$FROZEN_EXAMPLES_ARCHIVE_SCRIPT" "$FROZEN_EXAMPLES_UNPACKED_DIR" true # Set Pico SDK path to $GITHUB_WORKSPACE/micropython/lib/pico-sdk if $GITHUB_WORKSPACE is set, otherwise use the current directory if [ -n "$GITHUB_WORKSPACE" ]; then From 95c72eb0075052896affb4fd9dd6bd1e0566f3a5 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 30 Jul 2025 12:14:08 -0600 Subject: [PATCH 144/158] Update content of PERSISTENT_FILE_FOR_UNPACK --- build.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/build.sh b/build.sh index 7bf2d5f..966535a 100644 --- a/build.sh +++ b/build.sh @@ -109,7 +109,14 @@ function add_frozen_data_to_boot_for_port { echo "except OSError:" >> ${BOOT_FILE} echo " import ${FROZEN_DATA_BASENAME}" >> ${BOOT_FILE} echo " with open('${PERSISTENT_FILE_FOR_UNPACK}', 'w') as f:" >> ${BOOT_FILE} - echo " f.write('Hi! Delete this file and reset your board to restore the ${FROZEN_EXAMPLES_UNPACKED_DIR} directory to its default state. WARNING: This will override ALL of your changes to that directory.')" >> ${BOOT_FILE} + echo " f.write('Hi! The firmware has this directory frozen into the firmware, and the _boot.py\\n')" >> ${BOOT_FILE} + echo " f.write('file has been modified to automatically unpack this directory if needed. As long\\n')" >> ${BOOT_FILE} + echo " f.write('as this file exists, it will not unpack the directory, meaning you can safely\\n')" >> ${BOOT_FILE} + echo " f.write('edit the files here or delete all other files to free up storage space. 
If you\\n')" >> ${BOOT_FILE} + echo " f.write('want to restore this directory to its default state, delete this file and the\\n')" >> ${BOOT_FILE} + echo " f.write('directory will be unpacked again on the next boot.\\n')" >> ${BOOT_FILE} + echo " f.write('\\n')" >> ${BOOT_FILE} + echo " f.write('WARNING: Deleting this file will override ALL changes to this directory!')" >> ${BOOT_FILE} # If a destination directory is provided, we will add it to the sys.path so that the modules in the unpacked directory can be imported if [ -n "$UNPACKED_DIR" ]; then From ae3312548859156ee44e5c0bd0effa3c9724b498 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 30 Jul 2025 14:42:15 -0600 Subject: [PATCH 145/158] Nitpicky fixes in examples --- examples/ex01_hello_opencv.py | 4 ++-- examples/ex02_camera.py | 2 +- examples/ex03_touch_screen.py | 2 +- examples/ex05_performance.py | 17 ++++++++--------- examples/xrp_examples/ex02_grab_orange_ring.py | 14 +++++++------- 5 files changed, 19 insertions(+), 20 deletions(-) diff --git a/examples/ex01_hello_opencv.py b/examples/ex01_hello_opencv.py index d8f7bac..0ee6b9b 100644 --- a/examples/ex01_hello_opencv.py +++ b/examples/ex01_hello_opencv.py @@ -17,7 +17,7 @@ # Standard OpenCV leverages the host operating system to access hardware, but we # don't have that luxury in MicroPython. Instead, drivers are provided for # various hardware components, which need to be initialized before using them. -# The exmples import a module called `cv2_hardware_init`, which initializes the +# The examples import a module called `cv2_hardware_init`, which initializes the # drivers. You may need to edit the contents of the `cv2_hardware_init` module # based on your specific board and hardware configuration from cv2_hardware_init import * @@ -61,7 +61,7 @@ cv.imshow(display, img) # Can alternatively call `display.imshow(img)` # Standard OpenCV requires a call to `cv.waitKey()` to process events and -# actually display the image. However the display driver shows the image +# actually display the image. However, the display driver shows the image # immediately, so it's not necessary to call `cv.waitKey()` in MicroPython. # But it is available, and behaves almost like any other Python environment! The # only difference is that it requires a key to be pressed in the REPL instead of diff --git a/examples/ex02_camera.py b/examples/ex02_camera.py index 432fa38..88a078f 100644 --- a/examples/ex02_camera.py +++ b/examples/ex02_camera.py @@ -34,7 +34,7 @@ success, frame = camera.read() # Check if the frame was read successfully - if success == False: + if not success: print("Error reading frame from camera") break diff --git a/examples/ex03_touch_screen.py b/examples/ex03_touch_screen.py index f3c9ecd..94a4483 100644 --- a/examples/ex03_touch_screen.py +++ b/examples/ex03_touch_screen.py @@ -52,7 +52,7 @@ else: # Check if there was touch input before if touch_input: - # There was touch input before, but not any more + # There was touch input before, but not anymore touch_input = False # Draw a line if there was touch input diff --git a/examples/ex05_performance.py b/examples/ex05_performance.py index d46cc58..f6c4b7f 100644 --- a/examples/ex05_performance.py +++ b/examples/ex05_performance.py @@ -56,7 +56,7 @@ # Loop to continuously read frames from the camera and display them while True: # Read a frame from the camera and measure how long it takes. 
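# (For reference, the elapsed time in the timing pattern below is normally
# computed with time.ticks_diff(), which handles tick counter wraparound:
# elapsed_us = time.ticks_diff(time.ticks_us(), t0).)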
Try running - # this both with and without the preallocated `frame` array to see the + # this both with and without the pre-allocated `frame` array to see the # difference in performance t0 = time.ticks_us() success, frame = camera.read(frame) @@ -69,7 +69,7 @@ break # Now we'll do some processing on the frame. Try running this with and - # without the preallocated `result_image` array, and try different OpenCV + # without the pre-allocated `result_image` array, and try different OpenCV # functions to compare performance t0 = time.ticks_us() result_image = cv.cvtColor(frame, cv.COLOR_BGR2HSV, result_image) @@ -110,21 +110,20 @@ # since it mitigates how frequently garbage collection is triggered if memory_used < 0: print("Garbage collection triggered!") - + # Something to try is triggering the garbage collector manually each loop # iteration to immediately free up memory. Garbage collection can be faster # if less memory has been allocated, so this can help avoid long stutters - # from occasional garbage collection. However garbage collection will always - # take *some* time, so this will lower the average FPS. You can choose to do - # this if you prefer a consistent frame rate, or don't if you prefer maximum - # frame rate and are okay with occasional stutters - # gc.collect() + # from occasional garbage collection. However, garbage collection always + # takes *some* time, so this will lower the average FPS. You can choose to + # do this if you prefer a consistent frame rate, or don't if you prefer + # maximum frame rate and are okay with occasional stutters gc.collect() # For advanced users, you can use the internal buffers of the camera and # display drivers: `camera._buffer` and `display._buffer`. Using these # buffers directly can avoid the colorspace conversions implemented in # `camera.read()` and `display.imshow()`, which can improve performance if - # your application can make use of the native colorspaces and improve + # your application can make use of the native color spaces and improve # overall performance # Check for key presses diff --git a/examples/xrp_examples/ex02_grab_orange_ring.py b/examples/xrp_examples/ex02_grab_orange_ring.py index 10e4bf6..6bdbb04 100644 --- a/examples/xrp_examples/ex02_grab_orange_ring.py +++ b/examples/xrp_examples/ex02_grab_orange_ring.py @@ -55,7 +55,7 @@ def find_orange_ring_pipeline(frame): # Value: Anything above 30 is bright enough lower_bound = (15, 50, 30) upper_bound = (25, 255, 255) - inRange = cv.inRange(hsv, lower_bound, upper_bound) + in_range = cv.inRange(hsv, lower_bound, upper_bound) # Noise in the image often causes `cv.inRange()` to return false positives # and false negatives, meaning there are some incorrect pixels in the binary @@ -63,12 +63,12 @@ def find_orange_ring_pipeline(frame): # effectively grow and shrink regions in the binary image to remove tiny # blobs of noise kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3)) - morphOpen = cv.morphologyEx(inRange, cv.MORPH_OPEN, kernel) - morphClose = cv.morphologyEx(morphOpen, cv.MORPH_CLOSE, kernel) + morph_open = cv.morphologyEx(in_range, cv.MORPH_OPEN, kernel) + morph_close = cv.morphologyEx(morph_open, cv.MORPH_CLOSE, kernel) # Now we use `cv.findContours()` to find the contours in the binary image, # which are the boundaries of the regions in the binary image - contours, hierarchy = cv.findContours(morphClose, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) + contours, hierarchy = cv.findContours(morph_close, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) # It's possible that 
no contours were found, so first check if any were # found before proceeding @@ -91,7 +91,7 @@ def find_orange_ring_pipeline(frame): # If no contour was found, return invalid values to indicate that if best_contour is None: - return (-1, -1) + return -1, -1 # Calculate the bounding rectangle of the contour, and use that to calculate # the center coordinates of the ring @@ -141,7 +141,7 @@ def find_orange_ring_pipeline(frame): # Now we can return the distance and position of the ring in cm, since # that's the only data we need from this pipeline - return (distance_cm, position_x_cm) + return distance_cm, position_x_cm # Move the servo out of the way of the camera servo_one.set_angle(90) @@ -157,7 +157,7 @@ def find_orange_ring_pipeline(frame): while True: # Read a frame from the camera success, frame = camera.read() - if success == False: + if not success: print("Error reading frame from camera") break From 68584d7337f9e1092e51b2742df4d7f5d98f2411 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 30 Jul 2025 15:08:45 -0600 Subject: [PATCH 146/158] Add splash image and update image paths Rename "test_image" to "images" since they're already in the examples folder Updated paths in imread imwrite example Fixed cv2_hardware_init/__init__.py to use full path to new example splash image --- examples/cv2_hardware_init/__init__.py | 8 +++++--- examples/ex04_imread_imwrite.py | 4 ++-- .../{test_images => images}/sparkfun_logo.png | Bin examples/images/splash.png | Bin 0 -> 18396 bytes 4 files changed, 7 insertions(+), 5 deletions(-) rename examples/{test_images => images}/sparkfun_logo.png (100%) create mode 100644 examples/images/splash.png diff --git a/examples/cv2_hardware_init/__init__.py b/examples/cv2_hardware_init/__init__.py index 7ca8a21..1bf8755 100644 --- a/examples/cv2_hardware_init/__init__.py +++ b/examples/cv2_hardware_init/__init__.py @@ -9,9 +9,11 @@ except: print("Display initialization failed, skipping...") -# Optional - show a splash image on the display if one is available, or clear -# the display of any previous content -display.splash() +# Optional - Show a splash screen on the display with an optional filename (if +# not provided, it defaults to `splash.png` in the root directory of the +# MicroPython filesystem). 
If the file is not present, the driver will simply
+# clear the display of any previous content
+display.splash("opencv-examples/images/splash.png")
 
 # Import the camera driver
 try:
diff --git a/examples/ex04_imread_imwrite.py b/examples/ex04_imread_imwrite.py
index 3400347..cef4cb7 100644
--- a/examples/ex04_imread_imwrite.py
+++ b/examples/ex04_imread_imwrite.py
@@ -24,7 +24,7 @@
 #
 # Note - only BMP and PNG formats are currently supported in MicroPython OpenCV
 print("Loading image...")
-img = cv.imread("test_images/sparkfun_logo.png")
+img = cv.imread("opencv-examples/images/splash.png")
 
 # Show the image
 #
@@ -52,7 +52,7 @@
 #
 # Note - only BMP and PNG formats are currently supported in MicroPython OpenCV
 print("Saving modified image...")
-success = cv.imwrite("test_images/sparkfun_logo_edges.png", edges)
+success = cv.imwrite("opencv-examples/images/splash_edges.png", edges)
 
 # Check if the image was saved successfully
 if success:
diff --git a/examples/test_images/sparkfun_logo.png b/examples/images/sparkfun_logo.png
similarity index 100%
rename from examples/test_images/sparkfun_logo.png
rename to examples/images/sparkfun_logo.png
diff --git a/examples/images/splash.png b/examples/images/splash.png
new file mode 100644
index 0000000000000000000000000000000000000000..609a9b858d91da2536e6b5b54bffc6f5e68a7907
GIT binary patch
literal 18396
(binary data omitted)
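A minimal usage sketch of the splash behavior this patch describes, assuming the examples' `cv2_hardware_init` module exposes an initialized `display` driver with the `splash()` method referenced in the comment above (a hedged illustration, not part of the patch itself):

```python
# Sketch of the splash call added in this patch. Assumes `display` is the
# initialized display driver from the examples' cv2_hardware_init module,
# per the comment in __init__.py above.
from cv2_hardware_init import display

# Show the splash image shipped with the examples
display.splash("opencv-examples/images/splash.png")

# With no filename, the driver falls back to `splash.png` in the root of the
# MicroPython filesystem, and simply clears the display if the file is absent
display.splash()
```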
z`~K;M#9iR&D>hMDAQ5nD&rtkO4IJUly0(flk5xe0ZcXIsu---A ziZe21{7q~OX=G&dD{bOW`hP0HM8s{a&bF|R-Jf#PHE-D8*<5|T^YQV6$F99B1$Kya zZC;Ac`unJ3;&i>(N6c6Dua~sQm~d&^=i->t)8uPEeN6xS>~M7Ryq5a6ae5}e9jLp2 zLxDT)v)z8{@aH8j+XNS2W3DRs%?;N1^?ch(f`Trcd3kw%!W@coYDO29>1r&*yUt&2virGpy@4(IM1* zaKr3h-)?j5bXtDB`MvDpv?MpRse8YC(kO6TD%5#1N%H5R6OZmChfLK122vvfyV%hy zeTCP1Zk1+Rv+vzqVp;o2_I zY|X#FF!gnqU+J-HtCwHSbkt^g&Go!7``DGHuuj(`yS*Rdx~{&x?HRkfbg@f7(ASXl z^IA<0w4Mjn`pLh`Izy6-W(f%i0V5#g|El=^|A0}!w)^F=udiEcnf}NaWHhW_Kfk&B z!Ws5kJ_4buLNgTe~DWM4fH3D4; literal 0 HcmV?d00001 From 5423f37483a3287962fc04c2baebeb09fb6becc9 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 30 Jul 2025 15:11:50 -0600 Subject: [PATCH 147/158] Remove sparkfun_logo.png Replaced by splash.png --- examples/images/sparkfun_logo.png | Bin 31075 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 examples/images/sparkfun_logo.png diff --git a/examples/images/sparkfun_logo.png b/examples/images/sparkfun_logo.png deleted file mode 100644 index 645f8f905f79c0ddf6ca201656d57f83f755b6d0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 31075 zcmd2>V`E)iw~fscouG|vCrxA9wr$%wvDMhN?KHOCIE|eKH_!Vk?uV@pd#|;})R<$f zNCi1@WCT0}FfcG=NeK}pFfed|uls8_h_Ca9Jk8Iq3%IkAxDZ&)G{NcD1C+U-tRNUz zeH`MOA@tWXyn}?MGZ+}k;C~1BsC}6U7?{Viq==x3hu(!RY#!F|!_#B;;e!yYpdj5U zEjHU@P<8}w=1|URN?ASPBX;4)P>p0A=ihqkzYsxXc^W70OWG}dy{zU}PWp$uL@)nt zr{X4h*4E0+<+9e+blH)FMTLY4%!Q&pq$DKn1th>z<4^kIA?S$bf{CFugbHAV5$Zxj zP{S|{;3K~yQTG7=yg;P@axyQbA~0!*dxQTEFIdCU_A1c|fxjcaP#TH;{Dj-@7{U|w zBh)C!3OEsbW*eq_AhZxd@fCf*lvT$RhBM29`)$iEC9vee2ut0=&Whz@Nh zF9LJN`GTYaRzIQ&LbJ0HAU6d>5Lk3G*%ISMMbTk z%IM%a2!(T2g!6?5>i_USHG(Om?J^dxBB2zLcN89W*zVemeyjZ4mSI)ST*>2rEzv7j zjp;cqav?u1%4YgTmd4hp|GZt`TJh9=+u2t4A#vh^eu&CPiz9ew-sp|ws5F1K&UMab^Z%i1k@XA@|W`l;ow4(%S zogZ+GMUSIc$dH^s!_uTcY^qG`L~?a_MP7hz(0{Ok&m$5mW2rmG5Gk-vs4gD)e{(q5 zeUTm?pc}$xDYMKZgt3CNMleK?Qc79CX4tw9tF%JprqQxTNx-VYIk_G{o~&>T3qCz_ zSt`Q(`sBSjSqJPlnP6PTqk+1* zOJYP;Zz9T#(KSfq+(Q_I#Jstgfw z6QV#u$c14J@IM3f5=jN(B~y+6J=O!3L5cPBv#4kO*4Snv*CF6K|F{Jcm8^&@sr*sJ z<}s~4_|wORoXv+kL)iv)xPK1?tfNr{3Iq^HpGcrBAlOhfFQA;7LoJ7d}j_{YJGY!M- zf){(eDPUNnr{l?MrkD+UWfVxm%h)s30{Xu&6TUYp<;-PNs=#;b5IoWjY`VI@ih-B=0Jle@jYZgW?{Dej zs|w~!p^8Qg)UWsD)?s46vmC^){;hja+_2T=TdeQ2B*WXrhUd^x)__kC!}VYHF5N>$c^;e($m?k_iJgp z5yH?BoVR7f@GPyO1<*y%Uq5GnXcz+j7H@|(zxD08$*asl515`QeXgMeeDx%kI0>6q zcse77ty<`s4DwWyfm~Rg#bDePbi2+7d!vF0+jzv%SHHs}OZyJFj3th$%dXPgch9!7 zkl1Jqb8&x*=qWll^aIJDXX7$4l}MsN!UbxTzir&a4-&wyuWXsivTr$&BB-5dD|#nP zI9;8e#jTD!PV?k-`r9*wTuhGIRVAXb^!&LHnye|EIZM#O2&MW2cVY~WYPOz>V$rE1 zmA$GfRjAyJp_WcV5uG@xhm{{^0YZ|ir4_koxZNmTHH(zS0BfC@M{Q+A-fs$>#X$Wtofv&p~aD* zo~m}~X?^V=2#@XJ{iKSuV&@-Iwthj$-&xj@pod5J-qY^qQUsnqR>&@-lJZO8k?V@2G1aZeLhO>L`>+)$GvzOR zdFGeQQxC2f0Q09yET|?=Naejtfl&BmSJi$p?(-L9SBuCq*idmwGA-eG(E{GGiV~|R zLT{BU_Uidb?jKba7B6DJ616rdufZ5gf6Qtt>YCG|WE-X5)Pk{kE$CLbvJ*5fe?i>T z5{;G~_fw5E5Hte8%ugqN%IP!5ESi&JSP_g{%&8yFJYTeW4ke@tD);Bx3Nn3T)!NE{ ziC*K0b+BPink9qxKS;7b#Sr((P%$A%i*}X`dBJNT9MAV6Bfp1deregACs+C=kpx|6 zC?HAYA40kxNX*0KO&Br-ykGX!fUlneL4sJ$JMTGn!t5xFGvq9?dQAVJ?N*tZr24;W z{?TTf?f%BQ2+3FpM22U#PF$_*P^vg-SJxl&y*9K|dJ0x3om$Y(7%v)8!^|baKWWrtX~uGEGG}t%C4iLsCyZ7uv3f zJ)*J!uk_6OKShX0qp=MMz~?#&B{n6cm#gGCr> z$Qs(DGF+Dk!^=I4xD5+;pSRMJ0+!1`P`>0knOPiP<`INIk_!ME#I)`~pI?DV`ECA7 z<#jpBa#DhRefEU*$KTo#n5jVRR2gpB7lk=t+IF&BTZJw$MtiIAco$L65o-A0Ux%E) zz|wO``9f3ExAROe$R0ustzFz#bJbw6-usTA`NggRjTREULdq`=Rw^kHM^`2kZD`^- zlBunirhu%>3Gs@AK*yvMYKT_ivS2-4A!bp6rLL)$F*ht4#gfLyPnb z2b36sr@?|H!Y1S+^MEC6C8ca_4c&^I} zE1?OLo?%kJ{LTKs^}Mz(2*p`?I))FEnX|C;^KYx^inf^U{Iq5Z_DO0fDCNLz;7N5! 
From: Dryw Wade
Date: Wed, 30 Jul 2025 15:14:49 -0600
Subject: [PATCH 148/158] Nitpick print tweaks to SD card initialization

---
 examples/cv2_hardware_init/sd_card.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/cv2_hardware_init/sd_card.py b/examples/cv2_hardware_init/sd_card.py
index b2976d0..73d3b99 100644
--- a/examples/cv2_hardware_init/sd_card.py
+++ b/examples/cv2_hardware_init/sd_card.py
@@ -39,11 +39,11 @@
     vfs = uos.VfsFat(sd_card)
     uos.mount(vfs, "/sd")
 except ImportError:
-    print("sdcard module not found, skipping SD card initialization...")
+    print("`sdcard` module not found, skipping SD card initialization...")
 except OSError as e:
     eStr = str(e)
     if "no SD card" in eStr:
-        print("no SD card found, skipping SD card initialization...")
+        print("No SD card found, skipping SD card initialization...")
     elif "Errno 1" in eStr:
         print("SD card already mounted, skipping SD card initialization...")
     else:

From 66f61fd3c21073d1f58161c563253d1fd52246c0 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 30 Jul 2025 15:30:12 -0600
Subject: [PATCH 149/158] Tweak XRP grab ring example to wait on user button pushes

---
 examples/xrp_examples/ex02_grab_orange_ring.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/examples/xrp_examples/ex02_grab_orange_ring.py b/examples/xrp_examples/ex02_grab_orange_ring.py
index 6bdbb04..0952263 100644
--- a/examples/xrp_examples/ex02_grab_orange_ring.py
+++ b/examples/xrp_examples/ex02_grab_orange_ring.py
@@ -146,6 +146,11 @@ def find_orange_ring_pipeline(frame):
 # Move the servo out of the way of the camera
 servo_one.set_angle(90)
 
+# Wait for user button to be pressed to start the example
+print("Press the user button to start the example")
+while not board.is_button_pressed():
+    pass
+
 # Open the camera and wait a moment for at least one frame to be captured
 camera.open()
 time.sleep(0.1)
@@ -184,6 +189,11 @@ def find_orange_ring_pipeline(frame):
 # Release the camera, we're done with it
 camera.release()
 
+# Wait for user button to be pressed to continue
+print("Press the user button to continue")
+while not board.is_button_pressed():
+    pass
+
 # Move the servo to go through the center of the ring
 servo_one.set_angle(45)

From 18c403922f7719d52e165396b3ae9e3a41246a4e Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Wed, 30 Jul 2025 15:32:06 -0600
Subject: [PATCH 150/158] Rename examples folder to opencv-examples

---
 .../cv2_hardware_init/__init__.py | 0
 .../cv2_hardware_init/bus_i2c.py | 0
 .../cv2_hardware_init/bus_spi.py | 0
.../cv2_hardware_init/camera.py | 0 .../cv2_hardware_init/display.py | 0 .../cv2_hardware_init/sd_card.py | 0 .../cv2_hardware_init/touch_screen.py | 0 {examples => opencv-examples}/ex01_hello_opencv.py | 0 {examples => opencv-examples}/ex02_camera.py | 0 {examples => opencv-examples}/ex03_touch_screen.py | 0 .../ex04_imread_imwrite.py | 0 {examples => opencv-examples}/ex05_performance.py | 0 .../ex06_detect_sfe_logo.py | 0 {examples => opencv-examples}/images/splash.png | Bin .../xrp_examples/ex01_touch_drive.py | 0 .../xrp_examples/ex02_grab_orange_ring.py | 0 16 files changed, 0 insertions(+), 0 deletions(-) rename {examples => opencv-examples}/cv2_hardware_init/__init__.py (100%) rename {examples => opencv-examples}/cv2_hardware_init/bus_i2c.py (100%) rename {examples => opencv-examples}/cv2_hardware_init/bus_spi.py (100%) rename {examples => opencv-examples}/cv2_hardware_init/camera.py (100%) rename {examples => opencv-examples}/cv2_hardware_init/display.py (100%) rename {examples => opencv-examples}/cv2_hardware_init/sd_card.py (100%) rename {examples => opencv-examples}/cv2_hardware_init/touch_screen.py (100%) rename {examples => opencv-examples}/ex01_hello_opencv.py (100%) rename {examples => opencv-examples}/ex02_camera.py (100%) rename {examples => opencv-examples}/ex03_touch_screen.py (100%) rename {examples => opencv-examples}/ex04_imread_imwrite.py (100%) rename {examples => opencv-examples}/ex05_performance.py (100%) rename {examples => opencv-examples}/ex06_detect_sfe_logo.py (100%) rename {examples => opencv-examples}/images/splash.png (100%) rename {examples => opencv-examples}/xrp_examples/ex01_touch_drive.py (100%) rename {examples => opencv-examples}/xrp_examples/ex02_grab_orange_ring.py (100%) diff --git a/examples/cv2_hardware_init/__init__.py b/opencv-examples/cv2_hardware_init/__init__.py similarity index 100% rename from examples/cv2_hardware_init/__init__.py rename to opencv-examples/cv2_hardware_init/__init__.py diff --git a/examples/cv2_hardware_init/bus_i2c.py b/opencv-examples/cv2_hardware_init/bus_i2c.py similarity index 100% rename from examples/cv2_hardware_init/bus_i2c.py rename to opencv-examples/cv2_hardware_init/bus_i2c.py diff --git a/examples/cv2_hardware_init/bus_spi.py b/opencv-examples/cv2_hardware_init/bus_spi.py similarity index 100% rename from examples/cv2_hardware_init/bus_spi.py rename to opencv-examples/cv2_hardware_init/bus_spi.py diff --git a/examples/cv2_hardware_init/camera.py b/opencv-examples/cv2_hardware_init/camera.py similarity index 100% rename from examples/cv2_hardware_init/camera.py rename to opencv-examples/cv2_hardware_init/camera.py diff --git a/examples/cv2_hardware_init/display.py b/opencv-examples/cv2_hardware_init/display.py similarity index 100% rename from examples/cv2_hardware_init/display.py rename to opencv-examples/cv2_hardware_init/display.py diff --git a/examples/cv2_hardware_init/sd_card.py b/opencv-examples/cv2_hardware_init/sd_card.py similarity index 100% rename from examples/cv2_hardware_init/sd_card.py rename to opencv-examples/cv2_hardware_init/sd_card.py diff --git a/examples/cv2_hardware_init/touch_screen.py b/opencv-examples/cv2_hardware_init/touch_screen.py similarity index 100% rename from examples/cv2_hardware_init/touch_screen.py rename to opencv-examples/cv2_hardware_init/touch_screen.py diff --git a/examples/ex01_hello_opencv.py b/opencv-examples/ex01_hello_opencv.py similarity index 100% rename from examples/ex01_hello_opencv.py rename to opencv-examples/ex01_hello_opencv.py diff --git 
a/examples/ex02_camera.py b/opencv-examples/ex02_camera.py similarity index 100% rename from examples/ex02_camera.py rename to opencv-examples/ex02_camera.py diff --git a/examples/ex03_touch_screen.py b/opencv-examples/ex03_touch_screen.py similarity index 100% rename from examples/ex03_touch_screen.py rename to opencv-examples/ex03_touch_screen.py diff --git a/examples/ex04_imread_imwrite.py b/opencv-examples/ex04_imread_imwrite.py similarity index 100% rename from examples/ex04_imread_imwrite.py rename to opencv-examples/ex04_imread_imwrite.py diff --git a/examples/ex05_performance.py b/opencv-examples/ex05_performance.py similarity index 100% rename from examples/ex05_performance.py rename to opencv-examples/ex05_performance.py diff --git a/examples/ex06_detect_sfe_logo.py b/opencv-examples/ex06_detect_sfe_logo.py similarity index 100% rename from examples/ex06_detect_sfe_logo.py rename to opencv-examples/ex06_detect_sfe_logo.py diff --git a/examples/images/splash.png b/opencv-examples/images/splash.png similarity index 100% rename from examples/images/splash.png rename to opencv-examples/images/splash.png diff --git a/examples/xrp_examples/ex01_touch_drive.py b/opencv-examples/xrp_examples/ex01_touch_drive.py similarity index 100% rename from examples/xrp_examples/ex01_touch_drive.py rename to opencv-examples/xrp_examples/ex01_touch_drive.py diff --git a/examples/xrp_examples/ex02_grab_orange_ring.py b/opencv-examples/xrp_examples/ex02_grab_orange_ring.py similarity index 100% rename from examples/xrp_examples/ex02_grab_orange_ring.py rename to opencv-examples/xrp_examples/ex02_grab_orange_ring.py From 4fc3bbec02700cce9ae11f5252b70587ae8be699 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 30 Jul 2025 15:34:47 -0600 Subject: [PATCH 151/158] Update build.sh 18c4039 changed the examples folder name --- build.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/build.sh b/build.sh index 966535a..ed1f31e 100644 --- a/build.sh +++ b/build.sh @@ -144,8 +144,7 @@ function build_micropython_opencv { mkdir "$FROZEN_MODULES_DIR" # Create our frozen filesystem archive for the examples directory - # Note the "." 
to make the read-only version of the examples directory hidden in IDEs like Thonny - create_frozen_fs "examples" "$FROZEN_EXAMPLES_UNPACKED_DIR" "$FROZEN_MODULES_DIR/$FROZEN_EXAMPLES_ARCHIVE_SCRIPT" + create_frozen_fs "opencv-examples" "$FROZEN_EXAMPLES_UNPACKED_DIR" "$FROZEN_MODULES_DIR/$FROZEN_EXAMPLES_ARCHIVE_SCRIPT" # Add necessary content to the manifest file to freeze the modules in the provided directory add_to_manifest "$FROZEN_MODULES_DIR" "rp2" "SPARKFUN_XRP_CONTROLLER" "mpconfigvariant_LARGE_BINARY.cmake" From c37d7b32e7c1739fced9e31ab557d5f635f14af8 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 30 Jul 2025 15:48:37 -0600 Subject: [PATCH 152/158] Change release workflow to rename firmware file --- .github/workflows/release.yml | 2 +- build.sh | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index cf1e5b0..df235a7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -17,7 +17,7 @@ jobs: - name: Upload Release Assets uses: shogo82148/actions-upload-release-asset@v1 with: - asset_path: "micropython/ports/rp2/build-SPARKFUN_XRP_CONTROLLER-LARGE_BINARY/firmware.uf2" + asset_path: "micropython/ports/rp2/build-SPARKFUN_XRP_CONTROLLER-LARGE_BINARY/MICROPYTHON_OPENCV_SPARKFUN_XRP_CONTROLLER.uf2" github_token: ${{ secrets.GITHUB_TOKEN }} upload_url: ${{ github.event.release.upload_url }} diff --git a/build.sh b/build.sh index ed1f31e..9dd0071 100644 --- a/build.sh +++ b/build.sh @@ -173,4 +173,7 @@ function build_micropython_opencv { # Build firmware make BOARD=SPARKFUN_XRP_CONTROLLER ${MAKEOPTS} + + # Rename firmware file to identify it as the OpenCV build and which board it's for + mv micropython/ports/rp2/build-SPARKFUN_XRP_CONTROLLER-LARGE_BINARY/firmware.uf2 micropython/ports/rp2/build-SPARKFUN_XRP_CONTROLLER-LARGE_BINARY/MICROPYTHON_OPENCV_SPARKFUN_XRP_CONTROLLER.uf2 } From b6b595841726bc0e2d93cf654bdf9c389c35271c Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 30 Jul 2025 16:04:19 -0600 Subject: [PATCH 153/158] build.sh: only copy frozen directory if different --- build.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/build.sh b/build.sh index 9dd0071..ca7ff35 100644 --- a/build.sh +++ b/build.sh @@ -25,7 +25,9 @@ function create_frozen_fs { echo "The frozen directory will be named: $DIR_NAME_ON_BOARD" echo "The output file will be: $OUTPUT_FILE" - cp -r $DIR_TO_FREEZE $DIR_NAME_ON_BOARD + if [ $DIR_TO_FREEZE != $DIR_NAME_ON_BOARD ]; then + cp -r $DIR_TO_FREEZE $DIR_NAME_ON_BOARD + fi # Use on-import=extract so our frozen filesystem is unpacked to '/' in flash on import # Use --compress to compress the frozen filesystem archive From 27bbff910c28f6a44b81ff92e2af4cd7fc5707f0 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Wed, 30 Jul 2025 16:49:04 -0600 Subject: [PATCH 154/158] Add animation example --- opencv-examples/ex07_animation.py | 61 +++++++++++++++++++++ opencv-examples/images/animation_sheet.png | Bin 0 -> 8487 bytes 2 files changed, 61 insertions(+) create mode 100644 opencv-examples/ex07_animation.py create mode 100644 opencv-examples/images/animation_sheet.png diff --git a/opencv-examples/ex07_animation.py b/opencv-examples/ex07_animation.py new file mode 100644 index 0000000..759e4a1 --- /dev/null +++ b/opencv-examples/ex07_animation.py @@ -0,0 +1,61 @@ +#------------------------------------------------------------------------------- +# SPDX-License-Identifier: MIT +# +# Copyright (c) 2025 SparkFun Electronics 
+#------------------------------------------------------------------------------- +# ex07_animation.py +# +# This example demonstrates how to play an animation using a series of frames +# stored in a single image file. It assumes full 320x240 frames are stacked +# vertically in the image, and the animation plays by displaying each frame in +# sequence. This can be the basis for things like sprite sheets, where smaller +# icons or characters are stored in a single image and displayed as needed. +#------------------------------------------------------------------------------- + +# Import OpenCV and hardware initialization module +import cv2 as cv +from cv2_hardware_init import * + +# Load an animation sheet image that contains multiple frames of an animation +animation_sheet = cv.imread("opencv-examples/images/animation_sheet.png") + +# This example assumes the image has full 320x240 frames stacked vertically +frame_height = 240 + +# Calculate the number of frames in the sheet by dividing the sheet height by +# the frame height +frame_num = animation_sheet.shape[0] // frame_height + +# Initialize variables to keep track of the current row in the sheet and the +# direction of animation playback (up or down) +row_index = 0 +direction = 1 + +# Prompt the user to press a key to continue +print("Press any key to continue") + +# Loop to continuously play the animation +while True: + # Calculate the starting and ending pixel row for the current frame + row_start_px = row_index * frame_height + row_end_px = row_start_px + frame_height + cv.imshow(display, animation_sheet[row_start_px:row_end_px, :]) + + # Update the row index based on the direction of playback + row_index += direction + + # If we reach the end of the sheet, reverse the direction + if row_index == frame_num-1: + direction = -1 + elif row_index == 0: + direction = 1 + + # Check for key presses. If you want the animation to play at a specific + # frame rate, you can change the wait time to slow it down. 
This example
+    # plays the animation as fast as possible, which is often needed to look
+    # smooth in MicroPython
+    key = cv.waitKey(1)
+
+    # If any key is pressed, exit the loop
+    if key != -1:
+        break
diff --git a/opencv-examples/images/animation_sheet.png b/opencv-examples/images/animation_sheet.png
new file mode 100644
index 0000000000000000000000000000000000000000..ee77cca87581e1a6be2cc53f1797209bb86bcb33
GIT binary patch
literal 8487
(binary data omitted)

From: Dryw Wade
Date: Thu, 31 Jul 2025 10:06:11 -0600
Subject: [PATCH 155/158] Update README.md

Add splash image
Update Quick Start section now that examples are frozen into the firmware
Condense example code snippets
Re-organize supported hardware list as tables with emojis
---
 README.md | 156 ++++++++++++++++++++++--------------------
 1 file changed, 63 insertions(+), 93 deletions(-)

diff --git a/README.md b/README.md
index ba3fadd..6e14446 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,7 @@
+[splash image: opencv-examples/images/splash.png]
+
 # MicroPython-OpenCV
 
 Welcome to SparkFun's MicroPython port of OpenCV! This is the first known MicroPython port of OpenCV, and as such, there may be some rough edges. Hardware support is limited to SparkFun products.
@@ -6,127 +10,93 @@
 # Quick Start
 
 1. Flash MicroPython-OpenCV firmware
    * Back up any files you want to keep, they *will* be overwritten!
-   * Download the latest firmware from the [Releases tab](https://github.com/sparkfun/micropython-opencv/releases).
+   * Download the latest firmware for your board from the [Releases tab](https://github.com/sparkfun/micropython-opencv/releases).
    * If you don't know how to flash firmware to your board, find your board [here](https://micropython.org/download/) and follow the instructions using the OpenCV firmware.
-2. Copy examples (optional)
-   * It is suggested to copy the entire examples folder to your MicroPython board to get started. This can be done simply with [mpremote](https://docs.micropython.org/en/latest/reference/mpremote.html):
-     * `cd micropython-opencv/examples`
-     * `mpremote cp -r . :`
-3. Configure hardware drivers
+   * After first boot, the [opencv-examples](opencv-examples) directory will be automatically extracted to the MicroPython filesystem for easy access to all the examples.
+2. Configure hardware driver initialization
    * The MicroPython port of OpenCV depends on hardware drivers to interface with cameras and displays. Drivers are built into the firmware, so there is no need to install them manually.
-   * An example module called [cv2_hardware_init](examples/cv2_hardware_init/) is imported by all examples to initialize the drivers. You will likely need to edit the files for your specific hardware and board configuration.
-4. Write OpenCV code!
+   * An example module called [cv2_hardware_init](opencv-examples/cv2_hardware_init/) is imported by all examples to initialize the drivers. You will likely need to edit the files for your specific hardware and board configuration.
+3. Write and run OpenCV code
    * Any IDE should work, so use your favorite!
-   * The code block below contains snippets from various examples to highlight major features.
+   * Start with the examples! Go through them in order, which will verify your hardware is working and demonstrate some basics of OpenCV. Read the comments to understand the differences with the MicroPython port.
+   * The code block below contains snippets to highlight major features.
 
 ```python
-# Import OpenCV, just as you would in any other Python environment!
+# Import OpenCV, just like any other Python environment!
 import cv2 as cv
 
-# Standard OpenCV leverages the host operating system to access hardware, but we
-# don't have that luxury in MicroPython. Instead, drivers are provided for
-# various hardware components, which need to be initialized before using them.
-# The exmples import a module called `cv2_hardware_init`, which initializes the
-# drivers. You may need to edit the contents of the `cv2_hardware_init` module
-# based on your specific board and hardware configuration
+# Initialize hardware drivers by importing the example module (you'll likely
+# need to modify it for your specific hardware configuration).
 from cv2_hardware_init import *
 
-# Import NumPy, almost like any other Python environment!
The only difference is -# the addition of `from ulab` since MicroPython does not have a full NumPy -# implementation; ulab NumPy is a lightweight version of standard NumPy +# Import ulab NumPy and initialize an image, almost like any other Python +# environment! from ulab import numpy as np - -# Initialize an image (NumPy array) to be displayed, just like in any other -# Python environment! Here we create a 240x320 pixel image with 3 color channels -# (BGR order, like standard OpenCV) and a data type of `uint8` (you should -# always specify the data type, because NumPy defaults to `float`) img = np.zeros((240, 320, 3), dtype=np.uint8) -# OpenCV's drawing functions can be used to modify the image. Here is the -# obligatory "Hello OpenCV!" text in red -img = cv2.putText(img, "Hello OpenCV!", (50, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) +# Call OpenCV functions just like any other Python environment! +img = cv.putText(img, "Hello OpenCV!", (50, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) +img = cv.Canny(img, 100, 200) -# Once we have an image ready to show, just call `cv.imshow()`, almost like any -# other Python environment! However, there is one important difference: -# -# Standard OpenCV takes a window name string in `cv.imshow()`, which is used -# to display the image in a window. We don't have windows in MicroPython, so -# there is an API change where the first argument must be a display driver. Any -# display driver can be used, as long as it implements an `imshow()` method that -# takes a NumPy array as input -cv.imshow(display, img) # Can alternatively call `display.imshow(img)` - -# Standard OpenCV requires a call to `cv.waitKey()` to process events and -# actually display the image. However the display driver shows the image -# immediately, so it's not necessary to call `cv.waitKey()` in MicroPython. -# But it is available, and behaves almost like any other Python environment! The -# only difference is that it requires a key to be pressed in the REPL instead of -# a window. It will wait for up to the specified number of milliseconds (0 for -# indefinite), and return the ASCII code of the key pressed (-1 if no key press) -# -# Note - Some MicroPython IDEs (like Thonny) don't actually send any key presses -# until you hit Enter on your keyboard -key = cv.waitKey(0) # Not necessary to display image, can remove if desired - -# Open a camera, similar to any other Python environment! In standard OpenCV, -# you would use `cv.VideoCapture(0)` or similar, and OpenCV would leverage the -# host operating system to open a camera object and return it as a -# `cv.VideoCapture` object. However, we don't have that luxury in MicroPython, -# so a camera driver is required instead. Any camera driver can be used, as long -# as it implements the same methods as the standard OpenCV `cv.VideoCapture` -# class, such as `open()`, `read()`, and `release()` -camera.open() +# Call `cv.imshow()`, almost like any other Python environment! Instead of a +# window name string, you pass a display driver that implements an `imshow()` +# method that takes a NumPy array as input +cv.imshow(display, img) -# Read a frame from the camera, just like any other Python environment! It -# returns a tuple, where the first element is a boolean indicating success, -# and the second element is the frame (NumPy array) read from the camera -success, frame = camera.read() +# Call `cv.waitKey()`, just like any other Python environment! This waits for a +# key press on the REPL. 
Standard OpenCV requires this to update windows, but
+# MicroPython OpenCV does not.
+key = cv.waitKey(0)

-# Release the camera, just like in any other Python environment!
+# Use a camera, similar to any other Python environment! `cv.VideoCapture(0)`
+# is not used in MicroPython OpenCV, the driver is initialized separately.
+camera.open()
+success, frame = camera.read()
camera.release()

-# Call `cv.imread()` to read an image from the MicroPython filesystem, just
-# like in any other Python environment! Make sure to copy the image to the
-# MicroPython filesystem first, and set the path to the image file as needed
-#
-# If your board can mount an SD card, you can instead load the image to the SD
-# card and change the path to point to the SD card
-#
-# Note - only BMP and PNG formats are currently supported in MicroPython OpenCV
-img = cv.imread("test_images/sparkfun_logo.png")
-
-# Let's modify the image! Here we use `cv2.Canny()` to perform edge detection
-# on the image, which is a common operation in computer vision
-edges = cv2.Canny(img, 100, 200)
-
-# Now we'll save the modified image to the MicroPython filesystem using
-# `cv.imwrite()`, just like in any other Python environment!
-#
-# Again, SD cards are supported, just change the path to point to the SD card
+# Call `cv.imread()` and `cv.imwrite()` to read and write images to and from
+# the MicroPython filesystem, just like in any other Python environment! Can
+# also point to an SD card.
#
# Note - only BMP and PNG formats are currently supported in MicroPython OpenCV
-success = cv.imwrite("test_images/sparkfun_logo_edges.png", edges)
+img = cv.imread("path/to/image.png")
+success = cv.imwrite("path/to/image.png", img)
```

# Hardware Support and Requirements

Hardware support in this repository is mostly limited to SparkFun products. The current list of supported products is very small, but may be expanded in the future. Users are welcome to fork this repository to add support for other products, following our license requirements. Assistance in adding support for other hardware will not be provided by SparkFun. We may consider pull requests that add support for additional hardware, see [#Contributing](#Contributing).

-The OpenCV firmware adds ~3MiB on top of the standard MicroPython firmware, which itself can be up to 1MiB in size (depending on platform and board). So a board with at least 8MB of flash is recommended, to also have space available for file storage.
+The OpenCV firmware adds over 3MiB on top of the standard MicroPython firmware, which itself can be up to 1MiB in size (depending on platform and board). You'll also want some storage space, so a board with at least 8MB of flash is recommended.

PSRAM is a requirement to do anything useful with OpenCV. A single 320x240 RGB888 frame buffer requires 225KiB of RAM; most processors only have a few hundred KiB of SRAM. Several frame buffers can be needed for even simple vision pipelines, so you really need at least a few MiB of RAM available. The more the merrier!
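That frame buffer arithmetic is easy to sanity-check. Here is a minimal sketch in plain Python (the three-buffer pipeline in the second line is a hypothetical example, not a measured configuration):

```python
# Frame buffer size in KiB: width * height * bytes per pixel.
def frame_buffer_kib(width, height, bytes_per_pixel=3):
    return width * height * bytes_per_pixel / 1024

print(frame_buffer_kib(320, 240))      # 225.0 KiB for one 320x240 RGB888 frame
print(3 * frame_buffer_kib(320, 240))  # 675.0 KiB if a pipeline holds three such buffers
```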
-Below is the list of supported hardware devices: - -* MicroPython Devices - * [XRP Controller](https://www.sparkfun.com/sparkfun-experiential-robotics-platform-xrp-controller.html) -* Camera Drivers - * HM01B0 - * [OV5640](https://www.sparkfun.com/ov5640-camera-board-5-megapixel-2592x1944-fisheye-lens.html) (not fully working yet) -* Display Drivers - * ST7789 -* Touch Screen Drivers - * CST816 +Below is the list of currently supported hardware: + +## MicroPython Devices + +| Status | Device | Notes | +| --- | --- | --- | +| ✔️ | [XRP Controller](https://www.sparkfun.com/sparkfun-experiential-robotics-platform-xrp-controller.html) | | + +## Camera Drivers + +| Status | Device | Notes | +| --- | --- | --- | +| ✔️ | HM01B0 | | +| ⚠️ | [OV5640](https://www.sparkfun.com/ov5640-camera-board-5-megapixel-2592x1944-fisheye-lens.html) | See [#22](https://github.com/sparkfun/micropython-opencv/issues/22) | + +## Display Drivers + +| Status | Device | Notes | +| --- | --- | --- | +| ✔️ | ST7789 | | + +## Touch Screen Drivers + +| Status | Device | Notes | +| --- | --- | --- | +| ✔️ | CST816 | | # Performance From 81f3be7032408f01359f95b1719bfbb1f781b2ee Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Thu, 31 Jul 2025 10:13:28 -0600 Subject: [PATCH 156/158] Fix XRP touch screen drive example Uncomment drive functions Also rename to "touch_screen_drive" --- ...{ex01_touch_drive.py => ex01_touch_screen_drive.py} | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) rename opencv-examples/xrp_examples/{ex01_touch_drive.py => ex01_touch_screen_drive.py} (96%) diff --git a/opencv-examples/xrp_examples/ex01_touch_drive.py b/opencv-examples/xrp_examples/ex01_touch_screen_drive.py similarity index 96% rename from opencv-examples/xrp_examples/ex01_touch_drive.py rename to opencv-examples/xrp_examples/ex01_touch_screen_drive.py index fb409c3..2fb921e 100644 --- a/opencv-examples/xrp_examples/ex01_touch_drive.py +++ b/opencv-examples/xrp_examples/ex01_touch_screen_drive.py @@ -3,7 +3,7 @@ # # Copyright (c) 2025 SparkFun Electronics #------------------------------------------------------------------------------- -# ex01_touch_drive.py +# ex01_touch_screen_drive.py # # This example creates a simple touch screen interface to drive the XRP robot. 
# It creates arrow buttons to drive around, and a stop button to exit the
@@ -138,25 +138,25 @@
     elif (ui_cx - button_cx <= x <= ui_cx + button_cx
             and ui_cy - button_spacing - button_cy <= y <= ui_cy - button_spacing + button_cy):
         print("Forward")
-        # drivetrain.straight(20, 0.5)
+        drivetrain.straight(20, 0.5)
 
     # Check if the backward arrow was pressed
     elif (ui_cx - button_cx <= x <= ui_cx + button_cx
             and ui_cy + button_spacing - button_cy <= y <= ui_cy + button_spacing + button_cy):
         print("Backward")
-        # drivetrain.straight(-20, 0.5)
+        drivetrain.straight(-20, 0.5)
 
     # Check if the right arrow was pressed
     elif (ui_cy - button_cy <= y <= ui_cy + button_cy
             and ui_cx + button_spacing - button_cx <= x <= ui_cx + button_spacing + button_cx):
         print("Right")
-        # drivetrain.turn(-90, 0.5)
+        drivetrain.turn(-90, 0.5)
 
     # Check if the left arrow was pressed
     elif (ui_cy - button_cy <= y <= ui_cy + button_cy
             and ui_cx - button_spacing - button_cx <= x <= ui_cx - button_spacing + button_cx):
         print("Left")
-        # drivetrain.turn(90, 0.5)
+        drivetrain.turn(90, 0.5)
 
     # Check for key presses
     key = cv.waitKey(1)

From afdf23b0463505b87a26d61b726ba81ed5443f08 Mon Sep 17 00:00:00 2001
From: Dryw Wade
Date: Thu, 31 Jul 2025 13:52:35 -0600
Subject: [PATCH 157/158] Update performance section of README.md

---
 README.md | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 6e14446..a7fb3a9 100644
--- a/README.md
+++ b/README.md
@@ -100,13 +100,26 @@

# Performance

-Limit your expectations. OpenCV typically runs on full desktop systems containing processors running at GHz speeds with dozens of cores optimized for computing speed. In contrast, microcontroller processors typically run at a few hundred MHz 1 or 2 cores optimized for low power consumption. Exact performance depends on many things, including the processor, vision pipeline, image resolution, colorspaces used, RAM available, etc. But for reference, the RP2350 can run the SparkFun Logo Detection Example at about 2.5 FPS at 320x240 resolution.
+Limit your expectations. OpenCV typically runs on full desktop systems containing processors running at GHz speeds with dozens of cores optimized for computing speed. In contrast, microcontroller processors typically run at a few hundred MHz with 1 or 2 cores optimized for low power consumption. Exact performance depends on many things, including the processor, vision pipeline, image resolution, colorspaces used, RAM available, etc. But for reference, the RP2350 can run the [SparkFun Logo Detection Example](opencv-examples/ex06_detect_sfe_logo.py) at 2 to 2.5 FPS at 320x240 resolution.

Something to consider is that MicroPython uses a garbage collector for memory management. As images are created and destroyed in a vision pipeline, RAM will be consumed until the garbage collector runs. The collection process takes longer with more RAM, so this can result in noticeable delays during collection (typically a few hundred milliseconds). To mitigate this, it's best to pre-allocate arrays and utilize the optional `dst` argument of OpenCV functions to avoid allocating new arrays when possible. Pre-allocation also helps improve performance by avoiding repeated delays from allocating memory.

+Below are some typical execution times for various OpenCV functions. All were tested on the XRP (RP2350) with the [splash image](opencv-examples/images/splash.png), which is 320x240.
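Timings like those in the table below can be collected with a minimal harness along these lines (a sketch only; the zeroed array is a stand-in for the real test image, and `time.ticks_ms()`/`time.ticks_diff()` are MicroPython's millisecond tick helpers):

```python
# Minimal timing sketch: time one OpenCV call that reuses a pre-allocated
# dst buffer, which avoids a fresh allocation on every call.
import time
import cv2 as cv
from ulab import numpy as np

src = np.zeros((240, 320, 3), dtype=np.uint8)  # stand-in for the test image
dst = np.zeros((240, 320, 3), dtype=np.uint8)  # pre-allocated output buffer

t0 = time.ticks_ms()
dst = cv.blur(src, (5, 5), dst)
print(time.ticks_diff(time.ticks_ms(), t0), "ms")
```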
+ +| Function | Execution Time | +| --- | --- | +| `dst = cv.blur(src, (5, 5))` | 115ms | +| `dst = cv.blur(src, (5, 5), dst)` | 87ms | +| `retval, dst = cv.threshold(src, 127, 255, cv.THRESH_BINARY)` | 76ms | +| `retval, dst = cv.threshold(src, 127, 255, cv.THRESH_BINARY, dst)` | 46ms | +| `dst = cv.cvtColor(src, cv.COLOR_BGR2HSV)` | 114ms | +| `dst = cv.cvtColor(src, cv.COLOR_BGR2HSV, dst)` | 84ms | +| `dst = cv.Canny(src, 100, 200)` | 504ms | +| `dst = cv.Canny(src, 100, 200, dst)` | 482ms | + Another way to improve performance is to select the best hardware drivers for your setup. For example, the default SPI driver for the ST7789 is limited to the max SPI baudrate for the processor's SPI peripheral. That's 24MHz in the case of the RP2350, but another driver is provided that uses the PIO peripheral that runs at 75MHz, so displaying images can be ~3x faster (ignoring any required colorspace conversions). -For users wanting maximum performance, it may be desireable to bypass the high-level functions of the display/camera drivers, and instead work directly with the buffer member variables and read/write functions. This can avoid computationally expensive colorspace conversions when reading and writing images if they're not needed. +For users wanting maximum performance, it may be desireable to bypass the high-level functions of the display/camera drivers, and instead work directly with the buffer member variables and read/write functions. This can avoid computationally expensive colorspace conversions when reading and writing images if they're not needed, but this is for advanced users only. # Included OpenCV Functions From e502357371c50586f0668ba94a3fc234d1c66871 Mon Sep 17 00:00:00 2001 From: Dryw Wade Date: Fri, 1 Aug 2025 17:56:39 -0600 Subject: [PATCH 158/158] Remove non-OpenCV features Refactor for #42 Also move submodules out of src directory --- .github/workflows/build.yml | 20 - .github/workflows/release.yml | 23 - .gitmodules | 11 +- Makefile | 30 +- README.md | 197 ++- build.sh | 181 --- cv2_drivers/__init__.py | 13 - cv2_drivers/cameras/__init__.py | 17 - cv2_drivers/cameras/cv2_camera.py | 45 - cv2_drivers/cameras/dvp_camera.py | 60 - cv2_drivers/cameras/dvp_rp2_pio.py | 182 --- cv2_drivers/cameras/hm01b0.py | 352 ----- cv2_drivers/cameras/hm01b0_pio.py | 86 -- cv2_drivers/cameras/ov5640.py | 1190 ----------------- cv2_drivers/cameras/ov5640_pio.py | 82 -- cv2_drivers/displays/__init__.py | 19 - cv2_drivers/displays/cv2_display.py | 193 --- cv2_drivers/displays/st7789.py | 272 ---- cv2_drivers/displays/st7789_pio.py | 205 --- cv2_drivers/displays/st7789_spi.py | 92 -- cv2_drivers/touch_screens/__init__.py | 12 - cv2_drivers/touch_screens/cst816.py | 165 --- cv2_drivers/touch_screens/cv2_touch_screen.py | 21 - manifest.py | 9 - micropython | 1 - ...encv_upy.cmake => micropython_opencv.cmake | 18 +- src/opencv/opencv => opencv | 0 opencv-examples/cv2_hardware_init/__init__.py | 38 - opencv-examples/cv2_hardware_init/bus_i2c.py | 11 - opencv-examples/cv2_hardware_init/bus_spi.py | 13 - opencv-examples/cv2_hardware_init/camera.py | 40 - opencv-examples/cv2_hardware_init/display.py | 37 - opencv-examples/cv2_hardware_init/sd_card.py | 50 - .../cv2_hardware_init/touch_screen.py | 16 - opencv-examples/ex01_hello_opencv.py | 83 -- opencv-examples/ex02_camera.py | 52 - opencv-examples/ex03_touch_screen.py | 70 - opencv-examples/ex04_imread_imwrite.py | 61 - opencv-examples/ex05_performance.py | 137 -- opencv-examples/ex06_detect_sfe_logo.py | 193 --- opencv-examples/ex07_animation.py 
| 61 - opencv-examples/images/animation_sheet.png | Bin 8487 -> 0 bytes opencv-examples/images/splash.png | Bin 18396 -> 0 bytes .../xrp_examples/ex01_touch_screen_drive.py | 169 --- .../xrp_examples/ex02_grab_orange_ring.py | 212 --- .../platforms => platforms}/common.cmake | 0 platforms/include/rp2350_unsafe_cv_xadd.h | 20 + .../include/zephyr_stdint.h | 0 .../rp2350.toolchain.cmake | 0 src/convert.h | 2 +- src/core.cpp | 2 +- src/highgui.cpp | 2 +- src/imgcodecs.cpp | 2 +- src/imgproc.cpp | 2 +- src/numpy.h | 2 +- src/opencv/Makefile | 16 - .../platforms/include/rp2350_unsafe_cv_xadd.h | 13 - src/ulab => ulab | 0 58 files changed, 134 insertions(+), 4666 deletions(-) delete mode 100644 .github/workflows/build.yml delete mode 100644 .github/workflows/release.yml delete mode 100644 build.sh delete mode 100644 cv2_drivers/__init__.py delete mode 100644 cv2_drivers/cameras/__init__.py delete mode 100644 cv2_drivers/cameras/cv2_camera.py delete mode 100644 cv2_drivers/cameras/dvp_camera.py delete mode 100644 cv2_drivers/cameras/dvp_rp2_pio.py delete mode 100644 cv2_drivers/cameras/hm01b0.py delete mode 100644 cv2_drivers/cameras/hm01b0_pio.py delete mode 100644 cv2_drivers/cameras/ov5640.py delete mode 100644 cv2_drivers/cameras/ov5640_pio.py delete mode 100644 cv2_drivers/displays/__init__.py delete mode 100644 cv2_drivers/displays/cv2_display.py delete mode 100644 cv2_drivers/displays/st7789.py delete mode 100644 cv2_drivers/displays/st7789_pio.py delete mode 100644 cv2_drivers/displays/st7789_spi.py delete mode 100644 cv2_drivers/touch_screens/__init__.py delete mode 100644 cv2_drivers/touch_screens/cst816.py delete mode 100644 cv2_drivers/touch_screens/cv2_touch_screen.py delete mode 100644 manifest.py delete mode 160000 micropython rename src/opencv_upy.cmake => micropython_opencv.cmake (84%) rename src/opencv/opencv => opencv (100%) delete mode 100644 opencv-examples/cv2_hardware_init/__init__.py delete mode 100644 opencv-examples/cv2_hardware_init/bus_i2c.py delete mode 100644 opencv-examples/cv2_hardware_init/bus_spi.py delete mode 100644 opencv-examples/cv2_hardware_init/camera.py delete mode 100644 opencv-examples/cv2_hardware_init/display.py delete mode 100644 opencv-examples/cv2_hardware_init/sd_card.py delete mode 100644 opencv-examples/cv2_hardware_init/touch_screen.py delete mode 100644 opencv-examples/ex01_hello_opencv.py delete mode 100644 opencv-examples/ex02_camera.py delete mode 100644 opencv-examples/ex03_touch_screen.py delete mode 100644 opencv-examples/ex04_imread_imwrite.py delete mode 100644 opencv-examples/ex05_performance.py delete mode 100644 opencv-examples/ex06_detect_sfe_logo.py delete mode 100644 opencv-examples/ex07_animation.py delete mode 100644 opencv-examples/images/animation_sheet.png delete mode 100644 opencv-examples/images/splash.png delete mode 100644 opencv-examples/xrp_examples/ex01_touch_screen_drive.py delete mode 100644 opencv-examples/xrp_examples/ex02_grab_orange_ring.py rename {src/opencv/platforms => platforms}/common.cmake (100%) create mode 100644 platforms/include/rp2350_unsafe_cv_xadd.h rename {src/opencv/platforms => platforms}/include/zephyr_stdint.h (100%) rename {src/opencv/platforms => platforms}/rp2350.toolchain.cmake (100%) delete mode 100644 src/opencv/Makefile delete mode 100644 src/opencv/platforms/include/rp2350_unsafe_cv_xadd.h rename src/ulab => ulab (100%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml deleted file mode 100644 index fbb9696..0000000 --- a/.github/workflows/build.yml +++ /dev/null @@ 
-1,20 +0,0 @@ -name: Build Firmware - -on: - pull_request: - branches: - - main - push: - branches: - - features_for_launch - -jobs: - build: - runs-on: ubuntu-22.04 - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - submodules: true - - name: Build Firmware - run: source build.sh && build_micropython_opencv diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index df235a7..0000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Build and Deploy Firmware Release - -on: - release: - types: [created] - -jobs: - build: - runs-on: ubuntu-22.04 - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - submodules: true - - name: Build Firmware - run: source build.sh && build_micropython_opencv - - name: Upload Release Assets - uses: shogo82148/actions-upload-release-asset@v1 - with: - asset_path: "micropython/ports/rp2/build-SPARKFUN_XRP_CONTROLLER-LARGE_BINARY/MICROPYTHON_OPENCV_SPARKFUN_XRP_CONTROLLER.uf2" - github_token: ${{ secrets.GITHUB_TOKEN }} - upload_url: ${{ github.event.release.upload_url }} - diff --git a/.gitmodules b/.gitmodules index 860523c..0968144 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,9 +1,6 @@ -[submodule "src/opencv/opencv"] - path = src/opencv/opencv +[submodule "opencv"] + path = opencv url = https://github.com/opencv/opencv.git -[submodule "src/ulab"] - path = src/ulab +[submodule "ulab"] + path = ulab url = https://github.com/v923z/micropython-ulab.git -[submodule "micropython"] - path = micropython - url = https://github.com/sparkfun/micropython.git diff --git a/Makefile b/Makefile index b73912c..3d1cfd6 100644 --- a/Makefile +++ b/Makefile @@ -1,26 +1,16 @@ -# Set Pico SDK flags to create our own malloc wrapper and enable exceptions -CMAKE_ARGS += -DSKIP_PICO_MALLOC=1 -DPICO_CXX_ENABLE_EXCEPTIONS=1 +ifndef PLATFORM +$(error PLATFORM not specified. Use 'make PLATFORM=rp2350' or similar.) +endif -# Get current directory -CURRENT_DIR = $(shell pwd) +TOOLCHAIN_FILE = platforms/${PLATFORM}.toolchain.cmake -# Set the MicroPython user C module path to the OpenCV module -MAKE_ARGS = USER_C_MODULES="$(CURRENT_DIR)/src/opencv_upy.cmake" +# TODO: For some reason, specifying this in the toolchain file doesn't work +CMAKE_ARGS += -DBUILD_LIST=core,imgproc,imgcodecs -# Ensure we're building the OpenCV board variant -MAKE_ARGS += BOARD_VARIANT=LARGE_BINARY - -# Use the OpenCV driver manifest -MAKE_ARGS += FROZEN_MANIFEST="$(CURRENT_DIR)/manifest.py" - -# Build MicroPython with the OpenCV module +# Generic build all: - @cd micropython/ports/rp2 && export CMAKE_ARGS="$(CMAKE_ARGS)" && make -f Makefile $(MAKEFLAGS) $(MAKE_ARGS) + cd opencv && mkdir -p build && cmake -S . -B build -DPICO_BUILD_DOCS=0 -DCMAKE_TOOLCHAIN_FILE=../${TOOLCHAIN_FILE} ${CMAKE_ARGS} && make -C build -f Makefile $(MAKEFLAGS) $(MAKE_ARGS) -# Clean the MicroPython build +# Clean the OpenCV build clean: - @cd micropython/ports/rp2 && make -f Makefile $(MAKEFLAGS) $(MAKE_ARGS) clean - -# Load the MicroPython submodules -submodules: - @cd micropython/ports/rp2 && make -f Makefile $(MAKEFLAGS) $(MAKE_ARGS) submodules + cd opencv && rm -rf build diff --git a/README.md b/README.md index a7fb3a9..46eab35 100644 --- a/README.md +++ b/README.md @@ -1,110 +1,59 @@ -


+

-# MicroPython-OpenCV
+# SparkFun MicroPython-OpenCV

-Welcome to SparkFun's MicroPython port of OpenCV! This is the first known MicroPython port of OpenCV, and as such, there may be some rough edges. Hardware support is limited to SparkFun products.
+Welcome to SparkFun's MicroPython port of OpenCV! This is the first known MicroPython port of OpenCV, which opens up a whole new world of vision processing abilities on embedded devices in a Python environment!

-# Quick Start
+As the first port, there may be incomplete or missing features, and some rough edges. For example, we have only implemented support for the Raspberry Pi RP2350 so far, and some of the build procedures are hard-coded for that. We'd be happy to work with the community to create an official port in the future, but until then, this repo is available and fully open-source for anyone to use!

-1. Flash MicroPython-OpenCV firmware
- * Back up any files you want to keep, they *will* be overwritten!
- * Download the latest firmware for your board from the [Releases tab](https://github.com/sparkfun/micropython-opencv/releases).
- * If you don't know how to flash firmware to your board, find your board [here](https://micropython.org/download/) and follow the instructions using the OpenCV firmware.
- * After first boot, the [opencv-examples](opencv-examples) directory will be automatically extracted to the MicroPython filesystem for easy access to all the examples.
-2. Configure hardware driver initialization
- * The MicroPython port of OpenCV depends on hardware drivers to interface with cameras and displays. Drivers are built into the firmware, so there is no need to install them manually.
- * An example module called [cv2_hardware_init](opencv-examples/cv2_hardware_init/) is imported by all examples to initialize the drivers. You will likely need to edit the files for your specific hardware and board configuration.
-3. Write and run OpenCV code
- * Any IDE should work, so use your favorite!
- * Start with the examples! Go through them in order; they will verify your hardware is working and demonstrate some basics of OpenCV. Read the comments to understand the differences with the MicroPython port.
- * The code block below contains snippets to highlight major features.
+# Example Snippets
+
+Below are example code snippets of features available in this port of OpenCV. We've done our best to make it as similar as possible to standard OpenCV, but there are some necessary API changes due to the limitations of MicroPython.

```python
# Import OpenCV, just like any other Python environment!
import cv2 as cv

-# Initialize hardware drivers by importing the example module (you'll likely
-# need to modify it for your specific hardware configuration).
-from cv2_hardware_init import *
-
# Import ulab NumPy and initialize an image, almost like any other Python
-# environment!
+# environment! 
from ulab import numpy as np
img = np.zeros((240, 320, 3), dtype=np.uint8)

-# Call OpenCV functions just like any other Python environment!
+# Call OpenCV functions, just like standard OpenCV!
img = cv.putText(img, "Hello OpenCV!", (50, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
img = cv.Canny(img, 100, 200)

-# Call `cv.imshow()`, almost like any other Python environment! Instead of a
-# window name string, you pass a display driver that implements an `imshow()`
-# method that takes a NumPy array as input
+# Call `cv.imshow()`, almost like standard OpenCV! 
Instead of passing a window
+# name string, you pass a display driver that implements an `imshow()` method
+# that takes a NumPy array as input.
cv.imshow(display, img)

-# Call `cv.waitKey()`, just like any other Python environment! This waits for a
-# key press on the REPL. Standard OpenCV requires this to update windows, but
-# MicroPython OpenCV does not.
+# Call `cv.waitKey()`, just like standard OpenCV! Unlike standard OpenCV, this
+# waits for a key press on the REPL instead of a window, and it is not necessary
+# to call after `cv.imshow()` because display drivers show images immediately.
key = cv.waitKey(0)

-# Use a camera, similar to any other Python environment! `cv.VideoCapture(0)`
-# is not used in MicroPython OpenCV, the driver is initialized separately.
+# Use a camera, similar to standard OpenCV! `cv.VideoCapture()` is not used in
+# MicroPython-OpenCV, because a separate camera driver that implements the same
+# methods as the OpenCV `VideoCapture` class must be initialized separately.
camera.open()
success, frame = camera.read()
camera.release()

# Call `cv.imread()` and `cv.imwrite()` to read and write images to and from
-# the MicroPython filesystem, just like in any other Python environment! Can
-# also point to an SD card.
-#
-# Note - only BMP and PNG formats are currently supported in MicroPython OpenCV
+# the MicroPython filesystem, just like standard OpenCV! It can also point to an
+# SD card if one is mounted for extra storage space.
img = cv.imread("path/to/image.png")
success = cv.imwrite("path/to/image.png", img)
```

-# Hardware Support and Requirements
-
-Hardware support in this repository is mostly limited to SparkFun products. The current list of supported products is very small, but may be expanded in the future. Users are welcome to fork this repository to add support for other products, following our license requirements. Assistance in adding support for other hardware will not be provided by SparkFun. We may consider pull requests that add support for additional hardware, see [#Contributing](#Contributing).
-
-The OpenCV firmware adds over 3MiB on top of the standard MicroPython firmware, which itself can be up to 1MiB in size (depending on platform and board). You'll also want some storage space, so a board with at least 8MB of flash is recommended.
-
-PSRAM is a requirement to do anything useful with OpenCV. A single 320x240 RGB888 frame buffer requires 225KiB of RAM; most processors only have a few hundred KiB of SRAM. Several frame buffers can be needed for even simple vision pipelines, so you really need at least a few MiB of RAM available. The more the merrier!
-
-Below is the list of currently supported hardware:
-
-## MicroPython Devices
-
-| Status | Device | Notes |
-| --- | --- | --- |
-| ✔️ | [XRP Controller](https://www.sparkfun.com/sparkfun-experiential-robotics-platform-xrp-controller.html) | |
-
-## Camera Drivers
-
-| Status | Device | Notes |
-| --- | --- | --- |
-| ✔️ | HM01B0 | |
-| ⚠️ | [OV5640](https://www.sparkfun.com/ov5640-camera-board-5-megapixel-2592x1944-fisheye-lens.html) | See [#22](https://github.com/sparkfun/micropython-opencv/issues/22) |
-
-## Display Drivers
-
-| Status | Device | Notes |
-| --- | --- | --- |
-| ✔️ | ST7789 | |
-
-## Touch Screen Drivers
-
-| Status | Device | Notes |
-| --- | --- | --- |
-| ✔️ | CST816 | |
+For full examples, see our [Red Vision repo](https://github.com/sparkfun/red_vision).

# Performance

-Limit your expectations. 
OpenCV typically runs on full desktop systems containing processors running at GHz speeds with dozens of cores optimized for computing speed. In contrast, microcontroller processors typically run at a few hundred MHz with 1 or 2 cores optimized for low power consumption. Exact performance depends on many things, including the processor, vision pipeline, image resolution, colorspaces used, RAM available, etc. But for reference, the RP2350 can run the [SparkFun Logo Detection Example](opencv-examples/ex06_detect_sfe_logo.py) at 2 to 2.5 FPS at 320x240 resolution.
+Limit your expectations. OpenCV typically runs on full desktop systems containing processors running at GHz speeds with dozens of cores optimized for computing speed and GB of RAM. In contrast, microcontroller processors typically run at a few hundred MHz with 1 or 2 cores optimized for low power consumption with a few MB of RAM. Exact performance depends on many things, including the processor, vision pipeline, image resolution, colorspaces used, RAM available, etc.

-Something to consider is that MicroPython uses a garbage collector for memory management. As images are created and destroyed in a vision pipeline, RAM will be consumed until the garbage collector runs. The collection process takes longer with more RAM, so this can result in noticeable delays during collection (typically a few hundred milliseconds). To mitigate this, it's best to pre-allocate arrays and utilize the optional `dst` argument of OpenCV functions to avoid allocating new arrays when possible. Pre-allocation also helps improve performance by avoiding repeated delays from allocating memory.
+If you want best performance, keep in mind that MicroPython uses a garbage collector for memory management. If images are repeatedly created in a vision pipeline, RAM will be consumed until the garbage collector runs. The collection process takes longer with more RAM, so this can result in noticeable delays during collection (typically a few hundred milliseconds). To mitigate this, it's best to pre-allocate arrays and utilize the optional `dst` argument of OpenCV functions so memory consumption is minimized. Pre-allocation also helps improve performance, because allocating memory takes time.

-Below are some typical execution times for various OpenCV functions. All were tested on the XRP (RP2350) with the [splash image](opencv-examples/images/splash.png), which is 320x240.
+Below are some typical execution times for various OpenCV functions. All were tested on a Raspberry Pi RP2350 with a 320x240 test image.

| Function | Execution Time |
| --- | --- |
@@ -117,17 +66,13 @@ Below are some typical execution times for various OpenCV functions. All were te
| `dst = cv.Canny(src, 100, 200)` | 504ms |
| `dst = cv.Canny(src, 100, 200, dst)` | 482ms |

-Another way to improve performance is to select the best hardware drivers for your setup. For example, the default SPI driver for the ST7789 is limited to the max SPI baudrate for the processor's SPI peripheral. That's 24MHz in the case of the RP2350, but another driver is provided that uses the PIO peripheral that runs at 75MHz, so displaying images can be ~3x faster (ignoring any required colorspace conversions).
-
-For users wanting maximum performance, it may be desireable to bypass the high-level functions of the display/camera drivers, and instead work directly with the buffer member variables and read/write functions. 
This can avoid computationally expensive colorspace conversions when reading and writing images if they're not needed, but this is for advanced users only.

# Included OpenCV Functions

Below is a list of all OpenCV functions included in the MicroPython port of OpenCV. This section follows OpenCV's module structure. Only the most useful OpenCV functions are included. The MicroPython environment is *extremely* limited, so many functions are omitted due to prohibitively high RAM and firmware size requirements. Other less useful functions have been omitted to reduce firmware size. If there are additional functions you'd like to be included, see [#Contributing](#Contributing).

-If you need help understanding how to use these functions, see the documentation link for each function. You can also check out [OpenCV's Python Tutorials](https://docs.opencv.org/4.11.0/d6/d00/tutorial_py_root.html) and other tutorials online for more educational experience. This repository is simply a port of OpenCV, so we do not document these functions or how to use them, except for deviations from standard Python OpenCV.
+If you need help understanding how to use these functions, see the documentation link for each function. You can also check out [OpenCV's Python Tutorials](https://docs.opencv.org/4.11.0/d6/d00/tutorial_py_root.html) and other tutorials online for a more educational experience. This repository is simply a port of OpenCV, so we do not document these functions or how to use them, except for deviations from standard OpenCV.

## [`core`](https://docs.opencv.org/4.11.0/d0/de1/group__core.html)

@@ -248,72 +193,100 @@ If you need help understanding how to use these functions, see the documentation

| `cv.waitKey([, delay]) -> retval`
Waits for a pressed key.
[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga5628525ad33f52eab17feebcfba38bd7) | Input is taken from `sys.stdin`, which is typically the REPL. | | `cv.waitKeyEx([, delay]) -> retval`
Similar to waitKey, but returns full key code.
[Documentation](https://docs.opencv.org/4.11.0/d7/dfc/group__highgui.html#ga5628525ad33f52eab17feebcfba38bd7) | Input is taken from `sys.stdin`, which is typically the REPL.
Full key code is implementation specific, so special key codes in MicroPython will not match other Python environments. |

+# Hardware Drivers
+
+Standard OpenCV leverages the host operating system to access hardware, like creating windows and accessing cameras. MicroPython does not have that luxury, so instead, drivers must be implemented for these hardware devices. Take a look at our [Red Vision repo](https://github.com/sparkfun/red_vision) for examples. This leads to necessary API changes for functions like `cv.imshow()`.
+
+# MicroPython Board Requirements
+
+As of writing, the OpenCV firmware adds over 3MiB on top of the standard MicroPython firmware, which itself can be up to 1MiB in size (depending on platform and board). You'll also want some storage space, so a board with at least 8MB of flash is recommended.
+
+PSRAM is basically a requirement to do anything useful with OpenCV. A single 320x240 RGB888 frame buffer requires 225KiB of RAM; most microcontrollers only have a few hundred KiB of SRAM. Several frame buffers can be needed for even simple vision pipelines, so you really need at least a few MiB of RAM available. The more the merrier!
+
# Building

Below are instructions to build the MicroPython-OpenCV firmware from scratch. Instructions are only provided for Linux systems.

-1. Install dependencies
- * `sudo apt install cmake python3 build-essential gcc-arm-none-eabi libnewlib-arm-none-eabi libstdc++-arm-none-eabi-newlib`
-2. Clone this repo
- * `cd ~`
- * `git clone https://github.com/sparkfun/micropython-opencv.git`
- * `cd micropython-opencv`
- * `git submodule update --init`
-3. Build mpy-cross
- * `make -C micropython/mpy-cross`
-4. Clone submodules for your board
- * `make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER submodules`
+> [!NOTE]
+> This build process does not include any hardware drivers; see our [Red Vision repo](https://github.com/sparkfun/red_vision) for example drivers.
+
+> [!NOTE]
+> Because OpenCV dramatically increases the firmware size, it may be necessary to define board variants that reduce the storage size to avoid it overlapping with the firmware. See [#Adding New Boards](#Adding-New-Boards).
+
+1. Clone this repo and MicroPython
+ * ```
+ cd ~
+ git clone https://github.com/sparkfun/micropython-opencv.git
+ git clone https://github.com/micropython/micropython.git
+ ```
+2. Build the MicroPython cross-compiler
+ * ```
+ make -C micropython/mpy-cross -j4
+ ```
+3. Clone MicroPython submodules for your board
+ * ```
+ make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER submodules
+ ```
 * Replace `rp2` and `SPARKFUN_XRP_CONTROLLER` with your platform and board name respectively
-5. Set environment variables (optional)
- * Some platforms require environment variables to be set. Examples:
- * `export PICO_SDK_PATH=~/micropython-opencv/micropython/lib/pico-sdk`
-6. Build OpenCV
- * `make -C src/opencv PLATFORM=rp2350 --no-print-directory -j4`
+4. Set environment variables (if needed)
+ * Some platforms require environment variables to be set. Example:
+ * ```
+ export PICO_SDK_PATH=~/micropython/lib/pico-sdk
+ ```
+5. Build OpenCV for your platform
+ * ```
+ make -C micropython-opencv PLATFORM=rp2350 --no-print-directory -j4
+ ```
 * Replace `rp2350` with your board's platform
-7. Build firmware
- * `make BOARD=SPARKFUN_XRP_CONTROLLER -j4`
- * Replace `SPARKFUN_XRP_CONTROLLER` with your board name
- * Your firmware file(s) will be located in `~/micropython-opencv/micropython/ports/<port>/build-<board>-OPENCV/`
+6. 
Build MicroPython-OpenCV firmware for your board
+ * ```
+ export CMAKE_ARGS="-DSKIP_PICO_MALLOC=1 -DPICO_CXX_ENABLE_EXCEPTIONS=1" && make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER USER_C_MODULES=~/micropython-opencv/micropython_opencv.cmake -j4
+ ```
+ * Replace `rp2` and `SPARKFUN_XRP_CONTROLLER` with your platform and board name respectively
+ * Replace the `CMAKE_ARGS` contents with whatever is required for your board's platform
+ * Your firmware file(s) will be located in `~/micropython/ports/<port>/build-<board>/`

-# Adding New Board
+# Adding New Boards

-Because OpenCV adds ~3MiB to the firmware size, it is necessary to define variants that reduce the storage size to avoid it overlapping with the firmware. It is also beneficial to adjust the board name to include `OpenCV` (or similar) to help customers and tech support identify whether the MicroPython-OpenCV is actually flashed to the board.
+Because OpenCV dramatically increases the firmware size, it may be necessary to define board variants that reduce the storage size to avoid it overlapping with the firmware. It is also beneficial to adjust the board name to include `OpenCV` or similar to help people identify that the MicroPython-OpenCV firmware is flashed to the board instead of standard MicroPython.

-Below is the variant for the XRP Controller as an example. The variant is defined by creating a file called `mpconfigvariant_OPENCV.cmake` in [`micropython/ports/rp2/boards/SPARKFUN_XRP_CONTROLLER`](https://github.com/sparkfun/micropython/blob/7e728e8c6aad74ca244183f3e0705db6f332abd9/ports/rp2/boards/SPARKFUN_XRP_CONTROLLER/mpconfigvariant_LARGE_BINARY.cmake) with contents:
+Below is the variant for the XRP Controller as an example. The variant is defined by creating a file called [`micropython/ports/rp2/boards/SPARKFUN_XRP_CONTROLLER/mpconfigvariant_RED_VISION.cmake`](https://github.com/sparkfun/micropython/blob/7e728e8c6aad74ca244183f3e0705db6f332abd9/ports/rp2/boards/SPARKFUN_XRP_CONTROLLER/mpconfigvariant_LARGE_BINARY.cmake) with contents:

```
list(APPEND MICROPY_DEF_BOARD
    # Board name
-    "MICROPY_HW_BOARD_NAME=\"SparkFun XRP Controller (OpenCV)\""
+    "MICROPY_HW_BOARD_NAME=\"SparkFun XRP Controller (Red Vision)\""
    # 8MB (8 * 1024 * 1024)
    "MICROPY_HW_FLASH_STORAGE_BYTES=8388608"
)
```

-Some board definitions do not have `#ifndef` wrappers in `mpconfigboard.h` for `MICROPY_HW_BOARD_NAME` and `MICROPY_HW_FLASH_STORAGE_BYTES`. That should be added if needed so the variant can build properly.
+Some board definitions do not have `#ifndef` wrappers in `mpconfigboard.h` for `MICROPY_HW_BOARD_NAME` and `MICROPY_HW_FLASH_STORAGE_BYTES`. They should be added if needed so the variant can build properly.

-Then, the firmware can be built with `make BOARD=<board> -j4`
+Then, the firmware can be built by adding `BOARD_VARIANT=<variant>` to the `make` command when building the MicroPython-OpenCV firmware.

-# Adding New Platform
+# Adding New Platforms

-Only RP2350 exists currently, so the full requirements for adding new platforms are not yet known. However, it should be along the lines of:
+Only support for the Raspberry Pi RP2350 has been figured out, so the full requirements for adding new platforms are not yet known. However, it should be along the lines of:

1. Create a valid toolchain file for the platform
 * See [rp2350.toolchain.cmake](src/opencv/platforms/rp2350.toolchain.cmake) for reference
 * This loosely follows [OpenCV's platform definitions](https://github.com/opencv/opencv/tree/4.x/platforms)
2. 
Ensure OpenCV builds correctly
- * `make -C src/opencv PLATFORM=<platform> --no-print-directory -j4`
+ * ```
+ make -C micropython-opencv PLATFORM=<platform> --no-print-directory -j4
+ ```
3. Create new board(s) for that platform
- * See [#Adding New Board](#Adding-New-Board)
+ * See [#Adding New Boards](#Adding-New-Boards)
4. Build MicroPython-OpenCV firmware for that board
- * `make BOARD=<board> -j4`
+ * ```
+ make -C micropython/ports/rp2 BOARD=<board> USER_C_MODULES=micropython-opencv/micropython_opencv.cmake -j4
+ ```

# Contributing

-Found a bug? Is there a discrepancy between standard OpenCV and MicroPython-OpenCV? Have a feature request? Want support for other hardware?
+Found a bug? Is there a discrepancy between standard OpenCV and MicroPython-OpenCV? Have a feature request?

First, please see if there is an [existing issue](https://github.com/sparkfun/micropython-opencv/issues). If not, then please [open a new issue](https://github.com/sparkfun/micropython-opencv/issues/new) so we can discuss the topic!

Pull requests are welcome! Please keep the scope of your pull request focused (make separate ones if needed), and keep file changes limited to the scope of your pull request.
-
-Keep in mind that we only intend to support SparkFun products in this repository, though we may be open to hosting support for some hardware from other vendors. Please first open an issue to check if we're open to it. If not, you're always welcome to create your own fork following our license requirements!
diff --git a/build.sh b/build.sh
deleted file mode 100644
index ca7ff35..0000000
--- a/build.sh
+++ /dev/null
@@ -1,181 +0,0 @@
-if which nproc > /dev/null; then
-    MAKEOPTS="-j$(nproc)"
-else
-    MAKEOPTS="-j$(sysctl -n hw.ncpu)"
-fi
-
-# TODO: Could also make these opts into the build_micropython_opencv function if we care...
-FROZEN_MODULES_DIR="$(dirname "$0")/frozen_modules"
-FROZEN_EXAMPLES_ARCHIVE_SCRIPT="frozen_examples.py"
-FROZEN_EXAMPLES_UNPACKED_DIR="opencv-examples"
-PERSISTENT_FILE_FOR_UNPACK="/${FROZEN_EXAMPLES_UNPACKED_DIR}/reset_examples.txt"
-
-# Uses freezefs to create a frozen filesystem archive for the provided directory.
-# See https://github.com/bixb922/freezefs for more details on freezefs
-# Options:
-    # $1: The directory to freeze
-    # $2: The name that you want the frozen directory to have once unpacked on the board
-    # $3: The output file name for the frozen archive .py file
-function create_frozen_fs {
-    local DIR_TO_FREEZE=$1
-    local DIR_NAME_ON_BOARD=$2
-    local OUTPUT_FILE=$3
-
-    echo "Creating frozen filesystem for directory: $DIR_TO_FREEZE"
-    echo "The frozen directory will be named: $DIR_NAME_ON_BOARD"
-    echo "The output file will be: $OUTPUT_FILE"
-
-    if [ $DIR_TO_FREEZE != $DIR_NAME_ON_BOARD ]; then
-        cp -r $DIR_TO_FREEZE $DIR_NAME_ON_BOARD
-    fi
-
-    # Use on-import=extract so our frozen filesystem is unpacked to '/' in flash on import
-    # Use --compress to compress the frozen filesystem archive
-    # Use --overwrite always to ensure that the frozen filesystem is returned to factory state if the persistent file is deleted
-
-    python -m freezefs $DIR_NAME_ON_BOARD $OUTPUT_FILE --on-import=extract --compress --overwrite always
-}
-
-# Adds the provided directory to the manifest file for the specified port and board.
-# Options:
-# $1: The directory to add to the manifest
-# $2: The port (e.g. rp2)
-# $3: The board (e.g. SPARKFUN_XRP_CONTROLLER)
-# $4: The mpconfigboard file name (e.g. 
mpconfigboard.cmake or mpconfigboard.m) Default: mpconfigboard.cmake -function add_to_manifest { - local DIR=$1 - local PORT=$2 - local BOARD=$3 - local MPCONFIG_FILE="${4:-mpconfigboard.cmake}" - - # Add the directory to the manifest file - echo "Adding $DIR to the manifest for $PORT on $BOARD using $MPCONFIG_FILE" - local BOARD_DIR="micropython/ports/${PORT}/boards/${BOARD}" - - # Create manifest.py if it doesn't exist - if [ ! -f ${BOARD_DIR}/manifest.py ]; then - echo "include(\"\$(PORT_DIR)/boards/manifest.py\")" > ${BOARD_DIR}/manifest.py - - # also add the necessary frozen manifest line to mpconfigboard.cmake: set(MICROPY_FROZEN_MANIFEST ${MICROPY_BOARD_DIR}/manifest.py) - # We will use the optional MPCONFIG_FILE argument to determine if we should add this line - - if [ -n "$MPCONFIG_FILE" ]; then - echo "Attempting to add frozen manifest line to $MPCONFIG_FILE for $BOARD" - - if [[ $MPCONFIG_FILE == *.mk ]]; then - # e.g. for TEENSY which uses mpconfigboard.mk instead of mpconfigboard.cmake - printf "\nFROZEN_MANIFEST ?= \$(BOARD_DIR)/manifest.py" >> ${BOARD_DIR}/$MPCONFIG_FILE - elif [[ $MPCONFIG_FILE == *.cmake ]]; then - printf "\nset(MICROPY_FROZEN_MANIFEST \"\${MICROPY_BOARD_DIR}/manifest.py\")" >> ${BOARD_DIR}/$MPCONFIG_FILE - fi - fi - fi - - # Add the freeze line to the manifest.py for the board - echo "Adding freeze line to manifest.py for $BOARD" - printf "\nfreeze(\"${DIR}\")" >> ${BOARD_DIR}/manifest.py - - # Helpful for debugging during the build process, but can be removed if we'd rather not see this output... - echo "Manifest.py for $BOARD:" - cat ${BOARD_DIR}/manifest.py -} - -# Adds the frozen data filesystem to the _boot.py file for the given port -# Options: - # $1: Port name - # $2: Frozen data file path - # $3: Unpacked directory name on the board (optional). If provided, the modules in this directory will be made importable -function add_frozen_data_to_boot_for_port { - local TARGET_PORT_NAME=$1 - local FROZEN_DATA_FILE=$2 - local UNPACKED_DIR=$3 - - # Remove the ".py" extension from the frozen data file - local FROZEN_DATA_BASENAME=$(basename $FROZEN_DATA_FILE .py) - - # Check if the _boot.py file exists in the port's modules directory and error out if it does not - if [ ! -f micropython/ports/${TARGET_PORT_NAME}/modules/_boot.py ]; then - echo "Error: _boot.py file not found in ports/${TARGET_PORT_NAME}/modules/" - exit 1 - fi - - # Add the frozen data filesystem to the _boot.py file - local BOOT_FILE="micropython/ports/${TARGET_PORT_NAME}/modules/_boot.py" - - # Create our "persistent file for unpack" that will be used to check if the frozen data filesystem has already been unpacked - # If it has not been unpacked, we will import the frozen data filesystem - echo "Adding frozen data filesystem to ${BOOT_FILE}" - echo "import os" >> ${BOOT_FILE} - echo "try:" >> ${BOOT_FILE} - echo " os.stat('${PERSISTENT_FILE_FOR_UNPACK}')" >> ${BOOT_FILE} - echo "except OSError:" >> ${BOOT_FILE} - echo " import ${FROZEN_DATA_BASENAME}" >> ${BOOT_FILE} - echo " with open('${PERSISTENT_FILE_FOR_UNPACK}', 'w') as f:" >> ${BOOT_FILE} - echo " f.write('Hi! The firmware has this directory frozen into the firmware, and the _boot.py\\n')" >> ${BOOT_FILE} - echo " f.write('file has been modified to automatically unpack this directory if needed. As long\\n')" >> ${BOOT_FILE} - echo " f.write('as this file exists, it will not unpack the directory, meaning you can safely\\n')" >> ${BOOT_FILE} - echo " f.write('edit the files here or delete all other files to free up storage space. 
If you\\n')" >> ${BOOT_FILE} - echo " f.write('want to restore this directory to its default state, delete this file and the\\n')" >> ${BOOT_FILE} - echo " f.write('directory will be unpacked again on the next boot.\\n')" >> ${BOOT_FILE} - echo " f.write('\\n')" >> ${BOOT_FILE} - echo " f.write('WARNING: Deleting this file will override ALL changes to this directory!')" >> ${BOOT_FILE} - - # If a destination directory is provided, we will add it to the sys.path so that the modules in the unpacked directory can be imported - if [ -n "$UNPACKED_DIR" ]; then - echo "Adding ${UNPACKED_DIR} to sys.path in _boot.py" - echo "import sys" >> ${BOOT_FILE} - echo "sys.path.append('/${UNPACKED_DIR}')" >> ${BOOT_FILE} - fi - - # Helpful for debugging during the build process, but can be removed if we'd rather not see this output... - echo "Content of _boot.py after adding frozen data filesystem:" - cat micropython/ports/${TARGET_PORT_NAME}/modules/_boot.py -} - -# Installs necessary dependencies and builds OpenCV and the firmware -# Also freezes the examples directory in a filesystem archive on the board -function build_micropython_opencv { - # Install necessary packages (Could move into an install_dependencies.sh if we want this to be more explicit/modular) - sudo apt update - sudo apt install cmake python3 build-essential gcc-arm-none-eabi libnewlib-arm-none-eabi libstdc++-arm-none-eabi-newlib - # Install necessary python packages (could also move this to a requirements.txt file) - pip install freezefs - - # Create a directory for frozen modules, we can add arbitrary .py files to this directory in the future. - # For now it will just contain the archived examples script. - mkdir "$FROZEN_MODULES_DIR" - - # Create our frozen filesystem archive for the examples directory - create_frozen_fs "opencv-examples" "$FROZEN_EXAMPLES_UNPACKED_DIR" "$FROZEN_MODULES_DIR/$FROZEN_EXAMPLES_ARCHIVE_SCRIPT" - - # Add necessary content to the manifest file to freeze the modules in the provided directory - add_to_manifest "$FROZEN_MODULES_DIR" "rp2" "SPARKFUN_XRP_CONTROLLER" "mpconfigvariant_LARGE_BINARY.cmake" - - # Add necessary content to the boot.py file to unpack the frozen data filesystem on boot - # Provide the source and destination directories to copy the frozen data filesystem to a mutable (and non-hidden) location - # Provide "true" as the last argument to add the destination directory to sys.path (since our examples directory contains modules that we want to be importable...) 
- # add_frozen_data_to_boot_for_port "rp2" "$FROZEN_EXAMPLES_ARCHIVE_SCRIPT" ".$FROZEN_EXAMPLES_UNPACKED_DIR" "$FROZEN_EXAMPLES_UNPACKED_DIR" true - add_frozen_data_to_boot_for_port "rp2" "$FROZEN_EXAMPLES_ARCHIVE_SCRIPT" "$FROZEN_EXAMPLES_UNPACKED_DIR" true - - # Set Pico SDK path to $GITHUB_WORKSPACE/micropython/lib/pico-sdk if $GITHUB_WORKSPACE is set, otherwise use the current directory - if [ -n "$GITHUB_WORKSPACE" ]; then - export PICO_SDK_PATH="$GITHUB_WORKSPACE/micropython/lib/pico-sdk" - else - export PICO_SDK_PATH=$(dirname "$0")/micropython/lib/pico-sdk - fi - - # Build MPY Cross compiler - make -C micropython/mpy-cross - - # Update necessary MicroPython submodules - make -C micropython/ports/rp2 BOARD=SPARKFUN_XRP_CONTROLLER submodules - - # Build OpenCV - make -C src/opencv PLATFORM=rp2350 --no-print-directory ${MAKEOPTS} - - # Build firmware - make BOARD=SPARKFUN_XRP_CONTROLLER ${MAKEOPTS} - - # Rename firmware file to identify it as the OpenCV build and which board it's for - mv micropython/ports/rp2/build-SPARKFUN_XRP_CONTROLLER-LARGE_BINARY/firmware.uf2 micropython/ports/rp2/build-SPARKFUN_XRP_CONTROLLER-LARGE_BINARY/MICROPYTHON_OPENCV_SPARKFUN_XRP_CONTROLLER.uf2 -} diff --git a/cv2_drivers/__init__.py b/cv2_drivers/__init__.py deleted file mode 100644 index 9db91e7..0000000 --- a/cv2_drivers/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# cv2_drivers/touch_screens/__init__.py -# -# Imports all available drivers for MicroPython OpenCV. -#------------------------------------------------------------------------------- - -from . import displays -from . import cameras -from . import touch_screens diff --git a/cv2_drivers/cameras/__init__.py b/cv2_drivers/cameras/__init__.py deleted file mode 100644 index cc46d38..0000000 --- a/cv2_drivers/cameras/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# cv2_drivers/cameras/__init__.py -# -# Imports all available camera drivers for MicroPython OpenCV. -#------------------------------------------------------------------------------- - -# Import sys module to check platform -import sys - -# Import RP2 drivers -if 'rp2' in sys.platform: - from . import hm01b0_pio - from . import ov5640_pio diff --git a/cv2_drivers/cameras/cv2_camera.py b/cv2_drivers/cameras/cv2_camera.py deleted file mode 100644 index 4cd362b..0000000 --- a/cv2_drivers/cameras/cv2_camera.py +++ /dev/null @@ -1,45 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# cv2_camera.py -# -# Base class for OpenCV camera drivers. -#------------------------------------------------------------------------------- - -class CV2_Camera(): - """ - Base class for OpenCV camera drivers. - """ - def __init__(self): - """ - Initializes the camera. - """ - pass - - def open(self): - """ - Opens the camera and prepares it for capturing images. 
- """ - raise NotImplementedError("open() must be implemented by driver") - - def release(self): - """ - Releases the camera and frees any resources. - """ - raise NotImplementedError("release() must be implemented by driver") - - def read(self, image=None): - """ - Reads an image from the camera. - - Args: - image (ndarray, optional): Image to read into - - Returns: - tuple: (success, image) - - success (bool): True if the image was read, otherwise False - - image (ndarray): The captured image, or None if reading failed - """ - raise NotImplementedError("read() must be implemented by driver") diff --git a/cv2_drivers/cameras/dvp_camera.py b/cv2_drivers/cameras/dvp_camera.py deleted file mode 100644 index 0799498..0000000 --- a/cv2_drivers/cameras/dvp_camera.py +++ /dev/null @@ -1,60 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# dvp_camera.py -# -# Base class for OpenCV DVP (Digital Video Port) camera drivers. -#------------------------------------------------------------------------------- - -from .cv2_camera import CV2_Camera - -class DVP_Camera(CV2_Camera): - """ - Base class for OpenCV DVP (Digital Video Port) camera drivers. - """ - def __init__( - self, - i2c, - i2c_address - ): - """ - Initializes the DVP camera with I2C communication. - - Args: - i2c (I2C): I2C object for communication - i2c_address (int): I2C address of the camera - """ - super().__init__() - - self._i2c = i2c - self._i2c_address = i2c_address - - def _read_register(self, reg, nbytes=1): - """ - Reads a register from the camera over I2C. - - Args: - reg (int): Register address to read - nbytes (int): Number of bytes to read from the register - - Returns: - bytes: Data read from the register - """ - self._i2c.writeto(self._i2c_address, bytes([reg >> 8, reg & 0xFF])) - return self._i2c.readfrom(self._i2c_address, nbytes) - - def _write_register(self, reg, data): - """ - Writes data to a register on the camera over I2C. - - Args: - reg (int): Register address to write - data (bytes, int, list, tuple): Data to write to the register - """ - if isinstance(data, int): - data = bytes([data]) - elif isinstance(data, (list, tuple)): - data = bytes(data) - self._i2c.writeto(self._i2c_address, bytes([reg >> 8, reg & 0xFF]) + data) diff --git a/cv2_drivers/cameras/dvp_rp2_pio.py b/cv2_drivers/cameras/dvp_rp2_pio.py deleted file mode 100644 index 81d2840..0000000 --- a/cv2_drivers/cameras/dvp_rp2_pio.py +++ /dev/null @@ -1,182 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# dvp_rp2_pio.py -# -# This class implements a DVP (Digital Video Port) interface using the RP2 PIO -# (Programmable Input/Output) interface. This is only available on Raspberry Pi -# RP2 processors. -# -# This class is derived from: -# https://github.com/adafruit/Adafruit_ImageCapture/blob/main/src/arch/rp2040.cpp -# Released under the MIT license. 
-# Copyright (c) 2021 Adafruit Industries -#------------------------------------------------------------------------------- - -import rp2 -from machine import Pin, PWM - -class DVP_RP2_PIO(): - """ - This class implements a DVP (Digital Video Port) interface using the RP2 PIO - (Programmable Input/Output) interface. This is only available on Raspberry - Pi RP2 processors. - """ - def __init__( - self, - pin_d0, - pin_vsync, - pin_hsync, - pin_pclk, - pin_xclk, - xclk_freq, - sm_id, - num_data_pins, - bytes_per_frame, - byte_swap - ): - """ - Initializes the DVP interface with the specified parameters. - - Args: - pin_d0 (int): Data 0 pin number for DVP interface - pin_vsync (int): Vertical sync pin number - pin_hsync (int): Horizontal sync pin number - pin_pclk (int): Pixel clock pin number - pin_xclk (int): External clock pin number - xclk_freq (int): Frequency in Hz for the external clock - sm_id (int): PIO state machine ID - num_data_pins (int): Number of data pins used in DVP interface - bytes_per_frame (int): Number of bytes per frame to capture - byte_swap (bool): Whether to swap bytes in the captured data - """ - self._pin_d0 = pin_d0 - self._pin_vsync = pin_vsync - self._pin_hsync = pin_hsync - self._pin_pclk = pin_pclk - self._pin_xclk = pin_xclk - self._sm_id = sm_id - - # Initialize DVP pins as inputs - for i in range(num_data_pins): - Pin(pin_d0+i, Pin.IN) - Pin(pin_vsync, Pin.IN) - Pin(pin_hsync, Pin.IN) - Pin(pin_pclk, Pin.IN) - - # Set up XCLK pin if provided - if self._pin_xclk is not None: - self._xclk = PWM(Pin(pin_xclk)) - self._xclk.freq(xclk_freq) - self._xclk.duty_u16(32768) # 50% duty cycle - - # Copy the PIO program - program = self._pio_read_dvp - - # Mask in the GPIO pins - program[0][0] |= self._pin_hsync & 0x1F - program[0][1] |= self._pin_pclk & 0x1F - program[0][3] |= self._pin_pclk & 0x1F - - # Mask in the number of data pins - program[0][2] &= 0xFFFFFFE0 - program[0][2] |= num_data_pins - - # Create PIO state machine to capture DVP data - self._sm = rp2.StateMachine( - self._sm_id, - program, - in_base = pin_d0 - ) - - # Create DMA controller to transfer data from PIO to buffer - self._dma = rp2.DMA() - req_num = ((self._sm_id // 4) << 3) + (self._sm_id % 4) + 4 - bytes_per_transfer = 4 - dma_ctrl = self._dma.pack_ctrl( - # 0 = 1 byte, 1 = 2 bytes, 2 = 4 bytes - size = {1:0, 2:1, 4:2}[bytes_per_transfer], - inc_read = False, - treq_sel = req_num, - bswap = byte_swap - ) - self._dma.config( - read = self._sm, - count = bytes_per_frame // bytes_per_transfer, - ctrl = dma_ctrl - ) - - def _active(self, active=None): - """ - Sets or gets the active state of the DVP interface. - - Args: - active (bool, optional): - - True: Activate the DVP interface - - False: Deactivate the DVP interface - - None: Get the current active state - - Returns: - bool: Current active state if no argument is provided - """ - # If no argument is provided, return the current active state - if active == None: - return self._sm.active() - - # Disable the DMA, the VSYNC handler will re-enable it when needed - self._dma.active(False) - - # Set the active state of the state machine - self._sm.active(active) - - # If active, set up the VSYNC interrupt handler - if active: - Pin(self._pin_vsync).irq( - trigger = Pin.IRQ_FALLING, - handler = lambda pin: self._vsync_handler() - ) - # If not active, disable the VSYNC interrupt handler - else: - Pin(self._pin_vsync).irq( - handler = None - ) - - def _vsync_handler(self): - """ - Handles the VSYNC interrupt to capture a frame of data. 
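        Each falling VSYNC edge marks the start of a new frame, so the
        state machine is restarted and the DMA write address is reset
        before re-arming the transfer, ensuring every frame lands at the
        start of the buffer. As a worked example of the DREQ mapping used
        in __init__ above: sm_id 1 gives ((1 // 4) << 3) + (1 % 4) + 4 = 5
        (PIO0 RX FIFO 1), and sm_id 4 gives 12 (PIO1 RX FIFO 0).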
- """ - # Disable DMA before reconfiguring it - self._dma.active(False) - - # Reset state machine to ensure ISR is cleared - self._sm.restart() - - # Ensure PIO RX FIFO is empty (it's not emptied by `sm.restart()`) - while self._sm.rx_fifo() > 0: - self._sm.get() - - # Reset the DMA write address - self._dma.write = self._buffer - - # Start the DMA - self._dma.active(True) - - # Here is the PIO program, which is configurable to mask in the GPIO pins - # and the number of data pins. It must be configured before the state - # machine is created - @rp2.asm_pio( - in_shiftdir = rp2.PIO.SHIFT_LEFT, - push_thresh = 32, - autopush = True, - fifo_join = rp2.PIO.JOIN_RX - ) - def _pio_read_dvp(): - """ - PIO program to read DVP data from the GPIO pins. - """ - wait(1, gpio, 0) # Mask in HSYNC pin - wait(1, gpio, 0) # Mask in PCLK pin - in_(pins, 1) # Mask in number of pins - wait(0, gpio, 0) # Mask in PCLK pin diff --git a/cv2_drivers/cameras/hm01b0.py b/cv2_drivers/cameras/hm01b0.py deleted file mode 100644 index b847a6b..0000000 --- a/cv2_drivers/cameras/hm01b0.py +++ /dev/null @@ -1,352 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# hm01b0.py -# -# Base class for OpenCV HM01B0 camera drivers. -# -# This class is derived from: -# https://github.com/openmv/openmv/blob/5acf5baf92b4314a549bdd068138e5df6cc0bac7/drivers/sensors/hm01b0.c -# Released under the MIT license. -# Copyright (C) 2013-2024 OpenMV, LLC. -#------------------------------------------------------------------------------- - -from .dvp_camera import DVP_Camera -from time import sleep_us -import cv2 - -class HM01B0(DVP_Camera): - """ - Base class for OpenCV HM01B0 camera drivers. 
- """ - # Read only registers - _MODEL_ID_H = 0x0000 - _MODEL_ID_L = 0x0001 - _FRAME_COUNT = 0x0005 - _PIXEL_ORDER = 0x0006 - # Sensor mode control - _MODE_SELECT = 0x0100 - _IMG_ORIENTATION = 0x0101 - _SW_RESET = 0x0103 - _GRP_PARAM_HOLD = 0x0104 - # Sensor exposure gain control - _INTEGRATION_H = 0x0202 - _INTEGRATION_L = 0x0203 - _ANALOG_GAIN = 0x0205 - _DIGITAL_GAIN_H = 0x020E - _DIGITAL_GAIN_L = 0x020F - # Frame timing control - _FRAME_LEN_LINES_H = 0x0340 - _FRAME_LEN_LINES_L = 0x0341 - _LINE_LEN_PCK_H = 0x0342 - _LINE_LEN_PCK_L = 0x0343 - # Binning mode control - _READOUT_X = 0x0383 - _READOUT_Y = 0x0387 - _BINNING_MODE = 0x0390 - # Test pattern control - _TEST_PATTERN_MODE = 0x0601 - # Black level control - _BLC_CFG = 0x1000 - _BLC_TGT = 0x1003 - _BLI_EN = 0x1006 - _BLC2_TGT = 0x1007 - # Sensor reserved - _DPC_CTRL = 0x1008 - _SINGLE_THR_HOT = 0x100B - _SINGLE_THR_COLD = 0x100C - # VSYNC,HSYNC and pixel shift register - _VSYNC_HSYNC_PIXEL_SHIFT_EN = 0x1012 - # Automatic exposure gain control - _AE_CTRL = 0x2100 - _AE_TARGET_MEAN = 0x2101 - _AE_MIN_MEAN = 0x2102 - _CONVERGE_IN_TH = 0x2103 - _CONVERGE_OUT_TH = 0x2104 - _MAX_INTG_H = 0x2105 - _MAX_INTG_L = 0x2106 - _MIN_INTG = 0x2107 - _MAX_AGAIN_FULL = 0x2108 - _MAX_AGAIN_BIN2 = 0x2109 - _MIN_AGAIN = 0x210A - _MAX_DGAIN = 0x210B - _MIN_DGAIN = 0x210C - _DAMPING_FACTOR = 0x210D - _FS_CTRL = 0x210E - _FS_60HZ_H = 0x210F - _FS_60HZ_L = 0x2110 - _FS_50HZ_H = 0x2111 - _FS_50HZ_L = 0x2112 - _FS_HYST_TH = 0x2113 - # Motion detection control - _MD_CTRL = 0x2150 - _I2C_CLEAR = 0x2153 - _WMEAN_DIFF_TH_H = 0x2155 - _WMEAN_DIFF_TH_M = 0x2156 - _WMEAN_DIFF_TH_L = 0x2157 - _MD_THH = 0x2158 - _MD_THM1 = 0x2159 - _MD_THM2 = 0x215A - _MD_THL = 0x215B - _STATISTIC_CTRL = 0x2000 - _MD_LROI_X_START_H = 0x2011 - _MD_LROI_X_START_L = 0x2012 - _MD_LROI_Y_START_H = 0x2013 - _MD_LROI_Y_START_L = 0x2014 - _MD_LROI_X_END_H = 0x2015 - _MD_LROI_X_END_L = 0x2016 - _MD_LROI_Y_END_H = 0x2017 - _MD_LROI_Y_END_L = 0x2018 - _MD_INTERRUPT = 0x2160 - # Sensor timing control - _QVGA_WIN_EN = 0x3010 - _SIX_BIT_MODE_EN = 0x3011 - _PMU_AUTOSLEEP_FRAMECNT = 0x3020 - _ADVANCE_VSYNC = 0x3022 - _ADVANCE_HSYNC = 0x3023 - _EARLY_GAIN = 0x3035 - # IO and clock control - _BIT_CONTROL = 0x3059 - _OSC_CLK_DIV = 0x3060 - _ANA_Register_11 = 0x3061 - _IO_DRIVE_STR = 0x3062 - _IO_DRIVE_STR2 = 0x3063 - _ANA_Register_14 = 0x3064 - _OUTPUT_PIN_STATUS_CONTROL = 0x3065 - _ANA_Register_17 = 0x3067 - _PCLK_POLARITY = 0x3068 - - # Useful values of Himax registers - _HIMAX_RESET = 0x01 - _HIMAX_MODE_STANDBY = 0x00 - _HIMAX_MODE_STREAMING = 0x01 # I2C triggered streaming enable - _HIMAX_MODE_STREAMING_NFRAMES = 0x03 # Output N frames - _HIMAX_MODE_STREAMING_TRIG = 0x05 # Hardware Trigger - # _HIMAX_SET_HMIRROR (r, x) ((r & 0xFE) | ((x & 1) << 0)) - # _HIMAX_SET_VMIRROR (r, x) ((r & 0xFD) | ((x & 1) << 1)) - - _PCLK_RISING_EDGE = 0x00 - _PCLK_FALLING_EDGE = 0x01 - _AE_CTRL_ENABLE = 0x00 - _AE_CTRL_DISABLE = 0x01 - - _HIMAX_BOOT_RETRY = 10 - _HIMAX_LINE_LEN_PCK_FULL = 0x178 - _HIMAX_FRAME_LENGTH_FULL = 0x109 - - _HIMAX_LINE_LEN_PCK_QVGA = 0x178 - _HIMAX_FRAME_LENGTH_QVGA = 0x104 - - _HIMAX_LINE_LEN_PCK_QQVGA = 0x178 - _HIMAX_FRAME_LENGTH_QQVGA = 0x084 - - _INIT_COMMANDS = ( - (_BLC_TGT, 0x08), # BLC target :8 at 8 bit mode - (_BLC2_TGT, 0x08), # BLI target :8 at 8 bit mode - (0x3044, 0x0A), # Increase CDS time for settling - (0x3045, 0x00), # Make symmetric for cds_tg and rst_tg - (0x3047, 0x0A), # Increase CDS time for settling - (0x3050, 0xC0), # Make negative offset up to 4x - (0x3051, 0x42), - 
(0x3052, 0x50), - (0x3053, 0x00), - (0x3054, 0x03), # tuning sf sig clamping as lowest - (0x3055, 0xF7), # tuning dsun - (0x3056, 0xF8), # increase adc nonoverlap clk - (0x3057, 0x29), # increase adc pwr for missing code - (0x3058, 0x1F), # turn on dsun - (0x3059, 0x1E), - (0x3064, 0x00), - (0x3065, 0x04), # pad pull 0 - (_ANA_Register_17, 0x00), # Disable internal oscillator - - (_BLC_CFG, 0x43), # BLC_on, IIR - - (0x1001, 0x43), # BLC dithering en - (0x1002, 0x43), # blc_darkpixel_thd - (0x0350, 0x7F), # Dgain Control - (_BLI_EN, 0x01), # BLI enable - (0x1003, 0x00), # BLI Target [Def: 0x20] - - (_DPC_CTRL, 0x01), # DPC option 0: DPC off 1 : mono 3 : bayer1 5 : bayer2 - (0x1009, 0xA0), # cluster hot pixel th - (0x100A, 0x60), # cluster cold pixel th - (_SINGLE_THR_HOT, 0x90), # single hot pixel th - (_SINGLE_THR_COLD, 0x40), # single cold pixel th - (0x1012, 0x00), # Sync. shift disable - (_STATISTIC_CTRL, 0x07), # AE stat en | MD LROI stat en | magic - (0x2003, 0x00), - (0x2004, 0x1C), - (0x2007, 0x00), - (0x2008, 0x58), - (0x200B, 0x00), - (0x200C, 0x7A), - (0x200F, 0x00), - (0x2010, 0xB8), - (0x2013, 0x00), - (0x2014, 0x58), - (0x2017, 0x00), - (0x2018, 0x9B), - - (_AE_CTRL, 0x01), #Automatic Exposure - (_AE_TARGET_MEAN, 0x64), #AE target mean [Def: 0x3C] - (_AE_MIN_MEAN, 0x0A), #AE min target mean [Def: 0x0A] - (_CONVERGE_IN_TH, 0x03), #Converge in threshold [Def: 0x03] - (_CONVERGE_OUT_TH, 0x05), #Converge out threshold [Def: 0x05] - (_MAX_INTG_H, (_HIMAX_FRAME_LENGTH_QVGA - 2) >> 8), #Maximum INTG High Byte [Def: 0x01] - (_MAX_INTG_L, (_HIMAX_FRAME_LENGTH_QVGA - 2) & 0xFF), #Maximum INTG Low Byte [Def: 0x54] - (_MAX_AGAIN_FULL, 0x04), #Maximum Analog gain in full frame mode [Def: 0x03] - (_MAX_AGAIN_BIN2, 0x04), #Maximum Analog gain in bin2 mode [Def: 0x04] - (_MAX_DGAIN, 0xC0), - - (_INTEGRATION_H, 0x01), #Integration H [Def: 0x01] - (_INTEGRATION_L, 0x08), #Integration L [Def: 0x08] - (_ANALOG_GAIN, 0x00), #Analog Global Gain [Def: 0x00] - (_DAMPING_FACTOR, 0x20), #Damping Factor [Def: 0x20] - (_DIGITAL_GAIN_H, 0x01), #Digital Gain High [Def: 0x01] - (_DIGITAL_GAIN_L, 0x00), #Digital Gain Low [Def: 0x00] - - (_FS_CTRL, 0x00), #Flicker Control - - (_FS_60HZ_H, 0x00), - (_FS_60HZ_L, 0x3C), - (_FS_50HZ_H, 0x00), - (_FS_50HZ_L, 0x32), - - (_MD_CTRL, 0x00), - (_FRAME_LEN_LINES_H, _HIMAX_FRAME_LENGTH_QVGA >> 8), - (_FRAME_LEN_LINES_L, _HIMAX_FRAME_LENGTH_QVGA & 0xFF), - (_LINE_LEN_PCK_H, _HIMAX_LINE_LEN_PCK_QVGA >> 8), - (_LINE_LEN_PCK_L, _HIMAX_LINE_LEN_PCK_QVGA & 0xFF), - (_QVGA_WIN_EN, 0x01), # Enable QVGA window readout - (0x0383, 0x01), - (0x0387, 0x01), - (0x0390, 0x00), - (0x3011, 0x70), - (0x3059, 0x22), # 1-bit mode - (_OSC_CLK_DIV, 0x14), - (_IMG_ORIENTATION, 0x00), # change the orientation - (0x0104, 0x01), - (_MODE_SELECT, 0x01), # Streaming mode - ) - - def __init__( - self, - i2c, - i2c_address = 0x24, - num_data_pins = 1 - ): - """ - Initializes the HM01B0 camera with default settings. - - Args: - i2c (I2C): I2C object for communication - i2c_address (int, optional): I2C address (default: 0x24) - num_data_pins (int, optional): Number of data pins - - 1 (Default) - - 4 - - 8 - """ - super().__init__(i2c, i2c_address) - - self._soft_reset() - self._send_init(num_data_pins) - - def _is_connected(self): - """ - Checks if the camera is connected by reading the chip ID. - - Returns: - bool: True if the camera is connected and the chip ID is correct, - otherwise False. 
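        A sketch of the check performed here: _read_register(_MODEL_ID_H, 2)
        returns the two chip ID bytes, and (data[0] << 8) | data[1] must
        equal 0x01B0 for a connected HM01B0 (see _get_chip_id() below).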
- """ - try: - # Try to read the chip ID - # If it throws an I/O error - the device isn't connected - id = self._get_chip_id() - - # Confirm the chip ID is correct - if id == 0x01B0: - return True - else: - return False - except: - return False - - def _get_chip_id(self): - """ - Reads the chip ID. - - Returns: - int: The chip ID of the HM01B0 (should be 0x01B0). - """ - data = self._read_register(self._MODEL_ID_H, 2) - return (data[0] << 8) | data[1] - - def _soft_reset(self): - """ - Performs a software reset of the HM01B0 sensor. - This resets the sensor to its default state. - """ - # HM01B0 can require multiple attempts to reset properly - for i in range(self._HIMAX_BOOT_RETRY): - self._write_register(self._SW_RESET, self._HIMAX_RESET) - sleep_us(1000) - mode = self._read_register(self._MODE_SELECT) - if mode[0] == self._HIMAX_MODE_STANDBY: - break - sleep_us(10000) - - def _set_mode(self, mode): - """ - Sets the operating mode of the HM01B0 sensor. - Args: - mode (int): The mode to set, e.g., _HIMAX_MODE_STREAMING. - """ - self._write_register(self._MODE_SELECT, mode) - - def _trigger(self): - """ - Triggers the HM01B0 sensor to capture a number of images. See - _set_n_frames(). - """ - self._write_register(self._MODE_SELECT, self._HIMAX_MODE_STREAMING_NFRAMES) - - def _set_n_frames(self, n_frames): - """ - Sets the number of frames to capture before stopping. See _trigger(). - """ - self._write_register(self._PMU_AUTOSLEEP_FRAMECNT, n_frames) - - def _send_init(self, num_data_pins): - """ - Initializes the HM01B0 sensor with default settings. - This includes setting up exposure, gain, and frame timing. - """ - for reg, value in self._INIT_COMMANDS: - if reg == 0x3059: - # Set the data pin mode based on the number of data pins - if num_data_pins == 1: - value = 0x22 - elif num_data_pins == 4: - value = 0x42 - else: - value = 0x02 - self._write_register(reg, value) - sleep_us(1000) - - def read(self, image=None): - """ - Reads an image from the camera. - - Args: - image (ndarray, optional): Image to read into - - Returns: - tuple: (success, image) - - success (bool): True if the image was read, otherwise False - - image (ndarray): The captured image, or None if reading failed - """ - return (True, cv2.cvtColor(self._buffer, cv2.COLOR_BayerRG2BGR, image)) diff --git a/cv2_drivers/cameras/hm01b0_pio.py b/cv2_drivers/cameras/hm01b0_pio.py deleted file mode 100644 index e07dc31..0000000 --- a/cv2_drivers/cameras/hm01b0_pio.py +++ /dev/null @@ -1,86 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# hm01b0_pio.py -# -# OpenCV HM01B0 camera driver using a PIO interface. Only available on -# Raspberry Pi RP2 processors. -#------------------------------------------------------------------------------- - -from .hm01b0 import HM01B0 -from .dvp_rp2_pio import DVP_RP2_PIO -from ulab import numpy as np - -class HM01B0_PIO(HM01B0, DVP_RP2_PIO): - """ - OpenCV HM01B0 camera driver using a PIO interface. Only available on - Raspberry Pi RP2 processors. - """ - def __init__( - self, - i2c, - sm_id, - pin_d0, - pin_vsync, - pin_hsync, - pin_pclk, - pin_xclk = None, - xclk_freq = 25_000_000, - num_data_pins = 1, - i2c_address = 0x24, - ): - """ - Initializes the HM01B0 PIO camera driver. 
- - Args: - i2c (I2C): I2C object for communication - sm_id (int): PIO state machine ID - pin_d0 (int): Data 0 pin number for DVP interface - pin_vsync (int): Vertical sync pin number - pin_hsync (int): Horizontal sync pin number - pin_pclk (int): Pixel clock pin number - pin_xclk (int, optional): External clock pin number - xclk_freq (int, optional): Frequency in Hz for the external clock - Default is 25 MHz - num_data_pins (int, optional): Number of data pins used in DVP interface - Default is 1 - i2c_address (int, optional): I2C address of the camera - Default is 0x24 - """ - # Create the frame buffer - self._buffer = np.zeros((244, 324), dtype=np.uint8) - - # Call both parent constructors - DVP_RP2_PIO.__init__( - self, - pin_d0, - pin_vsync, - pin_hsync, - pin_pclk, - pin_xclk, - xclk_freq, - sm_id, - num_data_pins, - bytes_per_frame = self._buffer.size, - byte_swap = True - ) - HM01B0.__init__( - self, - i2c, - i2c_address, - num_data_pins - ) - - def open(self): - """ - Opens the camera and prepares it for capturing images. - """ - self._active(True) - - def release(self): - """ - Releases the camera and frees any resources. - """ - self._active(False) diff --git a/cv2_drivers/cameras/ov5640.py b/cv2_drivers/cameras/ov5640.py deleted file mode 100644 index 2539942..0000000 --- a/cv2_drivers/cameras/ov5640.py +++ /dev/null @@ -1,1190 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# ov5640.py -# -# Base class for OpenCV OV5640 camera drivers. -# -# This class is derived from: -# https://github.com/adafruit/Adafruit_CircuitPython_OV5640 -# Released under the MIT license. -# Copyright (c) 2021 Jeff Epler for Adafruit Industries -#------------------------------------------------------------------------------- - -from .dvp_camera import DVP_Camera -from time import sleep_us -import cv2 - -class OV5640(DVP_Camera): - """ - Base class for OpenCV OV5640 camera drivers. 
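    The colorspace constants below select the sensor output format. With
    the default _OV5640_COLOR_RGB, the sensor emits RGB565 and read()
    converts the captured buffer to BGR with cv2.cvtColor (see read()
    below).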
- """ - _OV5640_COLOR_RGB = 0 - _OV5640_COLOR_YUV = 1 - _OV5640_COLOR_GRAYSCALE = 2 - _OV5640_COLOR_JPEG = 3 - - # fmt: off - - _SYSTEM_RESET00 = 0x3000 # Reset for Individual Block - # (0: enable block; 1: reset block) - # Bit[7]: Reset BIST - # Bit[6]: Reset MCU program memory - # Bit[5]: Reset MCU - # Bit[4]: Reset OTP - # Bit[3]: Reset STB - # Bit[2]: Reset d5060 - # Bit[1]: Reset timing control - # Bit[0]: Reset array control - - _SYSTEM_RESET02 = 0x3002 # Reset for Individual Block - # (0: enable block; 1: reset block) - # Bit[7]: Reset VFIFO - # Bit[5]: Reset format - # Bit[4]: Reset JFIFO - # Bit[3]: Reset SFIFO - # Bit[2]: Reset JPG - # Bit[1]: Reset format MUX - # Bit[0]: Reset average - - _CLOCK_ENABLE02 = 0x3006 # Clock Enable Control - # (0: disable clock; 1: enable clock) - # Bit[7]: Enable PSRAM clock - # Bit[6]: Enable FMT clock - # Bit[5]: Enable JPEG 2x clock - # Bit[3]: Enable JPEG clock - # Bit[1]: Enable format MUX clock - # Bit[0]: Enable average clock - - _SYSTEM_CTROL0 = 0x3008 - # Bit[7]: Software reset - # Bit[6]: Software power down - # Bit[5]: Reserved - # Bit[4]: SRB clock SYNC enable - # Bit[3]: Isolation suspend select - # Bit[2:0]: Not used - - _CHIP_ID_HIGH = 0x300A - - _DRIVE_CAPABILITY = 0x302C - # Bit[7:6]: - # 00: 1x - # 01: 2x - # 10: 3x - # 11: 4x - - _SC_PLLS_CTRL0 = 0x303A - # Bit[7]: PLLS bypass - _SC_PLLS_CTRL1 = 0x303B - # Bit[4:0]: PLLS multiplier - _SC_PLLS_CTRL2 = 0x303C - # Bit[6:4]: PLLS charge pump control - # Bit[3:0]: PLLS system divider - _SC_PLLS_CTRL3 = 0x303D - # Bit[5:4]: PLLS pre-divider - # 00: 1 - # 01: 1.5 - # 10: 2 - # 11: 3 - # Bit[2]: PLLS root-divider - 1 - # Bit[1:0]: PLLS seld5 - # 00: 1 - # 01: 1 - # 10: 2 - # 11: 2.5 - - # AEC/AGC control functions - _AEC_PK_MANUAL = 0x3503 - # AEC Manual Mode Control - # Bit[7:6]: Reserved - # Bit[5]: Gain delay option - # Valid when 0x3503[4]=1’b0 - # 0: Delay one frame latch - # 1: One frame latch - # Bit[4:2]: Reserved - # Bit[1]: AGC manual - # 0: Auto enable - # 1: Manual enable - # Bit[0]: AEC manual - # 0: Auto enable - # 1: Manual enable - - # gain = {0x350A[1:0], 0x350B[7:0]} / 16 - - - _X_ADDR_ST_H = 0x3800 - # Bit[3:0]: X address start[11:8] - _X_ADDR_ST_L = 0x3801 - # Bit[7:0]: X address start[7:0] - _Y_ADDR_ST_H = 0x3802 - # Bit[2:0]: Y address start[10:8] - _Y_ADDR_ST_L = 0x3803 - # Bit[7:0]: Y address start[7:0] - _X_ADDR_END_H = 0x3804 - # Bit[3:0]: X address end[11:8] - _X_ADDR_END_L = 0x3805 - # Bit[7:0]: - _Y_ADDR_END_H = 0x3806 - # Bit[2:0]: Y address end[10:8] - _Y_ADDR_END_L = 0x3807 - # Bit[7:0]: - # Size after scaling - _X_OUTPUT_SIZE_H = 0x3808 - # Bit[3:0]: DVP output horizontal width[11:8] - _X_OUTPUT_SIZE_L = 0x3809 - # Bit[7:0]: - _Y_OUTPUT_SIZE_H = 0x380A - # Bit[2:0]: DVP output vertical height[10:8] - _Y_OUTPUT_SIZE_L = 0x380B - # Bit[7:0]: - _X_TOTAL_SIZE_H = 0x380C - # Bit[3:0]: Total horizontal size[11:8] - _X_TOTAL_SIZE_L = 0x380D - # Bit[7:0]: - _Y_TOTAL_SIZE_H = 0x380E - # Bit[7:0]: Total vertical size[15:8] - _Y_TOTAL_SIZE_L = 0x380F - # Bit[7:0]: - _X_OFFSET_H = 0x3810 - # Bit[3:0]: ISP horizontal offset[11:8] - _X_OFFSET_L = 0x3811 - # Bit[7:0]: - _Y_OFFSET_H = 0x3812 - # Bit[2:0]: ISP vertical offset[10:8] - _Y_OFFSET_L = 0x3813 - # Bit[7:0]: - _X_INCREMENT = 0x3814 - # Bit[7:4]: Horizontal odd subsample increment - # Bit[3:0]: Horizontal even subsample increment - _Y_INCREMENT = 0x3815 - # Bit[7:4]: Vertical odd subsample increment - # Bit[3:0]: Vertical even subsample increment - # Size before scaling - # X_INPUT_SIZE = (X_ADDR_END - X_ADDR_ST 
+ 1 - (2 * X_OFFSET)) - # Y_INPUT_SIZE = (Y_ADDR_END - Y_ADDR_ST + 1 - (2 * Y_OFFSET)) - - # mirror and flip registers - _TIMING_TC_REG20 = 0x3820 - # Timing Control Register - # Bit[2:1]: Vertical flip enable - # 00: Normal - # 11: Vertical flip - # Bit[0]: Vertical binning enable - _TIMING_TC_REG21 = 0x3821 - # Timing Control Register - # Bit[5]: Compression Enable - # Bit[2:1]: Horizontal mirror enable - # 00: Normal - # 11: Horizontal mirror - # Bit[0]: Horizontal binning enable - - _PCLK_RATIO = 0x3824 - # Bit[4:0]: PCLK ratio manual - - # frame control registers - _FRAME_CTRL01 = 0x4201 - # Control Passed Frame Number When both ON and OFF number set to 0x00,frame - # control is in bypass mode - # Bit[7:4]: Not used - # Bit[3:0]: Frame ON number - _FRAME_CTRL02 = 0x4202 - # Control Masked Frame Number When both ON and OFF number set to 0x00,frame - # control is in bypass mode - # Bit[7:4]: Not used - # BIT[3:0]: Frame OFF number - - # format control registers - _FORMAT_CTRL00 = 0x4300 - - _CLOCK_POL_CONTROL = 0x4740 - # Bit[5]: PCLK polarity 0: active low - # 1: active high - # Bit[3]: Gate PCLK under VSYNC - # Bit[2]: Gate PCLK under HREF - # Bit[1]: HREF polarity - # 0: active low - # 1: active high - # Bit[0] VSYNC polarity - # 0: active low - # 1: active high - - _ISP_CONTROL_01 = 0x5001 - # Bit[5]: Scale enable - # 0: Disable - # 1: Enable - - # output format control registers - _FORMAT_CTRL = 0x501F - # Format select - # Bit[2:0]: - # 000: YUV422 - # 001: RGB - # 010: Dither - # 011: RAW after DPC - # 101: RAW after CIP - - # ISP top control registers - _PRE_ISP_TEST_SETTING_1 = 0x503D - # Bit[7]: Test enable - # 0: Test disable - # 1: Color bar enable - # Bit[6]: Rolling - # Bit[5]: Transparent - # Bit[4]: Square black and white - # Bit[3:2]: Color bar style - # 00: Standard 8 color bar - # 01: Gradual change at vertical mode 1 - # 10: Gradual change at horizontal - # 11: Gradual change at vertical mode 2 - # Bit[1:0]: Test select - # 00: Color bar - # 01: Random data - # 10: Square data - # 11: Black image - - # exposure = {0x3500[3:0], 0x3501[7:0], 0x3502[7:0]} / 16 × tROW - - _SCALE_CTRL_1 = 0x5601 - # Bit[6:4]: HDIV RW - # DCW scale times - # 000: DCW 1 time - # 001: DCW 2 times - # 010: DCW 4 times - # 100: DCW 8 times - # 101: DCW 16 times - # Others: DCW 16 times - # Bit[2:0]: VDIV RW - # DCW scale times - # 000: DCW 1 time - # 001: DCW 2 times - # 010: DCW 4 times - # 100: DCW 8 times - # 101: DCW 16 times - # Others: DCW 16 times - - _SCALE_CTRL_2 = 0x5602 - # X_SCALE High Bits - _SCALE_CTRL_3 = 0x5603 - # X_SCALE Low Bits - _SCALE_CTRL_4 = 0x5604 - # Y_SCALE High Bits - _SCALE_CTRL_5 = 0x5605 - # Y_SCALE Low Bits - _SCALE_CTRL_6 = 0x5606 - # Bit[3:0]: V Offset - - _VFIFO_CTRL0C = 0x460C - # Bit[1]: PCLK manual enable - # 0: Auto - # 1: Manual by PCLK_RATIO - - _VFIFO_X_SIZE_H = 0x4602 - _VFIFO_X_SIZE_L = 0x4603 - _VFIFO_Y_SIZE_H = 0x4604 - _VFIFO_Y_SIZE_L = 0x4605 - - _COMPRESSION_CTRL00 = 0x4400 - _COMPRESSION_CTRL01 = 0x4401 - _COMPRESSION_CTRL02 = 0x4402 - _COMPRESSION_CTRL03 = 0x4403 - _COMPRESSION_CTRL04 = 0x4404 - _COMPRESSION_CTRL05 = 0x4405 - _COMPRESSION_CTRL06 = 0x4406 - _COMPRESSION_CTRL07 = 0x4407 - # Bit[5:0]: QS - _COMPRESSION_ISI_CTRL = 0x4408 - _COMPRESSION_CTRL09 = 0x4409 - _COMPRESSION_CTRL0A = 0x440A - _COMPRESSION_CTRL0B = 0x440B - _COMPRESSION_CTRL0C = 0x440C - _COMPRESSION_CTRL0D = 0x440D - _COMPRESSION_CTRL0E = 0x440E - - _TEST_COLOR_BAR = 0xC0 - # Enable Color Bar roling Test - - _AEC_PK_MANUAL_AGC_MANUALEN = 0x02 - # Enable AGC Manual enable - 
_AEC_PK_MANUAL_AEC_MANUALEN = 0x01 - # Enable AEC Manual enable - - _TIMING_TC_REG20_VFLIP = 0x06 - # Vertical flip enable - _TIMING_TC_REG21_HMIRROR = 0x06 - # Horizontal mirror enable - - _OV5640_SIZE_96X96 = 0 # 96x96 - _OV5640_SIZE_QQVGA = 1 # 160x120 - _OV5640_SIZE_QCIF = 2 # 176x144 - _OV5640_SIZE_HQVGA = 3 # 240x176 - _OV5640_SIZE_240X240 = 4 # 240x240 - _OV5640_SIZE_QVGA = 5 # 320x240 - _OV5640_SIZE_CIF = 6 # 400x296 - _OV5640_SIZE_HVGA = 7 # 480x320 - _OV5640_SIZE_VGA = 8 # 640x480 - _OV5640_SIZE_SVGA = 9 # 800x600 - _OV5640_SIZE_XGA = 10 # 1024x768 - _OV5640_SIZE_HD = 11 # 1280x720 - _OV5640_SIZE_SXGA = 12 # 1280x1024 - _OV5640_SIZE_UXGA = 13 # 1600x1200 - _OV5640_SIZE_QHDA = 14 # 2560x1440 - _OV5640_SIZE_WQXGA = 15 # 2560x1600 - _OV5640_SIZE_PFHD = 16 # 1088x1920 - _OV5640_SIZE_QSXGA = 17 # 2560x1920 - - _ASPECT_RATIO_4X3 = 0 - _ASPECT_RATIO_3X2 = 1 - _ASPECT_RATIO_16X10 = 2 - _ASPECT_RATIO_5X3 = 3 - _ASPECT_RATIO_16X9 = 4 - _ASPECT_RATIO_21X9 = 5 - _ASPECT_RATIO_5X4 = 6 - _ASPECT_RATIO_1X1 = 7 - _ASPECT_RATIO_9X16 = 8 - - _resolution_info = [ - [96, 96, _ASPECT_RATIO_1X1], # 96x96 - [160, 120, _ASPECT_RATIO_4X3], # QQVGA - [176, 144, _ASPECT_RATIO_5X4], # QCIF - [240, 176, _ASPECT_RATIO_4X3], # HQVGA - [240, 240, _ASPECT_RATIO_1X1], # 240x240 - [320, 240, _ASPECT_RATIO_4X3], # QVGA - [400, 296, _ASPECT_RATIO_4X3], # CIF - [480, 320, _ASPECT_RATIO_3X2], # HVGA - [640, 480, _ASPECT_RATIO_4X3], # VGA - [800, 600, _ASPECT_RATIO_4X3], # SVGA - [1024, 768, _ASPECT_RATIO_4X3], # XGA - [1280, 720, _ASPECT_RATIO_16X9], # HD - [1280, 1024, _ASPECT_RATIO_5X4], # SXGA - [1600, 1200, _ASPECT_RATIO_4X3], # UXGA - [2560, 1440, _ASPECT_RATIO_16X9], # QHD - [2560, 1600, _ASPECT_RATIO_16X10], # WQXGA - [1088, 1920, _ASPECT_RATIO_9X16], # Portrait FHD - [2560, 1920, _ASPECT_RATIO_4X3], # QSXGA - - ] - - - _ratio_table = [ - # mw, mh, sx, sy, ex, ey, ox, oy, tx, ty - [2560, 1920, 0, 0, 2623, 1951, 32, 16, 2844, 1968], # 4x3 - [2560, 1704, 0, 110, 2623, 1843, 32, 16, 2844, 1752], # 3x2 - [2560, 1600, 0, 160, 2623, 1791, 32, 16, 2844, 1648], # 16x10 - [2560, 1536, 0, 192, 2623, 1759, 32, 16, 2844, 1584], # 5x3 - [2560, 1440, 0, 240, 2623, 1711, 32, 16, 2844, 1488], # 16x9 - [2560, 1080, 0, 420, 2623, 1531, 32, 16, 2844, 1128], # 21x9 - [2400, 1920, 80, 0, 2543, 1951, 32, 16, 2684, 1968], # 5x4 - [1920, 1920, 320, 0, 2543, 1951, 32, 16, 2684, 1968], # 1x1 - [1088, 1920, 736, 0, 1887, 1951, 32, 16, 1884, 1968], # 9x16 - ] - - _pll_pre_div2x_factors = [1, 1, 2, 3, 4, 1.5, 6, 2.5, 8] - _pll_pclk_root_div_factors = [1,2,4,8] - - _REG_DLY = 0xFFFF - _REGLIST_TAIL = 0x0000 - - _OV5640_STAT_FIRMWAREBAD = 0x7F - _OV5640_STAT_STARTUP = 0x7E - _OV5640_STAT_IDLE = 0x70 - _OV5640_STAT_FOCUSING = 0x00 - _OV5640_STAT_FOCUSED = 0x10 - - _OV5640_CMD_TRIGGER_AUTOFOCUS = 0x03 - _OV5640_CMD_AUTO_AUTOFOCUS = 0x04 - _OV5640_CMD_RELEASE_FOCUS = 0x08 - _OV5640_CMD_AF_SET_VCM_STEP = 0x1A - _OV5640_CMD_AF_GET_VCM_STEP = 0x1B - - _OV5640_CMD_MAIN = 0x3022 - _OV5640_CMD_ACK = 0x3023 - _OV5640_CMD_PARA0 = 0x3024 - _OV5640_CMD_PARA1 = 0x3025 - _OV5640_CMD_PARA2 = 0x3026 - _OV5640_CMD_PARA3 = 0x3027 - _OV5640_CMD_PARA4 = 0x3028 - _OV5640_CMD_FW_STATUS = 0x3029 - - - _sensor_default_regs = [ - _SYSTEM_CTROL0, 0x82, # software reset - _REG_DLY, 10, # delay 10ms - _SYSTEM_CTROL0, 0x42, # power down - # enable pll - 0x3103, 0x13, - # io direction - 0x3017, 0xFF, - 0x3018, 0xFF, - _DRIVE_CAPABILITY, 0xC3, - _CLOCK_POL_CONTROL, 0x21, - 0x4713, 0x02, # jpg mode select - _ISP_CONTROL_01, 0x83, # turn color matrix, awb and SDE - # sys 
reset - _SYSTEM_RESET00, 0x00, # enable all blocks - _SYSTEM_RESET02, 0x1C, # reset jfifo, sfifo, jpg, fmux, avg - # clock enable - 0x3004, 0xFF, - _CLOCK_ENABLE02, 0xC3, - # isp control - 0x5000, 0xA7, - _ISP_CONTROL_01, 0xA3, # +scaling? - 0x5003, 0x08, # special_effect - # unknown - 0x370C, 0x02, #!!IMPORTANT - 0x3634, 0x40, #!!IMPORTANT - # AEC/AGC - 0x3A02, 0x03, - 0x3A03, 0xD8, - 0x3A08, 0x01, - 0x3A09, 0x27, - 0x3A0A, 0x00, - 0x3A0B, 0xF6, - 0x3A0D, 0x04, - 0x3A0E, 0x03, - 0x3A0F, 0x30, # ae_level - 0x3A10, 0x28, # ae_level - 0x3A11, 0x60, # ae_level - 0x3A13, 0x43, - 0x3A14, 0x03, - 0x3A15, 0xD8, - 0x3A18, 0x00, # gainceiling - 0x3A19, 0xF8, # gainceiling - 0x3A1B, 0x30, # ae_level - 0x3A1E, 0x26, # ae_level - 0x3A1F, 0x14, # ae_level - # vcm debug - 0x3600, 0x08, - 0x3601, 0x33, - # 50/60Hz - 0x3C01, 0xA4, - 0x3C04, 0x28, - 0x3C05, 0x98, - 0x3C06, 0x00, - 0x3C07, 0x08, - 0x3C08, 0x00, - 0x3C09, 0x1C, - 0x3C0A, 0x9C, - 0x3C0B, 0x40, - 0x460C, 0x22, # disable jpeg footer - # BLC - 0x4001, 0x02, - 0x4004, 0x02, - # AWB - 0x5180, 0xFF, - 0x5181, 0xF2, - 0x5182, 0x00, - 0x5183, 0x14, - 0x5184, 0x25, - 0x5185, 0x24, - 0x5186, 0x09, - 0x5187, 0x09, - 0x5188, 0x09, - 0x5189, 0x75, - 0x518A, 0x54, - 0x518B, 0xE0, - 0x518C, 0xB2, - 0x518D, 0x42, - 0x518E, 0x3D, - 0x518F, 0x56, - 0x5190, 0x46, - 0x5191, 0xF8, - 0x5192, 0x04, - 0x5193, 0x70, - 0x5194, 0xF0, - 0x5195, 0xF0, - 0x5196, 0x03, - 0x5197, 0x01, - 0x5198, 0x04, - 0x5199, 0x12, - 0x519A, 0x04, - 0x519B, 0x00, - 0x519C, 0x06, - 0x519D, 0x82, - 0x519E, 0x38, - # color matrix (Saturation) - 0x5381, 0x1E, - 0x5382, 0x5B, - 0x5383, 0x08, - 0x5384, 0x0A, - 0x5385, 0x7E, - 0x5386, 0x88, - 0x5387, 0x7C, - 0x5388, 0x6C, - 0x5389, 0x10, - 0x538A, 0x01, - 0x538B, 0x98, - # CIP control (Sharpness) - 0x5300, 0x10, # sharpness - 0x5301, 0x10, # sharpness - 0x5302, 0x18, # sharpness - 0x5303, 0x19, # sharpness - 0x5304, 0x10, - 0x5305, 0x10, - 0x5306, 0x08, # denoise - 0x5307, 0x16, - 0x5308, 0x40, - 0x5309, 0x10, # sharpness - 0x530A, 0x10, # sharpness - 0x530B, 0x04, # sharpness - 0x530C, 0x06, # sharpness - # GAMMA - 0x5480, 0x01, - 0x5481, 0x00, - 0x5482, 0x1E, - 0x5483, 0x3B, - 0x5484, 0x58, - 0x5485, 0x66, - 0x5486, 0x71, - 0x5487, 0x7D, - 0x5488, 0x83, - 0x5489, 0x8F, - 0x548A, 0x98, - 0x548B, 0xA6, - 0x548C, 0xB8, - 0x548D, 0xCA, - 0x548E, 0xD7, - 0x548F, 0xE3, - 0x5490, 0x1D, - # Special Digital Effects (SDE) (UV adjust) - 0x5580, 0x06, # enable brightness and contrast - 0x5583, 0x40, # special_effect - 0x5584, 0x10, # special_effect - 0x5586, 0x20, # contrast - 0x5587, 0x00, # brightness - 0x5588, 0x00, # brightness - 0x5589, 0x10, - 0x558A, 0x00, - 0x558B, 0xF8, - 0x501D, 0x40, # enable manual offset of contrast - # power on - 0x3008, 0x02, - # 50Hz - 0x3C00, 0x04, - #_REG_DLY, 300, - ] - - - - _reset_awb = [ - _ISP_CONTROL_01, 0x83, # turn color matrix, awb and SDE - # sys reset - _SYSTEM_RESET00, 0x00, # enable all blocks - _SYSTEM_RESET02, 0x1C, # reset jfifo, sfifo, jpg, fmux, avg - # clock enable - #0x3004, 0xFF, - #_CLOCK_ENABLE02, 0xC3, - # isp control - 0x5000, 0xA7, - _ISP_CONTROL_01, 0xA3, # +scaling? 
- 0x5003, 0x08, # special_effect - # unknown - 0x370C, 0x02, #!!IMPORTANT - 0x3634, 0x40, #!!IMPORTANT - # AEC/AGC - 0x3A02, 0x03, - 0x3A03, 0xD8, - 0x3A08, 0x01, - 0x3A09, 0x27, - 0x3A0A, 0x00, - 0x3A0B, 0xF6, - 0x3A0D, 0x04, - 0x3A0E, 0x03, - 0x3A0F, 0x30, # ae_level - 0x3A10, 0x28, # ae_level - 0x3A11, 0x60, # ae_level - 0x3A13, 0x43, - 0x3A14, 0x03, - 0x3A15, 0xD8, - 0x3A18, 0x00, # gainceiling - 0x3A19, 0xF8, # gainceiling - 0x3A1B, 0x30, # ae_level - 0x3A1E, 0x26, # ae_level - 0x3A1F, 0x14, # ae_level - # vcm debug - 0x3600, 0x08, - 0x3601, 0x33, - # 50/60Hz - 0x3C01, 0xA4, - 0x3C04, 0x28, - 0x3C05, 0x98, - 0x3C06, 0x00, - 0x3C07, 0x08, - 0x3C08, 0x00, - 0x3C09, 0x1C, - 0x3C0A, 0x9C, - 0x3C0B, 0x40, - 0x460C, 0x22, # disable jpeg footer - # BLC - 0x4001, 0x02, - 0x4004, 0x02, - # AWB - 0x5180, 0xFF, - 0x5181, 0xF2, - 0x5182, 0x00, - 0x5183, 0x14, - 0x5184, 0x25, - 0x5185, 0x24, - 0x5186, 0x09, - 0x5187, 0x09, - 0x5188, 0x09, - 0x5189, 0x75, - 0x518A, 0x54, - 0x518B, 0xE0, - 0x518C, 0xB2, - 0x518D, 0x42, - 0x518E, 0x3D, - 0x518F, 0x56, - 0x5190, 0x46, - 0x5191, 0xF8, - 0x5192, 0x04, - 0x5193, 0x70, - 0x5194, 0xF0, - 0x5195, 0xF0, - 0x5196, 0x03, - 0x5197, 0x01, - 0x5198, 0x04, - 0x5199, 0x12, - 0x519A, 0x04, - 0x519B, 0x00, - 0x519C, 0x06, - 0x519D, 0x82, - 0x519E, 0x38, - # color matrix (Saturation) - 0x5381, 0x1E, - 0x5382, 0x5B, - 0x5383, 0x08, - 0x5384, 0x0A, - 0x5385, 0x7E, - 0x5386, 0x88, - 0x5387, 0x7C, - 0x5388, 0x6C, - 0x5389, 0x10, - 0x538A, 0x01, - 0x538B, 0x98, - # CIP control (Sharpness) - 0x5300, 0x10, # sharpness - 0x5301, 0x10, # sharpness - 0x5302, 0x18, # sharpness - 0x5303, 0x19, # sharpness - 0x5304, 0x10, - 0x5305, 0x10, - 0x5306, 0x08, # denoise - 0x5307, 0x16, - 0x5308, 0x40, - 0x5309, 0x10, # sharpness - 0x530A, 0x10, # sharpness - 0x530B, 0x04, # sharpness - 0x530C, 0x06, # sharpness - # GAMMA - 0x5480, 0x01, - 0x5481, 0x00, - 0x5482, 0x1E, - 0x5483, 0x3B, - 0x5484, 0x58, - 0x5485, 0x66, - 0x5486, 0x71, - 0x5487, 0x7D, - 0x5488, 0x83, - 0x5489, 0x8F, - 0x548A, 0x98, - 0x548B, 0xA6, - 0x548C, 0xB8, - 0x548D, 0xCA, - 0x548E, 0xD7, - 0x548F, 0xE3, - 0x5490, 0x1D, - # Special Digital Effects (SDE) (UV adjust) - 0x5580, 0x06, # enable brightness and contrast - 0x5583, 0x40, # special_effect - 0x5584, 0x10, # special_effect - 0x5586, 0x20, # contrast - 0x5587, 0x00, # brightness - 0x5588, 0x00, # brightness - 0x5589, 0x10, - 0x558A, 0x00, - 0x558B, 0xF8, - 0x501D, 0x40, # enable manual offset of contrast - ] - _sensor_format_jpeg = [ - _FORMAT_CTRL, 0x00, # YUV422 - _FORMAT_CTRL00, 0x30, # YUYV - _SYSTEM_RESET02, 0x00, # enable everything - _CLOCK_ENABLE02, 0xFF, # enable all clocks - 0x471C, 0x50, # 0xd0 to 0x50 !!! 
- ] - - _sensor_format_raw = [ - _FORMAT_CTRL, 0x03, # RAW (DPC) - _FORMAT_CTRL00, 0x00, # RAW - ] - - _sensor_format_grayscale = [ - _FORMAT_CTRL, 0x00, # YUV422 - _FORMAT_CTRL00, 0x10, # Y8 - ] - - _sensor_format_yuv422 = [ - _FORMAT_CTRL, 0x00, # YUV422 - _FORMAT_CTRL00, 0x30, # YUYV - ] - - _sensor_format_rgb565 = [ - _FORMAT_CTRL, 0x01, # RGB - _FORMAT_CTRL00, 0x61, # RGB565 (BGR) - _SYSTEM_RESET02, 0x1C, # reset jfifo, sfifo, jpg, fmux, avg - _CLOCK_ENABLE02, 0xC3, # reset to how it was before (no jpg clock) - - ] - - _ov5640_color_settings = { - _OV5640_COLOR_RGB: _sensor_format_rgb565, - _OV5640_COLOR_YUV: _sensor_format_yuv422, - _OV5640_COLOR_GRAYSCALE: _sensor_format_grayscale, - _OV5640_COLOR_JPEG: _sensor_format_jpeg, - } - - _contrast_settings = [ - [0x20, 0x00], # 0 - [0x24, 0x10], # +1 - [0x28, 0x18], # +2 - [0x2c, 0x1c], # +3 - [0x14, 0x14], # -3 - [0x18, 0x18], # -2 - [0x1c, 0x1c], # -1 - ] - - _sensor_saturation_levels = [ - [0x1D, 0x60, 0x03, 0x0C, 0x78, 0x84, 0x7D, 0x6B, 0x12, 0x01, 0x98], # 0 - [0x1D, 0x60, 0x03, 0x0D, 0x84, 0x91, 0x8A, 0x76, 0x14, 0x01, 0x98], # +1 - [0x1D, 0x60, 0x03, 0x0E, 0x90, 0x9E, 0x96, 0x80, 0x16, 0x01, 0x98], # +2 - [0x1D, 0x60, 0x03, 0x10, 0x9C, 0xAC, 0xA2, 0x8B, 0x17, 0x01, 0x98], # +3 - [0x1D, 0x60, 0x03, 0x11, 0xA8, 0xB9, 0xAF, 0x96, 0x19, 0x01, 0x98], # +4 - [0x1D, 0x60, 0x03, 0x07, 0x48, 0x4F, 0x4B, 0x40, 0x0B, 0x01, 0x98], # -4 - [0x1D, 0x60, 0x03, 0x08, 0x54, 0x5C, 0x58, 0x4B, 0x0D, 0x01, 0x98], # -3 - [0x1D, 0x60, 0x03, 0x0A, 0x60, 0x6A, 0x64, 0x56, 0x0E, 0x01, 0x98], # -2 - [0x1D, 0x60, 0x03, 0x0B, 0x6C, 0x77, 0x70, 0x60, 0x10, 0x01, 0x98], # -1 - ] - - _sensor_ev_levels = [ - [0x38, 0x30, 0x61, 0x38, 0x30, 0x10], # 0 - [0x40, 0x38, 0x71, 0x40, 0x38, 0x10], # +1 - [0x50, 0x48, 0x90, 0x50, 0x48, 0x20], # +2 - [0x60, 0x58, 0xa0, 0x60, 0x58, 0x20], # +3 - [0x10, 0x08, 0x10, 0x08, 0x20, 0x10], # -3 - [0x20, 0x18, 0x41, 0x20, 0x18, 0x10], # -2 - [0x30, 0x28, 0x61, 0x30, 0x28, 0x10], # -1 - ] - - _OV5640_WHITE_BALANCE_AUTO = 0 - _OV5640_WHITE_BALANCE_SUNNY = 1 - _OV5640_WHITE_BALANCE_FLUORESCENT = 2 - _OV5640_WHITE_BALANCE_CLOUDY = 3 - _OV5640_WHITE_BALANCE_INCANDESCENT = 4 - - _light_registers = [0x3406, 0x3400, 0x3401, 0x3402, 0x3403, 0x3404, 0x3405] - _light_modes = [ - [0x00, 0x04, 0x00, 0x04, 0x00, 0x04, 0x00], # auto - [0x01, 0x06, 0x1c, 0x04, 0x00, 0x04, 0xf3], # sunny - [0x01, 0x05, 0x48, 0x04, 0x00, 0x07, 0xcf], # office / fluorescent - [0x01, 0x06, 0x48, 0x04, 0x00, 0x04, 0xd3], # cloudy - [0x01, 0x04, 0x10, 0x04, 0x00, 0x08, 0x40], # home / incandescent - - ] - - _OV5640_SPECIAL_EFFECT_NONE = 0 - _OV5640_SPECIAL_EFFECT_NEGATIVE = 1 - _OV5640_SPECIAL_EFFECT_GRAYSCALE = 2 - _OV5640_SPECIAL_EFFECT_RED_TINT = 3 - _OV5640_SPECIAL_EFFECT_GREEN_TINT = 4 - _OV5640_SPECIAL_EFFECT_BLUE_TINT = 5 - _OV5640_SPECIAL_EFFECT_SEPIA = 6 - - _sensor_special_effects = [ - [0x06, 0x40, 0x10, 0x08], # Normal - [0x46, 0x40, 0x28, 0x08], # Negative - [0x1E, 0x80, 0x80, 0x08], # Grayscale - [0x1E, 0x80, 0xC0, 0x08], # Red Tint - [0x1E, 0x60, 0x60, 0x08], # Green Tint - [0x1E, 0xA0, 0x40, 0x08], # Blue Tint - [0x1E, 0x40, 0xA0, 0x08], # Sepia - ] - - _sensor_regs_gamma0 = [ - 0x5480, 0x01, - 0x5481, 0x08, - 0x5482, 0x14, - 0x5483, 0x28, - 0x5484, 0x51, - 0x5485, 0x65, - 0x5486, 0x71, - 0x5487, 0x7D, - 0x5488, 0x87, - 0x5489, 0x91, - 0x548A, 0x9A, - 0x548B, 0xAA, - 0x548C, 0xB8, - 0x548D, 0xCD, - 0x548E, 0xDD, - 0x548F, 0xEA, - 0x5490, 0x1D, - ] - - _sensor_regs_gamma1 = [ - 0x5480, 0x1, - 0x5481, 0x0, - 0x5482, 0x1E, - 0x5483, 0x3B, - 0x5484, 0x58, - 
0x5485, 0x66, - 0x5486, 0x71, - 0x5487, 0x7D, - 0x5488, 0x83, - 0x5489, 0x8F, - 0x548A, 0x98, - 0x548B, 0xA6, - 0x548C, 0xB8, - 0x548D, 0xCA, - 0x548E, 0xD7, - 0x548F, 0xE3, - 0x5490, 0x1D, - ] - - _sensor_regs_awb0 = [ - 0x5180, 0xFF, - 0x5181, 0xF2, - 0x5182, 0x00, - 0x5183, 0x14, - 0x5184, 0x25, - 0x5185, 0x24, - 0x5186, 0x09, - 0x5187, 0x09, - 0x5188, 0x09, - 0x5189, 0x75, - 0x518A, 0x54, - 0x518B, 0xE0, - 0x518C, 0xB2, - 0x518D, 0x42, - 0x518E, 0x3D, - 0x518F, 0x56, - 0x5190, 0x46, - 0x5191, 0xF8, - 0x5192, 0x04, - 0x5193, 0x70, - 0x5194, 0xF0, - 0x5195, 0xF0, - 0x5196, 0x03, - 0x5197, 0x01, - 0x5198, 0x04, - 0x5199, 0x12, - 0x519A, 0x04, - 0x519B, 0x00, - 0x519C, 0x06, - 0x519D, 0x82, - 0x519E, 0x38, - ] - # fmt: on - - def __init__( - self, - i2c, - i2c_address = 0x3C - ): - """ - Initializes the OV5640 camera sensor with default settings. - - Args: - i2c (I2C): I2C object for communication - i2c_address (int, optional): I2C address (default: 0x3C) - """ - super().__init__(i2c, i2c_address) - - self._write_list(self._sensor_default_regs) - - self._colorspace = self._OV5640_COLOR_RGB - self._flip_x = False - self._flip_y = False - self._w = None - self._h = None - self._size = self._OV5640_SIZE_QVGA - self._test_pattern = False - self._binning = False - self._scale = False - self._ev = 0 - self._white_balance = 0 - - self._set_size_and_colorspace() - - def _is_connected(self): - """ - Checks if the camera is connected by reading the chip ID. - - Returns: - bool: True if the camera is connected and the chip ID is correct, - otherwise False. - """ - try: - # Try to read the chip ID - # If it throws an I/O error - the device isn't connected - id = self._get_chip_id() - - # Confirm the chip ID is correct - if id == 0x5640: - return True - else: - return False - except: - return False - - def _get_chip_id(self): - """ - Reads the chip ID. - - Returns: - int: The chip ID of the OV5640 (should be 0x5640). - """ - data = self._read_register(self._CHIP_ID_HIGH, 2) - return (data[0] << 8) | data[1] - - def _soft_reset(self): - """ - Performs a software reset of the OV5640 sensor. - This resets the sensor to its default state. - """ - self._write_register(self._SYSTEM_CTROL0, 0x82) - - def _write_list(self, data): - """ - Initializes the OV5640 sensor with default settings. - This includes setting up exposure, gain, and frame timing. - - Args: - data (list): List of register-value pairs to write to the sensor. - """ - for i in range(len(data) // 2): - reg = data[i * 2] - value = data[i * 2 + 1] - if reg == self._REG_DLY: - sleep_us(value) - else: - self._write_register(reg, value) - sleep_us(1000) - - def _set_size_and_colorspace(self) -> None: - """ - Sets the camera resolution and colorspace based on the current size. 
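        As a worked example with the default _OV5640_SIZE_QVGA (320x240,
        4:3 sensor window 2560x1920): binning is enabled because
        320 <= 2560 // 2 and 240 <= 1920 // 2, and the scaler is enabled
        because 320x240 is neither the full nor exactly half the sensor
        resolution.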
- """ - size = self._size - width, height, ratio = self._resolution_info[size] - self._w = width - self._h = height - ( - max_width, - max_height, - start_x, - start_y, - end_x, - end_y, - offset_x, - offset_y, - total_x, - total_y, - ) = self._ratio_table[ratio] - - self._binning = (width <= max_width // 2) and (height <= max_height // 2) - self._scale = not ( - (width == max_width and height == max_height) - or (width == max_width // 2 and height == max_height // 2) - ) - - self._write_addr_reg(self._X_ADDR_ST_H, start_x, start_y) - self._write_addr_reg(self._X_ADDR_END_H, end_x, end_y) - self._write_addr_reg(self._X_OUTPUT_SIZE_H, width, height) - - if not self._binning: - self._write_addr_reg(self._X_TOTAL_SIZE_H, total_x, total_y) - self._write_addr_reg(self._X_OFFSET_H, offset_x, offset_y) - else: - if width > 920: - self._write_addr_reg(self._X_TOTAL_SIZE_H, total_x - 200, total_y // 2) - else: - self._write_addr_reg(self._X_TOTAL_SIZE_H, 2060, total_y // 2) - self._write_addr_reg(self._X_OFFSET_H, offset_x // 2, offset_y // 2) - - self._write_reg_bits(self._ISP_CONTROL_01, 0x20, self._scale) - - self._set_image_options() - - if self._colorspace == self._OV5640_COLOR_JPEG: - sys_mul = 200 - if size < self._OV5640_SIZE_QVGA: - sys_mul = 160 - if size < self._OV5640_SIZE_XGA: - sys_mul = 180 - self._set_pll(False, sys_mul, 4, 2, False, 2, True, 4) - else: - self._set_pll(False, 32, 1, 1, False, 1, True, 4) - - self._set_colorspace() - - def _set_pll( - self, - bypass: bool, - multiplier: int, - sys_div: int, - pre_div: int, - root_2x: bool, - pclk_root_div: int, - pclk_manual: bool, - pclk_div: int, - ) -> None: - """ - Sets the PLL (Phase-Locked Loop) configuration for the OV5640 camera. - - Args: - bypass (bool): Whether to bypass the PLL. - multiplier (int): PLL multiplier. - sys_div (int): System divider. - pre_div (int): Pre-divider. - root_2x (bool): Whether to use 2x root clock. - pclk_root_div (int): PCLK root divider. - pclk_manual (bool): Whether to use manual PCLK. - pclk_div (int): PCLK divider. - """ - if ( - multiplier > 252 - or multiplier < 4 - or sys_div > 15 - or pre_div > 8 - or pclk_div > 31 - or pclk_root_div > 3 - ): - raise ValueError("Invalid argument to internal function") - - self._write_register(0x3039, 0x80 if bypass else 0) - self._write_register(0x3034, 0x1A) - self._write_register(0x3035, 1 | ((sys_div & 0xF) << 4)) - self._write_register(0x3036, multiplier & 0xFF) - self._write_register(0x3037, (pre_div & 0xF) | (0x10 if root_2x else 0)) - self._write_register(0x3108, (pclk_root_div & 3) << 4 | 0x06) - self._write_register(0x3824, pclk_div & 0x1F) - self._write_register(0x460C, 0x22 if pclk_manual else 0x22) - self._write_register(0x3103, 0x13) - - def _set_colorspace(self) -> None: - """ - Sets the colorspace of the OV5640 camera based on the current colorspace - setting. - """ - colorspace = self._colorspace - settings = self._ov5640_color_settings[colorspace] - - self._write_list(settings) - - def _set_image_options(self) -> None: - """ - Sets the image options such as binning, flipping, and colorspace. 
- """ - reg20 = reg21 = reg4514 = reg4514_test = 0 - if self._colorspace == self._OV5640_COLOR_JPEG: - reg21 |= 0x20 - - if self._binning: - reg20 |= 1 - reg21 |= 1 - reg4514_test |= 4 - else: - reg20 |= 0x40 - - if self._flip_y: - reg20 |= 0x06 - reg4514_test |= 1 - - if self._flip_x: - reg21 |= 0x06 - reg4514_test |= 2 - - if reg4514_test == 0: - reg4514 = 0x88 - elif reg4514_test == 1: - reg4514 = 0x00 - elif reg4514_test == 2: - reg4514 = 0xBB - elif reg4514_test == 3: - reg4514 = 0x00 - elif reg4514_test == 4: - reg4514 = 0xAA - elif reg4514_test == 5: - reg4514 = 0xBB - elif reg4514_test == 6: - reg4514 = 0xBB - elif reg4514_test == 7: - reg4514 = 0xAA - - self._write_register(self._TIMING_TC_REG20, reg20) - self._write_register(self._TIMING_TC_REG21, reg21) - self._write_register(0x4514, reg4514) - - if self._binning: - self._write_register(0x4520, 0x0B) - self._write_register(self._X_INCREMENT, 0x31) - self._write_register(self._Y_INCREMENT, 0x31) - else: - self._write_register(0x4520, 0x10) - self._write_register(self._X_INCREMENT, 0x11) - self._write_register(self._Y_INCREMENT, 0x11) - - def _write_addr_reg(self, reg: int, x_value: int, y_value: int) -> None: - """ - Writes 2 16-bit values to 4 8-bit registers. - - Args: - reg (int): The base register address to write to. - x_value (int): The first 16-bit value to write. - y_value (int): The second 16-bit value to write. - """ - self._write_register(reg, [ - (x_value >> 8) & 0xFF, - x_value & 0xFF, - (y_value >> 8) & 0xFF, - y_value & 0xFF, - ]) - - def _write_reg_bits(self, reg: int, mask: int, enable: bool) -> None: - """ - Writes a bitmask to a register, enabling or disabling specific bits. - - Args: - reg (int): The register address to write to. - mask (int): The bitmask to apply. - enable (bool): If True, enables the bits in the mask; if False, - disables them. - """ - val = self._read_register(reg)[0] - if enable: - val |= mask - else: - val &= ~mask - self._write_register(reg, val) - - def read(self, image = None): - """ - Reads an image from the camera. - - Args: - image (ndarray, optional): Image to read into - - Returns: - tuple: (success, image) - - success (bool): True if the image was read, otherwise False - - image (ndarray): The captured image, or None if reading failed - """ - if self._colorspace == self._OV5640_COLOR_RGB: - return (True, cv2.cvtColor(self._buffer, cv2.COLOR_BGR5652BGR, image)) - elif self._colorspace == self._OV5640_COLOR_GRAYSCALE: - return (True, cv2.cvtColor(self._buffer, cv2.COLOR_GRAY2BGR, image)) - else: - NotImplementedError( - f"OV5640: Reading images in colorspace {self._colorspace} is not yet implemented." - ) diff --git a/cv2_drivers/cameras/ov5640_pio.py b/cv2_drivers/cameras/ov5640_pio.py deleted file mode 100644 index dd35f05..0000000 --- a/cv2_drivers/cameras/ov5640_pio.py +++ /dev/null @@ -1,82 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# ov5640_pio.py -# -# OpenCV OV5640 camera driver using a PIO interface. Only available on -# Raspberry Pi RP2 processors. -#------------------------------------------------------------------------------- - -from .ov5640 import OV5640 -from .dvp_rp2_pio import DVP_RP2_PIO -from ulab import numpy as np - -class OV5640_PIO(OV5640, DVP_RP2_PIO): - """ - OpenCV OV5640 camera driver using a PIO interface. 
Only available on - Raspberry Pi RP2 processors. - """ - def __init__( - self, - i2c, - sm_id, - pin_d0, - pin_vsync, - pin_hsync, - pin_pclk, - pin_xclk = None, - xclk_freq = 5_000_000, - i2c_address = 0x3c - ): - """ - Initializes the OV5640 PIO camera driver. - - Args: - i2c (I2C): I2C object for communication - sm_id (int): PIO state machine ID - pin_d0 (int): Data 0 pin number for DVP interface - pin_vsync (int): Vertical sync pin number - pin_hsync (int): Horizontal sync pin number - pin_pclk (int): Pixel clock pin number - pin_xclk (int, optional): External clock pin number - xclk_freq (int, optional): Frequency in Hz for the external clock - Default is 5 MHz - i2c_address (int, optional): I2C address of the camera - Default is 0x3c - """ - # Create the frame buffer - self._buffer = np.zeros((240, 320, 2), dtype=np.uint8) - - # Call both parent constructors - DVP_RP2_PIO.__init__( - self, - pin_d0, - pin_vsync, - pin_hsync, - pin_pclk, - pin_xclk, - xclk_freq, - sm_id, - num_data_pins = 8, - bytes_per_frame = self._buffer.size, - byte_swap = False - ) - OV5640.__init__( - self, - i2c, - i2c_address - ) - - def open(self): - """ - Opens the camera and prepares it for capturing images. - """ - self._active(True) - - def release(self): - """ - Releases the camera and frees any resources. - """ - self._active(False) diff --git a/cv2_drivers/displays/__init__.py b/cv2_drivers/displays/__init__.py deleted file mode 100644 index a4b35ed..0000000 --- a/cv2_drivers/displays/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# cv2_drivers/displays/__init__.py -# -# Imports all available display drivers for MicroPython OpenCV. -#------------------------------------------------------------------------------- - -# Import platform agnostic drivers -from . import st7789_spi - -# Import sys module to check platform -import sys - -# Import RP2 drivers -if 'rp2' in sys.platform: - from . import st7789_pio diff --git a/cv2_drivers/displays/cv2_display.py b/cv2_drivers/displays/cv2_display.py deleted file mode 100644 index 15bf249..0000000 --- a/cv2_drivers/displays/cv2_display.py +++ /dev/null @@ -1,193 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# cv2_display.py -# -# Base class for OpenCV display drivers. -#------------------------------------------------------------------------------- - -import cv2 -from ulab import numpy as np -from machine import Pin - -class CV2_Display(): - """ - Base class for OpenCV display drivers. - """ - def __init__(self, buffer_shape): - """ - Initializes the display. - - Args: - buffer_shape (tuple): Shape of the buffer as (rows, cols, channels) - """ - # Create the frame buffer - self._buffer = np.zeros(buffer_shape, dtype=np.uint8) - - def imshow(self, image): - """ - Shows a NumPy image on the display. - - Args: - image (ndarray): Image to show - """ - raise NotImplementedError("imshow() must be implemented by driver") - - def clear(self): - """ - Clears the display by filling it with black color. 
- """ - raise NotImplementedError("clear() must be implemented by driver") - - def _get_common_roi_with_buffer(self, image): - """ - Gets the common region of interest (ROI) between the image and the - display's internal buffer. - - Args: - image (ndarray): Image to display - - Returns: - tuple: (image_roi, buffer_roi) - - image_roi (ndarray): ROI of the image - - buffer_roi (ndarray): ROI of the display's buffer - """ - # Ensure image is a NumPy ndarray - if type(image) is not np.ndarray: - raise TypeError("Image must be a NumPy ndarray") - - # Determing number of rows and columns in the image - image_rows = image.shape[0] - if image.ndim < 2: - image_cols = 1 - else: - image_cols = image.shape[1] - - # Get the common ROI between the image and the buffer - row_max = min(image_rows, self._buffer.shape[0]) - col_max = min(image_cols, self._buffer.shape[1]) - img_roi = image[:row_max, :col_max] - buffer_roi = self._buffer[:row_max, :col_max] - return img_roi, buffer_roi - - def _convert_to_uint8(self, image): - """ - Converts the image to uint8 format if necessary. - - Args: - image (ndarray): Image to convert - - Returns: - Image: Converted image - """ - # Check if the image is already in uint8 format - if image.dtype is np.uint8: - return image - - # Convert to uint8 format. This unfortunately requires creating a new - # buffer for the converted image, which takes more memory - if image.dtype == np.int8: - return cv2.convertScaleAbs(image, alpha=1, beta=127) - elif image.dtype == np.int16: - return cv2.convertScaleAbs(image, alpha=1/255, beta=127) - elif image.dtype == np.uint16: - return cv2.convertScaleAbs(image, alpha=1/255) - elif image.dtype == np.float: - # This implementation creates an additional buffer from np.clip() - # TODO: Find another solution that avoids an additional buffer - return cv2.convertScaleAbs(np.clip(image, 0, 1), alpha=255) - else: - raise ValueError(f"Unsupported image dtype: {image.dtype}") - - def _convert_to_bgr565(self, src, dst): - """ - Converts an image to BGR565 format. - - Args: - src (ndarray): Input image - dst (ndarray): Output BGR565 buffer - """ - # Determine the number of channels in the image - if src.ndim < 3: - ch = 1 - else: - ch = src.shape[2] - - # Convert the image to BGR565 format based on the number of channels - if ch == 1: # Grayscale - dst = cv2.cvtColor(src, cv2.COLOR_GRAY2BGR565, dst) - elif ch == 2: # Already in BGR565 format - # For some reason, this is relatively slow and creates a new buffer: - # https://github.com/v923z/micropython-ulab/issues/726 - dst[:] = src - elif ch == 3: # BGR - dst = cv2.cvtColor(src, cv2.COLOR_BGR2BGR565, dst) - else: - raise ValueError("Image must be 1, 2 or 3 channels (grayscale, BGR565, or BGR)") - - def _save_pin_mode_alt(self, pin): - """ - Saves the current `mode` and `alt` of the pin so it can be restored - later. Mostly used for SPI displays on a shared SPI bus with a driver - that needs non-SPI pin modes, such as the RP2 PIO driver. This allows - other devices on the bus to continue using the SPI interface after the - display driver finishes communicating with the display. - - Returns: - tuple: (mode, alt) - """ - # See: https://github.com/micropython/micropython/issues/17515 - # There's no way to get the mode and alt of a pin directly, so we - # convert the pin to a string and parse it. 
Example formats: - # "Pin(GPIO16, mode=OUT)" - # "Pin(GPIO16, mode=ALT, alt=SPI)" - pin_str = str(pin) - - # Extract the "mode" parameter from the pin string - try: - # Split between "mode=" and the next comma or closing parenthesis - mode_str = pin_str[pin_str.index("mode=") + 5:].partition(",")[0].partition(")")[0] - - # Look up the mode in Pin class dictionary - mode = Pin.__dict__[mode_str] - except (ValueError, KeyError): - # No mode specified, just set to -1 (default) - mode = -1 - - # Extrct the "alt" parameter from the pin string - try: - # Split between "alt=" and the next comma or closing parenthesis - alt_str = pin_str[pin_str.index("alt=") + 4:].partition(",")[0].partition(")")[0] - - # Sometimes the value comes back as a number instead of a valid - # "ALT_xyz" string, so we need to check it - if "ALT_" + alt_str in Pin.__dict__: - # Look up the alt in Pin class dictionary (with "ALT_" prefix) - alt = Pin.__dict__["ALT_" + alt_str] - else: - # Convert the altStr to an integer - alt = int(alt_str) - except (ValueError, KeyError): - # No alt specified, just set to -1 (default) - alt = -1 - - # Return the mode and alt as a tuple - return (mode, alt) - - def splash(self, filename="splash.png"): - """ - Shows a splash image on the display if one is available, otherwise - clears the display of any previous content. - - Args: - filename (str, optional): Path to a splash image file. Defaults to - "splash.png" - """ - try: - # Attempt to load and show the splash image - self.imshow(cv2.imread(filename)) - except Exception: - # Couldn't load the image, just clear the display as a fallback - self.clear() diff --git a/cv2_drivers/displays/st7789.py b/cv2_drivers/displays/st7789.py deleted file mode 100644 index 80d47bf..0000000 --- a/cv2_drivers/displays/st7789.py +++ /dev/null @@ -1,272 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# st7789.py -# -# Base class for OpenCV ST7789 display drivers. -# -# This class is derived from: -# https://github.com/easytarget/st7789-framebuffer/blob/main/st7789_purefb.py -# Released under the MIT license. -# Copyright (c) 2024 Owen Carter -# Copyright (c) 2024 Ethan Lacasse -# Copyright (c) 2020-2023 Russ Hughes -# Copyright (c) 2019 Ivan Belokobylskiy -#------------------------------------------------------------------------------- - -from .cv2_display import CV2_Display -from time import sleep_ms -import struct - -class ST7789(CV2_Display): - """ - Base class for OpenCV ST7789 display drivers. 
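    The init sequence defined below is a tuple of (command, data, delay_ms)
    entries that _send_init() replays in order; for example,
    (b'\x11', b'\x00', 120) exits sleep mode and then waits 120 ms before
    the next command is sent.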
- """ - # ST7789 commands - _ST7789_SWRESET = b"\x01" - _ST7789_SLPIN = b"\x10" - _ST7789_SLPOUT = b"\x11" - _ST7789_NORON = b"\x13" - _ST7789_INVOFF = b"\x20" - _ST7789_INVON = b"\x21" - _ST7789_DISPOFF = b"\x28" - _ST7789_DISPON = b"\x29" - _ST7789_CASET = b"\x2a" - _ST7789_RASET = b"\x2b" - _ST7789_RAMWR = b"\x2c" - _ST7789_VSCRDEF = b"\x33" - _ST7789_COLMOD = b"\x3a" - _ST7789_MADCTL = b"\x36" - _ST7789_VSCSAD = b"\x37" - _ST7789_RAMCTL = b"\xb0" - - # MADCTL bits - _ST7789_MADCTL_MY = 0x80 - _ST7789_MADCTL_MX = 0x40 - _ST7789_MADCTL_MV = 0x20 - _ST7789_MADCTL_ML = 0x10 - _ST7789_MADCTL_BGR = 0x08 - _ST7789_MADCTL_MH = 0x04 - _ST7789_MADCTL_RGB = 0x00 - - _ENCODE_POS = ">HH" - - # Rotation tables - # (madctl, width, height, xstart, ystart)[rotation % 4] - - _DISPLAY_240x320 = ( - (0x00, 240, 320, 0, 0), - (0x60, 320, 240, 0, 0), - (0xc0, 240, 320, 0, 0), - (0xa0, 320, 240, 0, 0)) - - _DISPLAY_170x320 = ( - (0x00, 170, 320, 35, 0), - (0x60, 320, 170, 0, 35), - (0xc0, 170, 320, 35, 0), - (0xa0, 320, 170, 0, 35)) - - _DISPLAY_240x240 = ( - (0x00, 240, 240, 0, 0), - (0x60, 240, 240, 0, 0), - (0xc0, 240, 240, 0, 80), - (0xa0, 240, 240, 80, 0)) - - _DISPLAY_135x240 = ( - (0x00, 135, 240, 52, 40), - (0x60, 240, 135, 40, 53), - (0xc0, 135, 240, 53, 40), - (0xa0, 240, 135, 40, 52)) - - _DISPLAY_128x128 = ( - (0x00, 128, 128, 2, 1), - (0x60, 128, 128, 1, 2), - (0xc0, 128, 128, 2, 1), - (0xa0, 128, 128, 1, 2)) - - # Supported displays (physical width, physical height, rotation table) - _SUPPORTED_DISPLAYS = ( - (240, 320, _DISPLAY_240x320), - (170, 320, _DISPLAY_170x320), - (240, 240, _DISPLAY_240x240), - (135, 240, _DISPLAY_135x240), - (128, 128, _DISPLAY_128x128)) - - # init tuple format (b'command', b'data', delay_ms) - _ST7789_INIT_CMDS = ( - ( b'\x11', b'\x00', 120), # Exit sleep mode - ( b'\x13', b'\x00', 0), # Turn on the display - ( b'\xb6', b'\x0a\x82', 0), # Set display function control - ( b'\x3a', b'\x55', 10), # Set pixel format to 16 bits per pixel (RGB565) - ( b'\xb2', b'\x0c\x0c\x00\x33\x33', 0), # Set porch control - ( b'\xb7', b'\x35', 0), # Set gate control - ( b'\xbb', b'\x28', 0), # Set VCOMS setting - ( b'\xc0', b'\x0c', 0), # Set power control 1 - ( b'\xc2', b'\x01\xff', 0), # Set power control 2 - ( b'\xc3', b'\x10', 0), # Set power control 3 - ( b'\xc4', b'\x20', 0), # Set power control 4 - ( b'\xc6', b'\x0f', 0), # Set VCOM control 1 - ( b'\xd0', b'\xa4\xa1', 0), # Set power control A - # Set gamma curve positive polarity - ( b'\xe0', b'\xd0\x00\x02\x07\x0a\x28\x32\x44\x42\x06\x0e\x12\x14\x17', 0), - # Set gamma curve negative polarity - ( b'\xe1', b'\xd0\x00\x02\x07\x0a\x28\x31\x54\x47\x0e\x1c\x17\x1b\x1e', 0), - ( b'\x21', b'\x00', 0), # Enable display inversion - ( b'\x29', b'\x00', 120) # Turn on the display - ) - - def __init__( - self, - width, - height, - rotation=0, - bgr_order=True, - reverse_bytes_in_word=True, - ): - """ - Initializes the ST7789 display driver. 
- - Args: - width (int): Display width in pixels - height (int): Display height in pixels - rotation (int, optional): Orientation of display - - 0: Portrait (default) - - 1: Landscape - - 2: Inverted portrait - - 3: Inverted landscape - bgr_order (bool, optional): Color order - - True: BGR (default) - - False: RGB - reverse_bytes_in_word (bool, optional): - - Enable if the display uses LSB byte order for color words - """ - # Initial dimensions and offsets; will be overridden when rotation applied - self._width = width - self._height = height - self._xstart = 0 - self._ystart = 0 - # Check display is known and get rotation table - self._rotations = self._find_rotations(width, height) - if not self._rotations: - supported_displays = ", ".join( - [f"{display[0]}x{display[1]}" for display in self._SUPPORTED_DISPLAYS]) - raise ValueError( - f"Unsupported {width}x{height} display. Supported displays: {supported_displays}") - # Colors - self._bgr_order = bgr_order - self._needs_swap = reverse_bytes_in_word - # Reset the display - self._soft_reset() - # Yes, send init twice, once is not always enough - self._send_init(self._ST7789_INIT_CMDS) - self._send_init(self._ST7789_INIT_CMDS) - # Initial rotation - self._rotation = rotation % 4 - # Apply rotation - self._set_rotation(self._rotation) - # Create the framebuffer for the correct rotation - super().__init__((self._height, self._width, 2)) - - def _send_init(self, commands): - """ - Sends initialization commands to display. - - Args: - commands (list): List of tuples (command, data, delay_ms) - """ - for command, data, delay_ms in commands: - self._write(command, data) - sleep_ms(delay_ms) - - def _soft_reset(self): - """ - Sends a software reset command to the display. - """ - self._write(self._ST7789_SWRESET) - sleep_ms(150) - - def _find_rotations(self, width, height): - """ - Finds the correct rotation table for the display, or returns None. - - Args: - width (int): Display width in pixels - height (int): Display height in pixels - Returns: - list: Rotation table for the display or None if not found - """ - for display in self._SUPPORTED_DISPLAYS: - if display[0] == width and display[1] == height: - return display[2] - return None - - def _set_rotation(self, rotation): - """ - Sets display rotation. - - Args: - rotation (int): - - 0: Portrait - - 1: Landscape - - 2: Inverted portrait - - 3: Inverted landscape - """ - if ((rotation % 2) != (self._rotation % 2)) and (self._width != self._height): - # non-square displays can currently only be rotated by 180 degrees - # TODO: can framebuffer of super class be destroyed and re-created - # to match the new dimensions? or its width/height changed? - return - - # find rotation parameters and send command - rotation %= len(self._rotations) - ( madctl, - self._width, - self._height, - self._xstart, - self._ystart, ) = self._rotations[rotation] - if self._bgr_order: - madctl |= self._ST7789_MADCTL_BGR - else: - madctl &= ~self._ST7789_MADCTL_BGR - self._write(self._ST7789_MADCTL, bytes([madctl])) - # Set window for writing into - self._write(self._ST7789_CASET, - struct.pack(self._ENCODE_POS, self._xstart, self._width + self._xstart - 1)) - self._write(self._ST7789_RASET, - struct.pack(self._ENCODE_POS, self._ystart, self._height + self._ystart - 1)) - self._write(self._ST7789_RAMWR) - # TODO: Can we swap (modify) framebuffer width/height in the super() class? - self._rotation = rotation - - def imshow(self, image): - """ - Shows a NumPy image on the display.
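# A small sketch of the MADCTL composition performed in _set_rotation() above
# (plain integers, no hardware needed); 0x60 is the landscape base value from
# the rotation tables:
MADCTL_BGR = 0x08  # same bit as _ST7789_MADCTL_BGR

def compose_madctl(base, bgr_order):
    # Set or clear the BGR color-order bit on the rotation's base value
    return (base | MADCTL_BGR) if bgr_order else (base & ~MADCTL_BGR)

print(hex(compose_madctl(0x60, True)))   # 0x68 - landscape, BGR order
print(hex(compose_madctl(0x60, False)))  # 0x60 - landscape, RGB order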
- - Args: - image (ndarray): Image to show - """ - # Get the common ROI between the image and internal display buffer - image_roi, buffer_roi = self._get_common_roi_with_buffer(image) - - # Ensure the image is in uint8 format - image_roi = self._convert_to_uint8(image_roi) - - # Convert the image to BGR565 format and write it to the buffer - self._convert_to_bgr565(image_roi, buffer_roi) - - # Write buffer to display. Swap bytes if needed - if self._needs_swap: - self._write(None, self._buffer[:, :, ::-1]) - else: - self._write(None, self._buffer) - - def clear(self): - """ - Clears the display by filling it with black color. - """ - # Clear the buffer by filling it with zeros (black) - self._buffer[:] = 0 - # Write the buffer to the display - self._write(None, self._buffer) diff --git a/cv2_drivers/displays/st7789_pio.py b/cv2_drivers/displays/st7789_pio.py deleted file mode 100644 index 5ef57af..0000000 --- a/cv2_drivers/displays/st7789_pio.py +++ /dev/null @@ -1,205 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# st7789_pio.py -# -# OpenCV ST7789 display driver using a PIO interface. Only available on -# Raspberry Pi RP2 processors. -# -# This class is derived from: -# https://github.com/raspberrypi/pico-examples/tree/master/pio/st7789_lcd -# Released under the MIT license. -# Copyright (c) 2024 Owen Carter -# Copyright (c) 2024 Ethan Lacasse -# Copyright (c) 2020-2023 Russ Hughes -# Copyright (c) 2019 Ivan Belokobylskiy -#------------------------------------------------------------------------------- - -from .st7789 import ST7789 -from machine import Pin -import rp2 - -class ST7789_PIO(ST7789): - """ - OpenCV ST7789 display driver using a PIO interface. Only available on - Raspberry Pi RP2 processors. - """ - def __init__( - self, - width, - height, - sm_id, - pin_clk, - pin_tx, - pin_dc, - pin_cs=None, - freq=-1, - rotation=0, - bgr_order=True, - reverse_bytes_in_word=True, - ): - """ - Initializes the ST7789 PIO display driver. - - Args: - width (int): Display width in pixels - height (int): Display height in pixels - sm_id (int): PIO state machine ID - pin_clk (int): Clock pin number - pin_tx (int): Data pin number - pin_dc (int): Data/Command pin number - pin_cs (int, optional): Chip Select pin number - freq (int, optional): Frequency in Hz for the PIO state machine - Default is -1, which uses the default frequency of 125MHz - rotation (int, optional): Orientation of display - - 0: Portrait (default) - - 1: Landscape - - 2: Inverted portrait - - 3: Inverted landscape - bgr_order (bool, optional): Color order - - True: BGR (default) - - False: RGB - reverse_bytes_in_word (bool, optional): - - Enable if the display uses LSB byte order for color words - """ - # Store PIO arguments - self._sm_id = sm_id - self._clk = Pin(pin_clk) # Don't change mode/alt - self._tx = Pin(pin_tx) # Don't change mode/alt - self._dc = Pin(pin_dc) # Don't change mode/alt - self._cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs is not None else None - self._freq = freq - - # Start the PIO state machine and DMA with 1 byte per transfer - self._setup_sm_and_dma(1) - - # Call the parent class constructor - super().__init__(width, height, rotation, bgr_order, reverse_bytes_in_word) - - # Change the transfer size to 2 bytes for faster throughput.
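# To make the reverse_bytes_in_word swap concrete, here is a host-side sketch
# using plain struct (no ulab needed): a 16-bit 565 color word is stored
# MSB-first, so panels expecting LSB-first words need the two bytes exchanged,
# which imshow() above does in bulk with the [:, :, ::-1] slice:
import struct

color = 0x07E0  # pure green in RGB565

print(struct.pack(">H", color).hex())  # 07e0 - MSB first
print(struct.pack("<H", color).hex())  # e007 - byte-swapped, LSB first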
Can't do 4 - # bytes, because then pairs of pixels get swapped - self._setup_sm_and_dma(2) - - def _setup_sm_and_dma(self, bytes_per_transfer): - """ - Sets up the PIO state machine and DMA for writing to the display. - - Args: - bytes_per_transfer (int): Number of bytes to transfer in each write - """ - # Store the bytes per transfer for later use - self._bytes_per_transfer = bytes_per_transfer - - # Get the current mode and alt of the pins so they can be restored - txMode, txAlt = self._save_pin_mode_alt(self._tx) - clkMode, clkAlt = self._save_pin_mode_alt(self._clk) - - # Initialize the PIO state machine - self._sm = rp2.StateMachine( - self._sm_id, - self._pio_write_spi, - freq = self._freq, - out_base = self._tx, - sideset_base = self._clk, - pull_thresh = bytes_per_transfer * 8 - ) - - # The tx and clk pins just got their mode and alt set for PIO0 or PIO1. - # We need to save them again to restore later when _write() is called, - # if we haven't already - if not hasattr(self, '_txMode'): - self._txMode, self._txAlt = self._save_pin_mode_alt(self._tx) - self._clkMode, self._clkAlt = self._save_pin_mode_alt(self._clk) - - # Now restore the original mode and alt of the pins - self._tx.init(mode=txMode, alt=txAlt) - self._clk.init(mode=clkMode, alt=clkAlt) - - # Instantiate a DMA controller if not already done - if not hasattr(self, '_dma'): - self._dma = rp2.DMA() - - # Configure the DMA to write to the PIO state machine - req_num = ((self._sm_id // 4) << 3) + (self._sm_id % 4) - dma_ctrl = self._dma.pack_ctrl( - size = {1:0, 2:1, 4:2}[bytes_per_transfer], # 0 = 8-bit, 1 = 16-bit, 2 = 32-bit - inc_write = False, - treq_sel = req_num, - bswap = False - ) - self._dma.config( - write = self._sm, - ctrl = dma_ctrl - ) - - def _write(self, command=None, data=None): - """ - Writes commands and data to the display. - - Args: - command (bytes, optional): Command to send to the display - data (bytes, optional): Data to send to the display - """ - # Save the current mode and alt of the spi pins in case they're used by - # another device on the same SPI bus - dcMode, dcAlt = self._save_pin_mode_alt(self._dc) - txMode, txAlt = self._save_pin_mode_alt(self._tx) - clkMode, clkAlt = self._save_pin_mode_alt(self._clk) - - # Temporarily set the SPI pins to the correct mode and alt for PIO - self._dc.init(mode=Pin.OUT) - self._tx.init(mode=self._txMode, alt=self._txAlt) - self._clk.init(mode=self._clkMode, alt=self._clkAlt) - - # Write to the display - if self._cs: - self._cs.off() - if command is not None: - self._dc.off() - self._pio_write(command) - if data is not None: - self._dc.on() - self._pio_write(data) - if self._cs: - self._cs.on() - - # Restore the SPI pins to their original mode and alt - self._dc.init(mode=dcMode, alt=dcAlt) - self._tx.init(mode=txMode, alt=txAlt) - self._clk.init(mode=clkMode, alt=clkAlt) - - def _pio_write(self, data): - """ - Writes data to the display using the PIO.
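# The treq_sel arithmetic above maps a global state machine ID to the RP2 DREQ
# number of that state machine's TX FIFO; a standalone copy of the formula:
def pio_tx_dreq(sm_id):
    # 4 state machines per PIO block; each block's DREQs start 8 apart
    return ((sm_id // 4) << 3) + (sm_id % 4)

for sm in (0, 3, 4, 5, 7):
    print(sm, "->", pio_tx_dreq(sm))  # 0->0, 3->3, 4->8, 5->9, 7->11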
- - Args: - data (bytes, bytearray, or ndarray): Data to write to the display - """ - # Configure the DMA transfer count and read address - count = len(data) if isinstance(data, (bytes, bytearray)) else data.size - self._dma.count = count // self._bytes_per_transfer - self._dma.read = data - - # Start the state machine and DMA transfer, and wait for it to finish - self._sm.active(1) - self._dma.active(True) - while self._dma.active(): - pass - - # Stop the state machine - self._sm.active(0) - - @rp2.asm_pio( - out_init = rp2.PIO.OUT_LOW, - sideset_init = rp2.PIO.OUT_LOW, - out_shiftdir = rp2.PIO.SHIFT_LEFT, - autopull = True - ) - def _pio_write_spi(): - """ - PIO program to write data to the display. - """ - out(pins, 1).side(0) - nop().side(1) diff --git a/cv2_drivers/displays/st7789_spi.py b/cv2_drivers/displays/st7789_spi.py deleted file mode 100644 index 13c34f9..0000000 --- a/cv2_drivers/displays/st7789_spi.py +++ /dev/null @@ -1,92 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# st7789_spi.py -# -# OpenCV ST7789 display driver using a SPI interface. -# -# This class is derived from: -# https://github.com/easytarget/st7789-framebuffer/blob/main/st7789_purefb.py -# Released under the MIT license. -# Copyright (c) 2024 Owen Carter -# Copyright (c) 2024 Ethan Lacasse -# Copyright (c) 2020-2023 Russ Hughes -# Copyright (c) 2019 Ivan Belokobylskiy -#------------------------------------------------------------------------------- - -from .st7789 import ST7789 -from machine import Pin - -class ST7789_SPI(ST7789): - """ - OpenCV ST7789 display driver using a SPI interface. - """ - def __init__( - self, - width, - height, - spi, - pin_dc, - pin_cs=None, - rotation=0, - bgr_order=True, - reverse_bytes_in_word=True, - ): - """ - Initializes the ST7789 SPI display driver. - - Args: - width (int): Display width in pixels - height (int): Display height in pixels - spi (SPI): SPI bus object - pin_dc (int): Data/Command pin number - pin_cs (int, optional): Chip Select pin number - rotation (int, optional): Orientation of display - - 0: Portrait (default) - - 1: Landscape - - 2: Inverted portrait - - 3: Inverted landscape - bgr_order (bool, optional): Color order - - True: BGR (default) - - False: RGB - reverse_bytes_in_word (bool, optional): - - Enable if the display uses LSB byte order for color words - """ - # Store SPI arguments - self._spi = spi - self._dc = Pin(pin_dc) # Don't change mode/alt - self._cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs is not None else None - - super().__init__(width, height, rotation, bgr_order, reverse_bytes_in_word) - - def _write(self, command=None, data=None): - """ - Writes commands and data to the display.
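# The save/restore choreography in these _write() methods could also be
# captured as a context manager. This is only a sketch of the pattern with a
# stand-in pin class (the real driver would wrap machine.Pin and
# _save_pin_mode_alt(), which this hypothetical DummyPin only imitates):
class DummyPin:
    def __init__(self):
        self.mode, self.alt = 1, -1
    def init(self, mode, alt=-1):
        self.mode, self.alt = mode, alt

class borrowed_pin:
    def __init__(self, pin, mode, alt=-1):
        self.pin, self.config = pin, (mode, alt)
    def __enter__(self):
        # Remember the current configuration, then take the pin over
        self.saved = (self.pin.mode, self.pin.alt)
        self.pin.init(*self.config)
        return self.pin
    def __exit__(self, *exc):
        # Hand the pin back in its original configuration
        self.pin.init(*self.saved)

dc = DummyPin()
with borrowed_pin(dc, mode=3):
    pass  # the display write would happen here
print(dc.mode, dc.alt)  # 1 -1 - original configuration restored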
- - Args: - command (bytes, optional): Command to send to the display - data (bytes, optional): Data to send to the display - """ - # Save the current mode and alt of the DC pin in case it's used by - # another device on the same SPI bus - dcMode, dcAlt = self._save_pin_mode_alt(self._dc) - - # Temporarily set the DC pin to output mode - self._dc.init(mode=Pin.OUT) - - # Write to the display - if self._cs: - self._cs.off() - if command is not None: - self._dc.off() - self._spi.write(command) - if data is not None: - self._dc.on() - self._spi.write(data) - if self._cs: - self._cs.on() - - # Restore the DC pin to its original mode and alt - self._dc.init(mode=dcMode, alt=dcAlt) diff --git a/cv2_drivers/touch_screens/__init__.py b/cv2_drivers/touch_screens/__init__.py deleted file mode 100644 index 48e2e4e..0000000 --- a/cv2_drivers/touch_screens/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# cv2_drivers/touch_screens/__init__.py -# -# Imports all available touch screen drivers for MicroPython OpenCV. -#------------------------------------------------------------------------------- - -# Import platform agnostic drivers -from . import cst816 diff --git a/cv2_drivers/touch_screens/cst816.py b/cv2_drivers/touch_screens/cst816.py deleted file mode 100644 index 8b56448..0000000 --- a/cv2_drivers/touch_screens/cst816.py +++ /dev/null @@ -1,165 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# cst816.py -# -# OpenCV CST816 touch screen driver using an I2C interface. -# -# This class is derived from: -# https://github.com/fbiego/CST816S -# Released under the MIT license. -# Copyright (c) 2021 Felix Biego -#------------------------------------------------------------------------------- - -from .cv2_touch_screen import CV2_Touch_Screen - -class CST816(CV2_Touch_Screen): - """ - OpenCV CST816 touch screen driver using an I2C interface. - """ - _I2C_ADDRESS = 0x15 - _CHIP_ID = 0xB6 - - # Registers - _REG_GESTURE_ID = 0x01 - _REG_FINGER_NUM = 0x02 - _REG_X_POS_H = 0x03 - _REG_X_POS_L = 0x04 - _REG_Y_POS_H = 0x05 - _REG_Y_POS_L = 0x06 - _REG_BPC0H = 0xB0 - _REG_BPC0L = 0xB1 - _REG_BPC1H = 0xB2 - _REG_BPC1L = 0xB3 - _REG_CHIP_ID = 0xA7 - _REG_PROJ_ID = 0xA8 - _REG_FW_VERSION = 0xA9 - _REG_MOTION_MASK = 0xEC - _REG_IRQ_PULSE_WIDTH = 0xED - _REG_NOR_SCAN_PER = 0xEE - _REG_MOTION_SL_ANGLE = 0xEF - _REG_LP_SCAN_RAW_1H = 0xF0 - _REG_LP_SCAN_RAW_1L = 0xF1 - _REG_LP_SCAN_RAW_2H = 0xF2 - _REG_LP_SCAN_RAW_2L = 0xF3 - _REG_LP_AUTO_WAKE_TIME = 0xF4 - _REG_LP_SCAN_TH = 0xF5 - _REG_LP_SCAN_WIN = 0xF6 - _REG_LP_SCAN_FREQ = 0xF7 - _REG_LP_SCAN_IDAC = 0xF8 - _REG_AUTO_SLEEP_TIME = 0xF9 - _REG_IRQ_CTL = 0xFA - _REG_AUTO_RESET = 0xFB - _REG_LONG_PRESS_TIME = 0xFC - _REG_IO_CTL = 0xFD - _REG_DIS_AUTO_SLEEP = 0xFE - - def __init__(self, i2c, width=240, height=320, rotation=1, address=_I2C_ADDRESS): - """ - Initializes the CST816 driver. - - Args: - i2c (I2C): I2C object for communication - width (int, optional): Touch screen width in pixels. - Default is 240 - height (int, optional): Touch screen height in pixels.
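# A quick host-side sketch of how the H/L position registers above combine
# into one 12-bit coordinate (the sample register values are made up):
def combine_position(high_byte, low_byte):
    # read_register_value() below reads two bytes starting at the H register;
    # keeping the low 12 bits is equivalent to this explicit combine
    return ((high_byte << 8) | low_byte) & 0x0FFF

print(combine_position(0x01, 0x2C))  # 300
print(combine_position(0xF0, 0x10))  # 16 - upper flag bits are masked off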
- Default is 320 - rotation (int, optional): Orientation of touch screen - - 0: Portrait - - 1: Landscape (default) - - 2: Inverted portrait - - 3: Inverted landscape - address (int, optional): I2C address of the touch screen. - Default is 0x15 - """ - self.i2c = i2c - self.address = address - self.width = width - self.height = height - self.rotation = rotation - - def _is_connected(self): - """ - Checks if the touch screen is connected by reading the chip ID. - - Returns: - bool: True if the touch screen is connected and the chip ID is - correct, otherwise False. - """ - try: - # Try to read the chip ID - # If it throws an I/O error - the device isn't connected - chip_id = self._get_chip_id() - - # Confirm the chip ID is correct - if chip_id == self._CHIP_ID: - return True - else: - return False - except: - return False - - def _get_chip_id(self): - """ - Reads the chip ID. - - Returns: - int: The chip ID of the CST816 (should be 0xB6). - """ - return self.read_register_value(self._REG_CHIP_ID) - - def is_touched(self): - """ - Checks if the touch screen is currently being touched. - - Returns: - bool: True if touching, False otherwise - """ - # Read the number of touches - touch_num = self.read_register_value(self._REG_FINGER_NUM) - - # If there are any touches, return True - return touch_num > 0 - - def get_touch_xy(self): - """ - Gets the X and Y coordinates of the touch point. Will return the last - touch point if no touch is currently detected. - - Returns: - tuple: (x, y) coordinates of the touch point - """ - x = self.read_register_value(self._REG_X_POS_H, 2) & 0x0FFF - y = self.read_register_value(self._REG_Y_POS_H, 2) & 0x0FFF - - # Adjust for the rotation - if self.rotation == 0: - x,y = x, y - elif self.rotation == 1: - x,y = y, self.width - x - elif self.rotation == 2: - x,y = self.height - x, self.width - y - elif self.rotation == 3: - x,y = self.height - y, x - - return (x, y) - - def read_register_value(self, reg, num_bytes=1): - """ - Reads one or more bytes from the specified register. - - Args: - reg (int): Register address to read from - num_bytes (int, optional): Number of bytes to read from the register. - Default is 1 - - Returns: - int: Value read from the register - """ - data = self.i2c.readfrom_mem(self.address, reg, num_bytes) - value = 0 - for i in range(num_bytes): - value = (value << 8) | data[i] - return value diff --git a/cv2_drivers/touch_screens/cv2_touch_screen.py b/cv2_drivers/touch_screens/cv2_touch_screen.py deleted file mode 100644 index 439a314..0000000 --- a/cv2_drivers/touch_screens/cv2_touch_screen.py +++ /dev/null @@ -1,21 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# cv2_touch_screen.py -# -# Base class for OpenCV touch screen drivers. -#------------------------------------------------------------------------------- - -class CV2_Touch_Screen(): - """ - Base class for OpenCV touch screen drivers. - """ - def __init__(self): - """ - Initializes the touch screen.
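# The rotation remapping in get_touch_xy() above can be sanity-checked on the
# host with a standalone copy of the same arithmetic (240x320 panel assumed):
def remap(x, y, rotation, width=240, height=320):
    if rotation == 1:
        x, y = y, width - x
    elif rotation == 2:
        x, y = height - x, width - y
    elif rotation == 3:
        x, y = height - y, x
    return (x, y)

# One raw touch point, reported in each orientation
for r in range(4):
    print(r, remap(10, 20, r))  # (10, 20), (20, 230), (310, 220), (300, 10)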
- """ - pass - - # TODO: Implement common methods for all touch screens diff --git a/manifest.py b/manifest.py deleted file mode 100644 index 21966ed..0000000 --- a/manifest.py +++ /dev/null @@ -1,9 +0,0 @@ -# The manifest gets overwritten by the Makefile, so re-include the board's -# original manifest file -include("$(BOARD_DIR)/manifest.py") - -# Include this directory as one package -package("cv2_drivers") - -# Include the SD card module -require("sdcard") diff --git a/micropython b/micropython deleted file mode 160000 index 7e728e8..0000000 --- a/micropython +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 7e728e8c6aad74ca244183f3e0705db6f332abd9 diff --git a/src/opencv_upy.cmake b/micropython_opencv.cmake similarity index 84% rename from src/opencv_upy.cmake rename to micropython_opencv.cmake index 84bf549..2e1b3b1 100644 --- a/src/opencv_upy.cmake +++ b/micropython_opencv.cmake @@ -13,14 +13,14 @@ add_library(usermod_cv2 INTERFACE) # Add our source files to the library. target_sources(usermod_cv2 INTERFACE - ${CMAKE_CURRENT_LIST_DIR}/alloc.c - ${CMAKE_CURRENT_LIST_DIR}/convert.cpp - ${CMAKE_CURRENT_LIST_DIR}/core.cpp - ${CMAKE_CURRENT_LIST_DIR}/highgui.cpp - ${CMAKE_CURRENT_LIST_DIR}/imgcodecs.cpp - ${CMAKE_CURRENT_LIST_DIR}/imgproc.cpp - ${CMAKE_CURRENT_LIST_DIR}/numpy.cpp - ${CMAKE_CURRENT_LIST_DIR}/opencv_upy.c + ${CMAKE_CURRENT_LIST_DIR}/src/alloc.c + ${CMAKE_CURRENT_LIST_DIR}/src/convert.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/core.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/highgui.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/imgcodecs.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/imgproc.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/numpy.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/opencv_upy.c ) # Add the src directory as an include directory. @@ -49,7 +49,7 @@ target_compile_definitions(usermod INTERFACE ULAB_MAX_DIMS=4) include(${CMAKE_CURRENT_LIST_DIR}/ulab/code/micropython.cmake) # Include OpenCV -set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/opencv/opencv/build) +set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/opencv/build) find_package(OpenCV REQUIRED) target_include_directories(usermod INTERFACE ${OpenCV_INCLUDE_DIRS}) target_link_libraries(usermod INTERFACE ${OpenCV_LIBS}) diff --git a/src/opencv/opencv b/opencv similarity index 100% rename from src/opencv/opencv rename to opencv diff --git a/opencv-examples/cv2_hardware_init/__init__.py b/opencv-examples/cv2_hardware_init/__init__.py deleted file mode 100644 index 1bf8755..0000000 --- a/opencv-examples/cv2_hardware_init/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Initializes various hardware components for OpenCV in MicroPython. The -# examples import this module, but you could instead create/edit a `boot.py` -# script to automatically initialize the hardware when the board boots up. See: -# https://micropython.org/resources/docs/en/latest/reference/reset_boot.html#id4 - -# Import the display driver -try: - from .display import display -except: - print("Display initialization failed, skipping...") - -# Optional - Show a splash screen on the display with an optional filename (if -# not provided, it defaults to `splash.png` in the root directory of the -# MicroPython filesystem). 
If the file is not present, the driver will simply -# clear the display of any previous content -display.splash("opencv-examples/images/splash.png") - -# Import the camera driver -try: - from .camera import camera -except: - print("Camera initialization failed, skipping...") - -# Import the touch screen driver -try: - from .touch_screen import touch_screen -except: - print("Touch screen initialization failed, skipping...") - -# Mount the SD card -try: - # We don't actually need to import anything here, just want to run the - # sd_card module so the SD card gets mounted to the filesystem. So just - # import something then delete it to avoid clutter - from .sd_card import sdcard - del sdcard -except: - print("SD card initialization failed, skipping...") diff --git a/opencv-examples/cv2_hardware_init/bus_i2c.py b/opencv-examples/cv2_hardware_init/bus_i2c.py deleted file mode 100644 index 593d3d5..0000000 --- a/opencv-examples/cv2_hardware_init/bus_i2c.py +++ /dev/null @@ -1,11 +0,0 @@ -# Import the machine.I2C class -from machine import I2C - -# Initialize default I2C bus. You may need to adjust the arguments based on your -# specific board and configuration -i2c = I2C( - # id = 0, - # sda = machine.Pin(0), - # scl = machine.Pin(1), - # freq = 400_000 -) diff --git a/opencv-examples/cv2_hardware_init/bus_spi.py b/opencv-examples/cv2_hardware_init/bus_spi.py deleted file mode 100644 index 35fa59d..0000000 --- a/opencv-examples/cv2_hardware_init/bus_spi.py +++ /dev/null @@ -1,13 +0,0 @@ -# Import the machine.SPI class -from machine import SPI - -# Initialize default SPI bus. You may need to adjust the arguments based on your -# specific board and configuration -spi = SPI( - # id = 0, - baudrate = 24_000_000, # Use the fastest baudrate you can for best performance! - # sck = machine.Pin(2), - # mosi = machine.Pin(3), - # miso = machine.Pin(4), - # freq = 100_000 -) diff --git a/opencv-examples/cv2_hardware_init/camera.py b/opencv-examples/cv2_hardware_init/camera.py deleted file mode 100644 index 1856709..0000000 --- a/opencv-examples/cv2_hardware_init/camera.py +++ /dev/null @@ -1,40 +0,0 @@ -# Initializes a camera object. Multiple options are provided below, so you can -# choose one that best fits your needs. 
You may need to adjust the arguments -# based on your specific camera and board configuration - -# Import the OpenCV camera drivers -from cv2_drivers.cameras import * - -# Import the I2C bus -from .bus_i2c import i2c - -################################################################################ -# HM01B0 -################################################################################ - -# PIO interface, only available on Raspberry Pi RP2 processors -camera = hm01b0_pio.HM01B0_PIO( - i2c, - pin_d0 = 12, - pin_vsync = 13, - pin_hsync = 14, - pin_pclk = 15, - sm_id = 5, - pin_xclk = None, # Optional xclock pin, specify if needed - num_data_pins = 1 # Number of data pins used by the camera (1, 4, or 8) -) - -################################################################################ -# OV5640 -################################################################################ - -# PIO interface, only available on Raspberry Pi RP2 processors -# camera = ov5640_pio.OV5640_PIO( -# i2c, -# sm_id = 5, -# pin_d0 = 8, -# pin_vsync = 22, -# pin_hsync = 21, -# pin_pclk = 20, -# pin_xclk = 3 # Optional xclock pin, specify if needed -# ) diff --git a/opencv-examples/cv2_hardware_init/display.py b/opencv-examples/cv2_hardware_init/display.py deleted file mode 100644 index 2d411be..0000000 --- a/opencv-examples/cv2_hardware_init/display.py +++ /dev/null @@ -1,37 +0,0 @@ -# Initializes a display object. Multiple options are provided below, so you can -# choose one that best fits your needs. You may need to adjust the arguments -# based on your specific display and board configuration - -# Import the OpenCV display drivers -from cv2_drivers.displays import * - -# Import the SPI bus -from .bus_spi import spi - -################################################################################ -# ST7789 -################################################################################ - -# SPI interface. This should work on any platform, but it's not always the -# fastest option (24Mbps on RP2350) -display = st7789_spi.ST7789_SPI( - width = 240, - height = 320, - spi = spi, - pin_dc = 16, - pin_cs = 17, - rotation = 1 -) - -# PIO interface. This is only available on Raspberry Pi RP2 processors, -# but is much faster than the SPI interface (75Mbps on RP2350) -# display = st7789_pio.ST7789_PIO( -# width = 240, -# height = 320, -# sm_id = 4, -# pin_clk = 18, -# pin_tx = 19, -# pin_dc = 16, -# pin_cs = 17, -# rotation = 1 -# ) diff --git a/opencv-examples/cv2_hardware_init/sd_card.py b/opencv-examples/cv2_hardware_init/sd_card.py deleted file mode 100644 index 73d3b99..0000000 --- a/opencv-examples/cv2_hardware_init/sd_card.py +++ /dev/null @@ -1,50 +0,0 @@ -# Initializes SD card and mounts it to the filesystem. This assumes the SD card -# is on the same SPI bus as the display with a different chip select pin. You -# may need to adjust this based on your specific board and configuration - -# Import the Pin class for the chip select pin -from machine import Pin - -# Import the SPI bus -from .bus_spi import spi - -# When the SD card is initialized, it changes the SPI bus baudrate. We'll -# want to revert it, so we need to know the original baudrate. There's no -# way to get it directly, so we convert the bus to a string and parse it. 
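# As a host-side check of the parsing trick just described (the sample string
# matches the example format quoted below):
spi_str = "SPI(0, baudrate=24000000, sck=Pin(2), mosi=Pin(3), miso=Pin(4))"
baudrate = int(spi_str[spi_str.index("baudrate=") + 9:].partition(",")[0])
print(baudrate)  # 24000000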
-# Example format: -# "SPI(0, baudrate=24000000, sck=Pin(2), mosi=Pin(3), miso=Pin(4))" -spi_str = str(spi) -baudrate = int(spi_str[spi_str.index("baudrate=") + 9:].partition(",")[0]) - -# Set the chip select pin for the SD card -sd_cs = Pin(7, Pin.OUT) - -try: - # Import the SD card module. This is often not installed by default in - # MicroPython, so you may need to install it manually. For example, you can - # use `mpremote mip install sdcard` - import sdcard - - # Initialize the SD card, then restore the original SPI bus baudrate. This - # is wrapped in a try/finally block to ensure the baudrate is restored even if - # the SD card initialization fails - try: - sd_card = sdcard.SDCard(spi, sd_cs) - finally: - spi.init(baudrate = baudrate) - - # Mount the SD card to the filesystem under the "/sd" directory, which makes - # it accessible just like the normal MicroPython filesystem - import uos - vfs = uos.VfsFat(sd_card) - uos.mount(vfs, "/sd") -except ImportError: - print("`sdcard` module not found, skipping SD card initialization...") -except OSError as e: - eStr = str(e) - if "no SD card" in eStr: - print("No SD card found, skipping SD card initialization...") - elif "Errno 1" in eStr: - print("SD card already mounted, skipping SD card initialization...") - else: - print("Failed to mount SD card, skipping SD card initialization...") diff --git a/opencv-examples/cv2_hardware_init/touch_screen.py b/opencv-examples/cv2_hardware_init/touch_screen.py deleted file mode 100644 index 6ffc1a7..0000000 --- a/opencv-examples/cv2_hardware_init/touch_screen.py +++ /dev/null @@ -1,16 +0,0 @@ -# Initializes a touch screen object. Multiple options are provided below, so you -# can choose one that best fits your needs. You may need to adjust the arguments -# based on your specific touch screen and board configuration - -# Import the OpenCV touch screen drivers -from cv2_drivers.touch_screens import * - -# Import the I2C bus -from .bus_i2c import i2c - -################################################################################ -# CST816 -################################################################################ - -# I2C interface -touch_screen = cst816.CST816(i2c) diff --git a/opencv-examples/ex01_hello_opencv.py b/opencv-examples/ex01_hello_opencv.py deleted file mode 100644 index 0ee6b9b..0000000 --- a/opencv-examples/ex01_hello_opencv.py +++ /dev/null @@ -1,83 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# ex01_hello_opencv.py -# -# This example demonstrates near-minimal code to get started with OpenCV in -# MicroPython. It can be used to verify that OpenCV is working correctly, and -# that the display driver is functioning. It simply imports the required modules, -# creates a blank image, draws some things on it, and shows it on the display. -#------------------------------------------------------------------------------- - -# Import OpenCV, just as you would in any other Python environment! -import cv2 as cv - -# Standard OpenCV leverages the host operating system to access hardware, but we -# don't have that luxury in MicroPython. Instead, drivers are provided for -# various hardware components, which need to be initialized before using them. -# The examples import a module called `cv2_hardware_init`, which initializes the -# drivers.
You may need to edit the contents of the `cv2_hardware_init` module -# based on your specific board and hardware configuration -from cv2_hardware_init import * - -# Import NumPy, almost like any other Python environment! The only difference is -# the addition of `from ulab` since MicroPython does not have a full NumPy -# implementation; ulab NumPy is a lightweight version of standard NumPy -from ulab import numpy as np - -# Initialize an image (NumPy array) to be displayed, just like in any other -# Python environment! Here we create a 240x320 pixel image with 3 color channels -# (BGR order, like standard OpenCV) and a data type of `uint8` (you should -# always specify the data type, because NumPy defaults to `float`) -img = np.zeros((240, 320, 3), dtype=np.uint8) - -# Images can be accessed and modified directly if desired with array slicing. -# Here we set the top 50 rows of the image to blue (remember, BGR order!) -img[0:50, :] = (255, 0, 0) - -# OpenCV's drawing functions can be used to modify the image as well. For -# example, we can draw a green ellipse at the center of the image -img = cv.ellipse(img, (160, 120), (100, 50), 0, 0, 360, (0, 255, 0), -1) - -# Note - Most OpenCV functions return the resulting image. It's redundant for -# the drawing functions and often ignored, but if you call those functions from -# the REPL without assigning it to a variable, the entire array will be printed. -# To avoid this, you can simply re-assign the image variable (for example, -# `img = cv.function(...)`) - -# And the obligatory "Hello OpenCV" text! This time in red -img = cv.putText(img, "Hello OpenCV!", (50, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) - -# Once we have an image ready to show, just call `cv.imshow()`, almost like any -# other Python environment! However, there is one important difference: -# -# Standard OpenCV takes a window name string in `cv.imshow()`, which is used -# to display the image in a window. We don't have windows in MicroPython, so -# there is an API change where the first argument must be a display driver. Any -# display driver can be used, as long as it implements an `imshow()` method that -# takes a NumPy array as input -cv.imshow(display, img) # Can alternatively call `display.imshow(img)` - -# Standard OpenCV requires a call to `cv.waitKey()` to process events and -# actually display the image. However, the display driver shows the image -# immediately, so it's not necessary to call `cv.waitKey()` in MicroPython. -# But it is available, and behaves almost like any other Python environment! The -# only difference is that it requires a key to be pressed in the REPL instead of -# a window. It will wait for up to the specified number of milliseconds (0 for -# indefinite), and return the ASCII code of the key pressed (-1 if no key press) -# -# Note - Some MicroPython IDEs (like Thonny) don't actually send any key presses -# until you hit Enter on your keyboard -print("Press any key to continue") -key = cv.waitKey(0) # Not necessary to display image, can remove if desired - -# Print the key pressed -print("Key pressed:", chr(key)) - -# Normally at the end of OpenCV scripts, you would call `cv.destroyAllWindows()` -# to close all OpenCV windows. 
That function doesn't exist in the MicroPython -# port of OpenCV, but you can instead call `display.clear()` to set the display -# to a blank state, or `display.splash()` to show the splash screen -display.clear() # Can instead call `display.splash()` with optional filename diff --git a/opencv-examples/ex02_camera.py b/opencv-examples/ex02_camera.py deleted file mode 100644 index 88a078f..0000000 --- a/opencv-examples/ex02_camera.py +++ /dev/null @@ -1,52 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# ex02_camera.py -# -# This example demonstrates how to read frames from a camera and show them on a -# display using OpenCV in MicroPython. It can be used to verify that the camera -# driver is functioning. -#------------------------------------------------------------------------------- - -# Import OpenCV and hardware initialization module -import cv2 as cv -from cv2_hardware_init import * - -# Open a camera, similar to any other Python environment! In standard OpenCV, -# you would use `cv.VideoCapture(0)` or similar, and OpenCV would leverage the -# host operating system to open a camera object and return it as a -# `cv.VideoCapture` object. However, we don't have that luxury in MicroPython, -# so a camera driver is required instead. Any camera driver can be used, as long -# as it implements the same methods as the standard OpenCV `cv.VideoCapture` -# class, such as `open()`, `read()`, and `release()` -camera.open() - -# Prompt the user to press a key to continue -print("Press any key to continue") - -# Loop to continuously read frames from the camera and display them -while True: - # Read a frame from the camera, just like any other Python environment! It - # returns a tuple, where the first element is a boolean indicating success, - # and the second element is the frame (NumPy array) read from the camera - success, frame = camera.read() - - # Check if the frame was read successfully - if not success: - print("Error reading frame from camera") - break - - # Display the frame - cv.imshow(display, frame) - - # Check for key presses - key = cv.waitKey(1) - - # If any key is pressed, exit the loop - if key != -1: - break - -# Release the camera, just like in any other Python environment! -camera.release() diff --git a/opencv-examples/ex03_touch_screen.py b/opencv-examples/ex03_touch_screen.py deleted file mode 100644 index 94a4483..0000000 --- a/opencv-examples/ex03_touch_screen.py +++ /dev/null @@ -1,70 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# ex03_touch_screen.py -# -# This example demonstrates how to read input from a touch screen, which can be -# used to verify that the touch screen driver is functioning. It simply draws -# lines on a blank image based on touch input, similar to a drawing application. 
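# Because ex02 above only relies on the open()/read()/release() protocol, any
# object implementing it can stand in for a camera. A minimal stub driver for
# bench testing might look like this sketch (the class name and the 240x320
# BGR frame shape are assumptions, not part of the examples):
from ulab import numpy as np

class StubCamera:
    def open(self):
        # A fixed black frame is enough to exercise the display path
        self._frame = np.zeros((240, 320, 3), dtype=np.uint8)

    def read(self, dst=None):
        # Return (success, frame), matching cv.VideoCapture.read()
        return True, self._frame

    def release(self):
        self._frame = None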
-#------------------------------------------------------------------------------- - -# Import OpenCV and hardware initialization module -import cv2 as cv -from cv2_hardware_init import * - -# Import NumPy -from ulab import numpy as np - -# Initialize an image to draw on -img = np.zeros((240, 320, 3), dtype=np.uint8) - -# Prompt the user to draw on the screen -img = cv.putText(img, "Touch to draw!", (10, 30), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2) - -# Prompt the user to press a key to continue -print("Press any key to continue") - -# Create variables to store touch coordinates and state -x0, y0, x1, y1 = 0, 0, 0, 0 -touch_input = False - -# Loop to continuously read touch input and draw on the image -while True: - # Check if there is touch input - if touch_screen.is_touched(): - # Check if this is the first touch or a continuation - if not touch_input: - # This is the first touch, set both (x0, y0) and (x1, y1) to the - # initial touch coordinates. This will draw a point at the touch - # location if no further touch inputs are made - x0, y0 = touch_screen.get_touch_xy() - x1, y1 = x0, y0 - # Set the state to indicate there is touch input - touch_input = True - else: - # This is a continuation of the touch, set (x0, y0) to the previous - # coordinates and set (x1, y1) to the current touch coordinates so - # we can draw a line between them - x0, y0 = x1, y1 - x1, y1 = touch_screen.get_touch_xy() - else: - # Check if there was touch input before - if touch_input: - # There was touch input before, but not anymore - touch_input = False - - # Draw a line if there was touch input - if touch_input: - img = cv.line(img, (x0, y0), (x1, y1), (255, 255, 255), 2) - - # Display the frame - display.imshow(img) - - # Check for key presses - key = cv.waitKey(1) - - # If any key is pressed, exit the loop - if key != -1: - break diff --git a/opencv-examples/ex04_imread_imwrite.py b/opencv-examples/ex04_imread_imwrite.py deleted file mode 100644 index cef4cb7..0000000 --- a/opencv-examples/ex04_imread_imwrite.py +++ /dev/null @@ -1,61 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# ex04_imread_imwrite.py -# -# This example demonstrates how to read and write images to and from the -# MicroPython filesystem using `cv.imread()` and `cv.imwrite()`. Any paths -# accessible to the MicroPython filesystem can be used, including an SD card if -# your board has one connected. -#------------------------------------------------------------------------------- - -# Import OpenCV and hardware initialization module -import cv2 as cv -from cv2_hardware_init import * - -# Call `cv.imread()` to read an image from the MicroPython filesystem, just -# like in any other Python environment! Make sure to copy the image to the -# MicroPython filesystem first, and set the path to the image file as needed -# -# If your board can mount an SD card, you can instead load the image to the SD -# card and change the path to point to the SD card -# -# Note - only BMP and PNG formats are currently supported in MicroPython OpenCV -print("Loading image...") -img = cv.imread("opencv-examples/images/splash.png") - -# Show the image -# -# Note - If the image is larger or smaller than the display, the behavior will -# depend on the display driver. 
For example, the default ST7789 display driver -# will crop large images, and show small images in the top-left corner -cv.imshow(display, img) - -# Prompt the user to press a key to continue -print("Press any key to continue") -key = cv.waitKey(0) - -# Let's modify the image! Here we use `cv.Canny()` to perform edge detection -# on the image, which is a common operation in computer vision -print("Performing edge detection...") -edges = cv.Canny(img, 100, 200) - -# Display the modified image -cv.imshow(display, edges) - -# Now we'll save the modified image to the MicroPython filesystem using -# `cv.imwrite()`, just like in any other Python environment! -# -# Again, SD cards are supported, just change the path to point to the SD card -# -# Note - only BMP and PNG formats are currently supported in MicroPython OpenCV -print("Saving modified image...") -success = cv.imwrite("opencv-examples/images/splash_edges.png", edges) - -# Check if the image was saved successfully -if success: - print("Image saved successfully!") -else: - print("Failed to save the image!") diff --git a/opencv-examples/ex05_performance.py b/opencv-examples/ex05_performance.py deleted file mode 100644 index f6c4b7f..0000000 --- a/opencv-examples/ex05_performance.py +++ /dev/null @@ -1,137 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# ex05_performance.py -# -# This example demonstrates some performance optimization techniques, and ways -# to measure performance in the MicroPython port of OpenCV. Read through the -# comments in this example to learn more! -# -# Note that most examples do not include these optimizations for simplicity, but -# if maximum performance is needed for your application, use the techniques -# shown here. -#------------------------------------------------------------------------------- - -# Import OpenCV and hardware initialization module -import cv2 as cv -from cv2_hardware_init import * - -# Import NumPy to create arrays -from ulab import numpy as np - -# Import time for frame rate calculation -import time - -# Import garbage collector to measure memory usage -import gc - -# Many OpenCV functions can take an optional output argument to store the result -# of the operation. If it's not provided, OpenCV allocates a new array to store -# the result, which can be slow and waste memory. When it is provided, OpenCV -# instead writes the result to the provided array, reducing memory usage and -# improving performance. The array must have the same shape and data type as the -# expected output of the operation, otherwise a new array will be allocated. -# -# Here we preallocate arrays for the destination arguments of this example. If -# the shapes or data types are incorrect, OpenCV will simply allocate new arrays -# for each on the first loop iteration. The variables will then be re-assigned, -# so this only negatively affects the first loop iteration. 
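# A short sketch of the destination-argument pattern just described, using the
# same cv.cvtColor() call as the loop below (the shape and dtype must match the
# expected output for the preallocated array to be reused):
from ulab import numpy as np
import cv2 as cv

src = np.zeros((240, 320, 3), dtype=np.uint8)
dst = np.zeros((240, 320, 3), dtype=np.uint8)

dst = cv.cvtColor(src, cv.COLOR_BGR2HSV, dst)  # result written into dst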
-frame = np.zeros((240, 320, 3), dtype=np.uint8) -result_image = np.zeros((240, 320, 3), dtype=np.uint8) - -# Open the camera -camera.open() - -# Initialize a loop timer to calculate processing speed in FPS -loop_time = time.ticks_us() - -# Initialize a variable to track memory usage -last_mem_free = gc.mem_free() - -# Prompt the user to press a key to continue -print("Press any key to continue") - -# Loop to continuously read frames from the camera and display them -while True: - # Read a frame from the camera and measure how long it takes. Try running - # this both with and without the pre-allocated `frame` array to see the - # difference in performance - t0 = time.ticks_us() - success, frame = camera.read(frame) - t1 = time.ticks_us() - print("Read frame: %.2f ms" % ((t1 - t0) / 1_000), end='\t') - - # Check if the frame was read successfully - if not success: - print("Failed to read frame from camera") - break - - # Now we'll do some processing on the frame. Try running this with and - # without the pre-allocated `result_image` array, and try different OpenCV - # functions to compare performance - t0 = time.ticks_us() - result_image = cv.cvtColor(frame, cv.COLOR_BGR2HSV, result_image) - t1 = time.ticks_us() - print("Processing: %.2f ms" % ((t1 - t0) / 1_000), end='\t') - - # It's a good idea to measure the frame rate of the main loop to see how - # fast the entire pipeline is running. This will include not only the - # processing steps, but also any overhead from the hardware drivers and - # other code. We can calculate the FPS with the loop timer and draw it on - # the frame for visualization - current_time = time.ticks_us() - fps = 1_000_000 / (current_time - loop_time) - loop_time = current_time - print("FPS: %.2f" % fps, end='\t') - result_image = cv.putText(result_image, f"FPS: {fps:.2f}", (10, 30), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2) - - # Display the frame - cv.imshow(display, result_image) - - # We can also measure memory usage to see how much RAM is being consumed by - # this code. If you remove the output arguments from the functions above, - # you'll see that the memory consumption increases significantly as new - # arrays must be allocated each loop iteration - # - # Note that calling `gc.mem_free()` actually takes a relatively long time to - # execute, so it should only be used for debugging, not in production code - mem_free = gc.mem_free() - memory_used = last_mem_free - mem_free - last_mem_free = mem_free - print("Memory free: %d KiB" % (mem_free // 1024), end='\t') - print("Memory consumed: %d KiB" % (memory_used // 1024), end='\n') - - # If the memory usage is negative, it means the garbage collector triggered - # and freed some memory. Garbage collection can take some time, so you'll - # notice a drop in FPS when it happens, and you may see a stutter in the - # video stream on the display. This is another reason to preallocate arrays, - # since it mitigates how frequently garbage collection is triggered - if memory_used < 0: - print("Garbage collection triggered!") - - # Something to try is triggering the garbage collector manually each loop - # iteration to immediately free up memory. Garbage collection can be faster - # if less memory has been allocated, so this can help avoid long stutters - # from occasional garbage collection. However, garbage collection always - # takes *some* time, so this will lower the average FPS. 
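# One caveat about the t1 - t0 arithmetic above: MicroPython's tick counters
# wrap around, so time.ticks_diff() is the wraparound-safe way to subtract
# them. A sketch of the same measurement with it:
import time

t0 = time.ticks_us()
# ... the work being measured goes here ...
t1 = time.ticks_us()
print("Elapsed: %.2f ms" % (time.ticks_diff(t1, t0) / 1_000))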
You can choose to - # do this if you prefer a consistent frame rate, or don't if you prefer - # maximum frame rate and are okay with occasional stutters - gc.collect() - - # For advanced users, you can use the internal buffers of the camera and - # display drivers: `camera._buffer` and `display._buffer`. Using these - # buffers directly can avoid the colorspace conversions implemented in - # `camera.read()` and `display.imshow()`, which can improve overall - # performance if your application can make use of the native color spaces - - # Check for key presses - key = cv.waitKey(1) - - # If any key is pressed, exit the loop - if key != -1: - break - -# Release the camera -camera.release() diff --git a/opencv-examples/ex06_detect_sfe_logo.py b/opencv-examples/ex06_detect_sfe_logo.py deleted file mode 100644 index b15abee..0000000 --- a/opencv-examples/ex06_detect_sfe_logo.py +++ /dev/null @@ -1,193 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# ex06_detect_sfe_logo.py -# -# This example demonstrates a basic vision processing pipeline. A pipeline is -# just a sequence of steps used to extract meaningful data from an image. The -# pipeline in this example attempts to detect the SparkFun flame logo using -# contour matching. If it's detected, it will be outlined on the display for -# visualization. The bounding box and center of the logo will also be drawn, -# demonstrating how to acquire useful numerical data from an image (e.g. the -# position and size of an object). -# -# Note that this pipeline is very simple and does not include many of the steps -# that would typically be included in more robust pipelines. This was done for -# simplicity and performance, so it may produce false positives or miss the logo -# entirely sometimes. -#------------------------------------------------------------------------------- - -# Import OpenCV and hardware initialization module -import cv2 as cv -from cv2_hardware_init import * - -# Import NumPy -from ulab import numpy as np - -# Import time for frame rate calculation -import time - -# Here we define a reference contour for the SparkFun flame logo. This was -# created manually by picking points on the boundary of a small image of the -# logo in an image editor. Below is also ASCII art of the logo for reference, -# but the actual contour is drawn in the top left corner of the display. -# ___ -# / _\ -# \ \ -# /|_| \/\ -# | | -# | | -# | / -# | _____/ -# | / -# |/ -logo_contour = np.array( - [[[0,48]], - [[0,22]], - [[4,16]], - [[9,16]], - [[7,19]], - [[10,22]], - [[13,22]], - [[16,19]], - [[16,17]], - [[10,10]], - [[10,5]], - [[15,1]], - [[20,0]], - [[24,2]], - [[19,5]], - [[19,8]], - [[23,12]], - [[26,11]], - [[26,8]], - [[32,14]], - [[32,25]], - [[28,32]], - [[20,36]], - [[12,36]]], dtype=np.float) - -# This is the pipeline implementation. This gets called for each frame captured -# by the camera in the main loop -def sfe_logo_detection_pipeline(frame): - # Here we binarize the image. There are many ways to do this, but here we - # simply convert the image to grayscale and then apply Otsu's thresholding - # method to create a binary image.
This means it will only detect a dark - # logo on a light background (or vice versa), but you can modify this to - # find specific colors or use other methods if desired - gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) - ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU) - - # Find contours in the binary image, which are simply lists of points around - # the boundaries of shapes. Contours are a powerful tool in OpenCV for shape - # analysis and object detection - contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) - - # It's possible that no contours were found, so first check if any were - # found before proceeding - if contours: - # We'll compare the contours found in the image to the reference logo - # contour defined earlier. We will use the `cv.matchShapes()` function - # to compare the shapes to pick the best match, so we need to initialize - # variables to keep track of the best match found so far - best_contour = None - best_similarity = float('inf') # Start with a very high similarity score - - # Loop through each contour found in the image to find the best match - for i in range(len(contours)): - # If the image is noisy, the binarized image may contain many tiny - # contours that are obviously not the logo. `cv.matchShapes()` can - # take some time, so we can be more efficient by skipping obviously - # wrong contours. In this example, the logo we're looking for is - # fairly complex, so we can skip contours that have too few points - # since they will definitely be too simple to match the logo - if len(contours[i]) < 20: - continue - - # Now we call `cv.matchShapes()` which returns a "similarity" score - # between the two shapes. The lower the score, the more similar the - # shapes are - similarity = cv.matchShapes(logo_contour, contours[i], cv.CONTOURS_MATCH_I2, 0) - - # Check if this contour is a better match than the best so far - if similarity < best_similarity: - # This contour is a better match, so update the best match - best_similarity = similarity - best_contour = contours[i] - - # We're done checking all contours. It's possible that the best contour - # found is not a good match, so we can check if the score is below a - # threshold to determine whether it's close enough. Testing has shown - # that good matches are usually around 0.5, so we'll use a slightly - # higher threshold of 1.0 - if best_similarity < 1.0: - # The best contour found is a good match, so we'll draw it on the - # frame to outline the detected logo for visualization - frame = cv.drawContours(frame, [best_contour], -1, (0, 0, 255), 2) - - # Visualization is great, but the purpose of most real pipelines is - # to extract useful data from the image. For example, suppose we - # want to know where the logo is located in the image and how large - # it is. We can use the bounding rectangle of the contour to get the - # position and size of the logo - left, top, width, height = cv.boundingRect(best_contour) - center_x = left + width // 2 - center_y = top + height // 2 - - # Now we could use this data for some task! For example, if we were - # detecting an object that a robot needs to drive in front of, we - # could turn to face it with the center point, then drive forwards - # until the size is big enough (meaning we're close enough to it). 
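# How cv.matchShapes() scoring behaves can be checked with two toy contours (a
# square and a 2:1 rectangle); these point arrays are made up for illustration,
# and scores near 0 mean similar shapes:
from ulab import numpy as np
import cv2 as cv

square = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]], dtype=np.float)
rect = np.array([[[0, 0]], [[20, 0]], [[20, 10]], [[0, 10]]], dtype=np.float)

print(cv.matchShapes(square, square, cv.CONTOURS_MATCH_I2, 0))  # 0.0, identical
print(cv.matchShapes(square, rect, cv.CONTOURS_MATCH_I2, 0))    # larger score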
- # - # This example doesn't actually make use of the data, so we'll just - # draw the bounding box and center of the logo for visualization, - # and add text of the position and size of the logo - frame = cv.rectangle(frame, (left, top), (left + width, top + height), (255, 0, 0), 2) - frame = cv.drawMarker(frame, (center_x, center_y), (0, 255, 0), cv.MARKER_CROSS, 10, 2) - frame = cv.putText(frame, f"({center_x}, {center_y})", (center_x - 45, center_y - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) - frame = cv.putText(frame, f"{width}x{height}", (left, top - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2) - -# Initialize a loop timer to calculate processing speed in FPS -loop_time = time.ticks_us() - -# Open the camera -camera.open() - -# Prompt the user to press a key to continue -print("Press any key to continue") - -# Loop to continuously read frames from the camera and display them -while True: - # Read a frame from the camera - success, frame = camera.read() - if not success: - print("Failed to read frame from camera") - break - - # Call the pipeline function to process the frame - sfe_logo_detection_pipeline(frame) - - # All processing is done! Calculate the frame rate and display it - current_time = time.ticks_us() - fps = 1_000_000 / (current_time - loop_time) - loop_time = current_time - frame = cv.putText(frame, f"FPS: {fps:.2f}", (40, 30), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) - - # Draw the reference logo contour in the top left corner of the frame - frame[0:50, 0:40] = (0,0,0) - frame = cv.drawContours(frame, [logo_contour], -1, (255, 255, 255), 1, offset=(2, 2)) - - # Display the frame - cv.imshow(display, frame) - - # Check for key presses - key = cv.waitKey(1) - - # If any key is pressed, exit the loop - if key != -1: - break - -# Release the camera -camera.release() diff --git a/opencv-examples/ex07_animation.py b/opencv-examples/ex07_animation.py deleted file mode 100644 index 759e4a1..0000000 --- a/opencv-examples/ex07_animation.py +++ /dev/null @@ -1,61 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# ex07_animation.py -# -# This example demonstrates how to play an animation using a series of frames -# stored in a single image file. It assumes full 320x240 frames are stacked -# vertically in the image, and the animation plays by displaying each frame in -# sequence. This can be the basis for things like sprite sheets, where smaller -# icons or characters are stored in a single image and displayed as needed. 
-#-------------------------------------------------------------------------------
-
-# Import OpenCV and hardware initialization module
-import cv2 as cv
-from cv2_hardware_init import *
-
-# Load an animation sheet image that contains multiple frames of an animation
-animation_sheet = cv.imread("opencv-examples/images/animation_sheet.png")
-
-# This example assumes the image has full 320x240 frames stacked vertically
-frame_height = 240
-
-# Calculate the number of frames in the sheet by dividing the sheet height by
-# the frame height
-frame_num = animation_sheet.shape[0] // frame_height
-
-# Initialize variables to keep track of the current row in the sheet and the
-# direction of animation playback (up or down)
-row_index = 0
-direction = 1
-
-# Prompt the user to press a key to continue
-print("Press any key to continue")
-
-# Loop to continuously play the animation
-while True:
-    # Calculate the starting and ending pixel row for the current frame
-    row_start_px = row_index * frame_height
-    row_end_px = row_start_px + frame_height
-    cv.imshow(display, animation_sheet[row_start_px:row_end_px, :])
-
-    # Update the row index based on the direction of playback
-    row_index += direction
-
-    # If we reach the end of the sheet, reverse the direction
-    if row_index == frame_num-1:
-        direction = -1
-    elif row_index == 0:
-        direction = 1
-
-    # Check for key presses. If you want the animation to play at a specific
-    # frame rate, you can change the wait time to slow it down. This example
-    # plays the animation as fast as possible, which is often needed to look
-    # smooth in MicroPython
-    key = cv.waitKey(1)
-
-    # If any key is pressed, exit the loop
-    if key != -1:
-        break
diff --git a/opencv-examples/images/animation_sheet.png b/opencv-examples/images/animation_sheet.png
deleted file mode 100644
index ee77cca87581e1a6be2cc53f1797209bb86bcb33..0000000000000000000000000000000000000000
GIT binary patch
[8487 bytes of base85-encoded PNG data omitted]
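
The frame-extraction scheme the deleted animation example used is worth keeping in mind for sprite sheets in general: each frame is just a row slice of the sheet. A minimal sketch, assuming frames of a fixed height stacked vertically (the helper name is hypothetical):

def get_frame(sheet, index, frame_height=240):
    # Each frame occupies frame_height consecutive rows of the sheet
    row_start = index * frame_height
    return sheet[row_start:row_start + frame_height, :]

For ping-pong playback like the example above, the index can sweep 0 through frame_num - 1 and back, flipping `direction` at each end.
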
zA}GHm8EwSL4ULP7+tY(2&?uC4k-Z3dUc=|G2YDJfnUpeBw9U%wMoN8Xv&;4FMw`17 zp=wk!*2B%TswJb862VY|<)*HhE&8x2^EV!^Br`2b3sqAPo}I1kwbAN%yFUU1;a%YJ zsJNI5Wv(>E)2u!jIjNMyu*u4BD3&?YZ7wIX8}q8tWV6-{(PREdtao3RJxV)^X!t3{mw3$|yK8W3Q^>l*n!75s|QE?|=~X3VnX zdvq}Ko39CS@O6qt<6^7JfFik;;I%IjlTN3BHlEnZ3S&qi;2NHch~38{qtOnkQLOzl zg9qCl=APm6Iz0B#EmE(^?V>E1Mvr}Oth)1bqnP6jIV6+QC`gl*9WRE(e6pUBEGRRb z*HKwq9J+>^p8oHU8qGz^kO94Jz1^AmccJ>9Q8pI?k)O!|RZp*%T>~VwU0qwJ4P6pq z*D$(oEgw?{TmFElpKo>(m-nYj?yc5a*&P@|ao93s>g5e?7pnr*9}8A=dEY=U?#gt& zSPwmX^K5A$Ryn_<;pH_xuE`@6kENAO(SI5U0!j6CoGw&9uBEdPqki(;UCxgH16!|# zE43RyuXnMfPbU-RvJ@+1xqYmAdh`^?Nv=^mMO0K&98*7HQdqKV2h&k%940+oGh2Jq1s;@#lJq+!4`R^jRIqz=pE$d2K z;8O4zJM?;au50WK)(b(Ax6%x|Yn2z=oQt0&Dx{w;zzK}Iy=n4vU@ih7iSVik*Fig`RB5JFg_@4THYO-?SI z^}a~cF?0_-29Kjmr^(*Ohc}w5#vBOPiY-%yVjVn;$q2}2{NQf1zj!t!l}PwP`r2YN z_5Ad-Isn7^Tf3p`0T4(wT>v_$L3|G^F}x1iGt|%mUlUjH>LA?5i+`Js0OFqDDyMz@ zxl!fYHfsfzS2Ml&F#ZNQQ3w5i%!iq;o_L_(c)Qn_+UyV&EB)yEb5Jaw31$VZ_k~_+ z&Fvz$B|MIx#o@!Jcrxo^FjR%IM6Il#zdvoR=;-LE@9bx(p7*B3Z4|&L2C^y%jAMk~ zbVn}u42rZd9lW(-w%&^bv-Sr`OR3Iaw!AYe?ef6{~gwc zZWliT1NI9MtIvYXQ>v-(sL2g76Mslc}3DYt1CMa6iT z4sGMw{e9Em9#jA%1qaj;Z^lM;6)4m*{thyAIj( zFilQQmg#r2_D_V3KR|<`J8;zWDqc~bd3_)zJu@?NgU7w6QmEnZaKHWf)J7Z#*NPw= zwDhu+c-`L(?5bO5kp-;Mov!N8)Jkx0agmhjy4Y$>3h<&)yd_@i~e-o z6ExKYi-d5oT1oR1;+5sM*{;B0{{%D6S2t2Icaj+f9-XaTsS_w=y99|S0zzSVvewm% zYXuf7U!8V@fivsHfSk0;4jo;{fF`&-zt2wJ2TS&74Mx;e_hVE%^+-9ETqHxd;Fa<6 zId5?xH+A&@kvmLQM$MV4Uk}Y_mV3vRA79VIknlcg-M+0a5w1fk&HkUQDwRH7)^zmu zzv}l-pFMNn{cQjH&he>sEvsw(*4WScyK#R#!=If`Rd*;NaVa`AnNrKR>%4{-4c8{?nJs zM{jbA`pz^f{kzNlz^R7aYkm~E-rcn^c6YfmlRcjYFk(t%BRWs-i{@G<((~iWl#t(V zGJJe|Ql=_towiH<_^*2NiR#P!Zj(1AxA)E0{c-4bl7o;$Ze`_9hjUxAuYY@aU3^E> zo}2DWc6$8UJm z^|Ri;9g=ZIT%Tdyp=rLeU8iv5tqohj!JnxQ0(i4wrncrKl z$J-bGGS&8NPGP+L1X!+pnt0+*%@1LgF1N+3-G^7!RrH_dljSPPeDga0hB(jv{C6=M zu6Ey62F|jw7yLQU!dy9j>eQ*zrls|7Jz&kYzT(jn&r{XyUmA-H?pQAoJ{Go6J1{)qk2XNTsJu|+wEk7DqWuN}|F=4@{SKDiU|N8n`^mD%Mx7&_? z`~K;M#9iR&D>hMDAQ5nD&rtkO4IJUly0(flk5xe0ZcXIsu---A ziZe21{7q~OX=G&dD{bOW`hP0HM8s{a&bF|R-Jf#PHE-D8*<5|T^YQV6$F99B1$Kya zZC;Ac`unJ3;&i>(N6c6Dua~sQm~d&^=i->t)8uPEeN6xS>~M7Ryq5a6ae5}e9jLp2 zLxDT)v)z8{@aH8j+XNS2W3DRs%?;N1^?ch(f`Trcd3kw%!W@coYDO29>1r&*yUt&2virGpy@4(IM1* zaKr3h-)?j5bXtDB`MvDpv?MpRse8YC(kO6TD%5#1N%H5R6OZmChfLK122vvfyV%hy zeTCP1Zk1+Rv+vzqVp;o2_I zY|X#FF!gnqU+J-HtCwHSbkt^g&Go!7``DGHuuj(`yS*Rdx~{&x?HRkfbg@f7(ASXl z^IA<0w4Mjn`pLh`Izy6-W(f%i0V5#g|El=^|A0}!w)^F=udiEcnf}NaWHhW_Kfk&B z!Ws5kJ_4buLNgTe~DWM4fH3D4; diff --git a/opencv-examples/xrp_examples/ex01_touch_screen_drive.py b/opencv-examples/xrp_examples/ex01_touch_screen_drive.py deleted file mode 100644 index 2fb921e..0000000 --- a/opencv-examples/xrp_examples/ex01_touch_screen_drive.py +++ /dev/null @@ -1,169 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# ex01_touch_screen_drive.py -# -# This example creates a simple touch screen interface to drive the XRP robot. -# It creates arrow buttons to drive around, and a stop button to exit the -# example. 
The XRP is available from SparkFun: -# https://www.sparkfun.com/experiential-robotics-platform-xrp-kit.html -#------------------------------------------------------------------------------- - -# Import XRPLib defaults -from XRPLib.defaults import * - -# Import OpenCV and hardware initialization module -import cv2 as cv -from cv2_hardware_init import * - -# Import NumPy -from ulab import numpy as np - -# Dimensions and properties for the UI elements -ui_shape = (240, 320, 3) -ui_cx = ui_shape[1] // 2 -ui_cy = ui_shape[0] // 2 -button_size = 50 -button_cx = button_size // 2 -button_cy = button_size // 2 -button_spacing = 75 -button_shape = (button_size, button_size, 3) -button_color = (255, 255, 255) -arrow_length = 30 -arrow_thickness = 5 -arrow_tip_length = 0.5 -arrow_background_color = (255, 0, 0) -stop_size = 25 -stop_background_color = (0, 0, 255) - -def create_ui_image(): - # Initialize arrow button images. This could be done with a single image - # that gets transposed and flipped, but ulab's transpose() doesn't support - # the axes argument: - # https://github.com/v923z/micropython-ulab/issues/731 - # So we instead create separate images for vertical and horizontal arrows - img_arrow_vertical = np.zeros(button_shape, dtype=np.uint8) - img_arrow_vertical[:, :] = arrow_background_color - img_arrow_horizontal = img_arrow_vertical.copy() - img_arrow_vertical = cv.arrowedLine( - img_arrow_vertical, - (button_cx, button_cy + arrow_length // 2), - (button_cx, button_cy - arrow_length // 2), - button_color, - arrow_thickness, - cv.FILLED, - 0, - arrow_tip_length - ) - img_arrow_horizontal = cv.arrowedLine( - img_arrow_horizontal, - (button_cx - arrow_length // 2, button_cy), - (button_cx + arrow_length // 2, button_cy), - button_color, - arrow_thickness, - cv.FILLED, - 0, - arrow_tip_length - ) - - # Initialize stop button image - img_button_stop = np.zeros(button_shape, dtype=np.uint8) - img_button_stop[:, :] = stop_background_color - img_button_stop = cv.rectangle( - img_button_stop, - (button_cx - stop_size // 2, button_cy - stop_size // 2), - (button_cx + stop_size // 2, button_cy + stop_size // 2), - button_color, - cv.FILLED - ) - - # Initialize UI image - img_ui = np.zeros(ui_shape, dtype=np.uint8) - - # Draw the stop button in the center - img_ui[ - ui_cy-button_cy:ui_cy+button_cy, - ui_cx-button_cx:ui_cx+button_cx - ] = img_button_stop - - # Draw the forward arrow above the stop button - img_ui[ - ui_cy-button_spacing-button_cy:ui_cy-button_spacing+button_cy, - ui_cx-button_cx:ui_cx+button_cx - ] = img_arrow_vertical - - # Draw the backward arrow below the stop button - img_ui[ - ui_cy+button_spacing-button_cy:ui_cy+button_spacing+button_cy, - ui_cx-button_cx:ui_cx+button_cx - ] = img_arrow_vertical[::-1, :] # Flip the arrow image vertically - - # Draw the right arrow to the right of the stop button - img_ui[ - ui_cy-button_cy:ui_cy+button_cy, - ui_cx+button_spacing-button_cx:ui_cx+button_spacing+button_cx - ] = img_arrow_horizontal - - # Draw the left arrow to the left of the stop button - img_ui[ - ui_cy-button_cy:ui_cy+button_cy, - ui_cx-button_spacing-button_cx:ui_cx-button_spacing+button_cx - ] = img_arrow_horizontal[:, ::-1] # Flip the arrow image horizontally - - # Return the UI image - return img_ui - -# Create the UI image and show it on the display -cv.imshow(display, create_ui_image()) - -# Prompt the user to touch the screen to drive around -print("Touch the screen to drive around. 
Press any key to exit.") - -# Loop to continuously read touch input and drive around -while True: - # Check if there is touch input - if touch_screen.is_touched(): - # Read touch coordinates - x, y = touch_screen.get_touch_xy() - - # Check if the stop button was pressed - if (ui_cx - button_cx <= x <= ui_cx + button_cx and - ui_cy - button_cy <= y <= ui_cy + button_cy): - print("Stop") - break - - # Check if the forward arrow was pressed - elif (ui_cx - button_cx <= x <= ui_cx + button_cx and - ui_cy - button_spacing - button_cy <= y <= ui_cy - button_spacing + button_cy): - print("Forward") - drivetrain.straight(20, 0.5) - - # Check if the backward arrow was pressed - elif (ui_cx - button_cx <= x <= ui_cx + button_cx and - ui_cy + button_spacing - button_cy <= y <= ui_cy + button_spacing + button_cy): - print("Backward") - drivetrain.straight(-20, 0.5) - - # Check if the right arrow was pressed - elif (ui_cy - button_cy <= y <= ui_cy + button_cy and - ui_cx + button_spacing - button_cx <= x <= ui_cx + button_spacing + button_cx): - print("Right") - drivetrain.turn(-90, 0.5) - - # Check if the left arrow was pressed - elif (ui_cy - button_cy <= y <= ui_cy + button_cy and - ui_cx - button_spacing - button_cx <= x <= ui_cx - button_spacing + button_cx): - print("Left") - drivetrain.turn(90, 0.5) - - # Check for key presses - key = cv.waitKey(1) - - # If any key is pressed, exit the loop - if key != -1: - break - -# Clear the display to remove the UI -display.splash() diff --git a/opencv-examples/xrp_examples/ex02_grab_orange_ring.py b/opencv-examples/xrp_examples/ex02_grab_orange_ring.py deleted file mode 100644 index 0952263..0000000 --- a/opencv-examples/xrp_examples/ex02_grab_orange_ring.py +++ /dev/null @@ -1,212 +0,0 @@ -#------------------------------------------------------------------------------- -# SPDX-License-Identifier: MIT -# -# Copyright (c) 2025 SparkFun Electronics -#------------------------------------------------------------------------------- -# ex02_grab_orange_ring.py -# -# The XRP can act as a bridge to FIRST programs, which includes summer camps -# with FIRST-style games. Learn more here: -# https://experientialrobotics.org/bridge-to-first/ -# -# FIRST-style games often include game elements with randomized locations that -# can be detected with a camera. The exact game elements and tasks change every -# year, but this example assumes there is an orange ring in front of the robot -# that needs to be grabbed. This example demonstrates how to detect the ring, -# calculate its distance and position relative to the robot in real-world units, -# then drive the robot to grab it. This requires the servo arm to be mounted to -# the front of the chassis right next to the camera, so it can reach through the -# ring to grab it. 
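
Each button check in the deleted touch screen example above is the same axis-aligned rectangle test. A small helper (hypothetical, not part of the original file) makes the pattern explicit:

def hit(x, y, cx, cy, half_w, half_h):
    # True if (x, y) falls inside the rectangle centered at (cx, cy)
    return (cx - half_w <= x <= cx + half_w) and (cy - half_h <= y <= cy + half_h)

For example, the stop button test becomes `hit(x, y, ui_cx, ui_cy, button_cx, button_cy)`.
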
-# -# The ring used in this example is from the 2020-2021 FIRST Tech Challenge game -# Ultimate Goal, and can be purchased here: -# https://andymark.com/products/5-in-foam-ring -#------------------------------------------------------------------------------- - -# Import XRPLib defaults -from XRPLib.defaults import * - -# Import OpenCV and hardware initialization module -import cv2 as cv -from cv2_hardware_init import * - -# Import time for delays -import time - -# Import math for calculations -import math - -# This is the pipeline implementation that attempts to find an orange ring in -# an image, and returns the real-world distance to the ring and its left/right -# position relative to the center of the image in centimeters -def find_orange_ring_pipeline(frame): - # Convert the frame to HSV color space, which is often more effective for - # color-based segmentation tasks than RGB or BGR color spaces - hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV) - - # Here we use the `cv.inRange()` function to find all the orange pixels. - # This outputs a binary image where pixels that fall within the specified - # lower and upper bounds are set to 255 (white), and all other pixels are - # set to 0 (black). This is applied to the HSV image, so the lower and upper - # bounds are in HSV color space. The bounds were determined experimentally: - # - # Hue: Orange hue is around 20, so we use a range of 15 to 25 - # Saturation: Anything above 50 is saturated enough - # Value: Anything above 30 is bright enough - lower_bound = (15, 50, 30) - upper_bound = (25, 255, 255) - in_range = cv.inRange(hsv, lower_bound, upper_bound) - - # Noise in the image often causes `cv.inRange()` to return false positives - # and false negatives, meaning there are some incorrect pixels in the binary - # image. These can be cleaned up with morphological operations, which - # effectively grow and shrink regions in the binary image to remove tiny - # blobs of noise - kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3)) - morph_open = cv.morphologyEx(in_range, cv.MORPH_OPEN, kernel) - morph_close = cv.morphologyEx(morph_open, cv.MORPH_CLOSE, kernel) - - # Now we use `cv.findContours()` to find the contours in the binary image, - # which are the boundaries of the regions in the binary image - contours, hierarchy = cv.findContours(morph_close, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) - - # It's possible that no contours were found, so first check if any were - # found before proceeding - best_contour = None - if contours: - # It's possible that some tiny blobs of noise are still present in the - # binary image, or other objects entirely, leading to extra contours. A - # proper pipeline would make an effort to filter out unwanted contours - # based on size, shape, or other criteria. This example keeps it simple; - # the contour of a ring is a circle, meaning many points are needed to - # represent it. A contour with only a few points is obviously not a - # circle, so we can ignore it. 
This example assumes the ring is the only - # large orange object in the image, so the first contour that's complex - # enough is probably the one we're looking for - for i in range(len(contours)): - if len(contours[i]) < 50: - continue - best_contour = contours[i] - break - - # If no contour was found, return invalid values to indicate that - if best_contour is None: - return -1, -1 - - # Calculate the bounding rectangle of the contour, and use that to calculate - # the center coordinates of the ring - left, top, width, height = cv.boundingRect(best_contour) - center_x = left + width // 2 - center_y = top + height // 2 - - # Now we can calculate the real-world distance to the ring based on its - # size. We'll first estimate the diameter of the ring in pixels by taking - # the maximum of the width and height of the bounding rectangle. This - # compensates for the fact that the ring may be tilted - diameter_px = max(width, height) - - # If the camera has a perfect lens, the distance can be calculated with: - # - # distance_cm = diameter_cm * focal_length_px / diameter_px - # - # Almost every camera lens has some distortion, so this may not be perfect, - # but testing with the HM01B0 has shown it to be good enough. Note that this - # distance is measured from the camera lens - # - # The focal length depends on the exact camera being used. This example - # assumes the HM01B0 camera board sold by SparkFun, which has an effective - # focal length (EFL) of 0.66mm, and a pixel size of 3.6um. We can calculate - # the focal length in pixels from these, which were found in the datasheet: - # https://mm.digikey.com/Volume0/opasdata/d220001/medias/docus/5458/HM01B0-ANA-00FT870.pdf - focal_length_px = 660 / 3.6 - diameter_cm = 12.7 - distance_cm = diameter_cm * focal_length_px / diameter_px - - # Now with our distance estimate, we can calculate how far left or right the - # ring is from the center in the same real-world units. 
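
Before moving on to the left/right position, a worked instance of the distance relation above may help. The constants come from the comments in this example; the 92-pixel measurement is made up for illustration:

focal_length_px = 660 / 3.6  # ~183.3 px, from the HM01B0 EFL and pixel size
diameter_cm = 12.7           # physical ring diameter

diameter_px = 92             # hypothetical bounding-box measurement
distance_cm = diameter_cm * focal_length_px / diameter_px
print(distance_cm)           # ~25.3 cm from the camera lens
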
Assuming a perfect - # lens, the position can be calculated as: - # - # position_x_cm = distance_cm * position_x_px / focal_length_px - position_x_px = center_x - (frame.shape[1] // 2) - position_x_cm = distance_cm * position_x_px / focal_length_px - - # Draw the contour, bounding box, center, and text for visualization - frame = cv.drawContours(frame, [best_contour], -1, (0, 0, 255), 2) - frame = cv.rectangle(frame, (left, top), (left + width, top + height), (255, 0, 0), 2) - frame = cv.drawMarker(frame, (center_x, center_y), (0, 255, 0), cv.MARKER_CROSS, 10, 2) - frame = cv.putText(frame, f"({center_x}, {center_y})", (center_x - 45, center_y - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) - frame = cv.putText(frame, f"{width}x{height}", (left, top - 10), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2) - frame = cv.putText(frame, f"D={distance_cm:.1f}cm", (left, top - 25), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2) - frame = cv.putText(frame, f"X={position_x_cm:.1f}cm", (left, top - 40), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2) - - # Now we can return the distance and position of the ring in cm, since - # that's the only data we need from this pipeline - return distance_cm, position_x_cm - -# Move the servo out of the way of the camera -servo_one.set_angle(90) - -# Wait for user button to be pressed to start the example -print("Press the user button to start the example") -while not board.is_button_pressed(): - pass - -# Open the camera and wait a moment for at least one frame to be captured -camera.open() -time.sleep(0.1) - -# Prompt the user to press a key to continue -print("Detecting ring...") - -# Loop until the ring is found or the user presses a key -while True: - # Read a frame from the camera - success, frame = camera.read() - if not success: - print("Error reading frame from camera") - break - - # Call the pipeline function to find the ring - distance_cm, position_x_cm = find_orange_ring_pipeline(frame) - - # Display the frame - cv.imshow(display, frame) - - # If the distance is valid, break the loop - if distance_cm >= 0: - break - - # Check for key presses - key = cv.waitKey(1) - - # If any key is pressed, exit the loop - if key != -1: - break - -# Print the distance and position of the ring -print(f"Found ring at distance {distance_cm:.1f} cm, X position {position_x_cm:.1f} cm from center") - -# Release the camera, we're done with it -camera.release() - -# Wait for user button to be pressed to continue -print("Press the user button to continue") -while not board.is_button_pressed(): - pass - -# Move the servo to go through the center of the ring -servo_one.set_angle(45) - -# Turn to face the ring. 
We first calculate the angle to turn based on the -# position of the ring -angle = -math.atan2(position_x_cm, distance_cm) * 180 / math.pi -drivetrain.turn(angle) - -# Drive forwards to put the arm through the ring -drivetrain.straight(distance_cm) - -# Rotate the servo to pick up the ring -servo_one.set_angle(90) - -# Drive backwards to grab the ring -drivetrain.straight(-10) diff --git a/src/opencv/platforms/common.cmake b/platforms/common.cmake similarity index 100% rename from src/opencv/platforms/common.cmake rename to platforms/common.cmake diff --git a/platforms/include/rp2350_unsafe_cv_xadd.h b/platforms/include/rp2350_unsafe_cv_xadd.h new file mode 100644 index 0000000..a3853c8 --- /dev/null +++ b/platforms/include/rp2350_unsafe_cv_xadd.h @@ -0,0 +1,20 @@ +#ifndef RP2350_UNSAFE_CV_XADD_H +#define RP2350_UNSAFE_CV_XADD_H + +// Fix for https://github.com/raspberrypi/pico-sdk/issues/2505 +// TLDR; OpenCV uses atomic operations for incrementing reference counters by +// default. However, the Pico SDK does not support atomic operations on data in +// PSRAM; attempting to do so just causes an infinite loop where the value is +// incremented forever. The workaround is to use a non-atomic operation by +// re-defining the `CV_XADD` macro. This is "unsafe" because it's not atomic, +// but it *should* be fine since we're only using one thread. Also see: +// https://github.com/opencv/opencv/blob/52bed3cd7890192700b2451e2713c340209ffd79/modules/core/include/opencv2/core/cvdef.h#L697-L723 +static inline int unsafe_cv_xadd(int* addr, int delta) +{ + int tmp = *addr; + *addr += delta; + return tmp; +} +#define CV_XADD(addr, delta) unsafe_cv_xadd(addr, delta) + +#endif diff --git a/src/opencv/platforms/include/zephyr_stdint.h b/platforms/include/zephyr_stdint.h similarity index 100% rename from src/opencv/platforms/include/zephyr_stdint.h rename to platforms/include/zephyr_stdint.h diff --git a/src/opencv/platforms/rp2350.toolchain.cmake b/platforms/rp2350.toolchain.cmake similarity index 100% rename from src/opencv/platforms/rp2350.toolchain.cmake rename to platforms/rp2350.toolchain.cmake diff --git a/src/convert.h b/src/convert.h index 7310dfd..08d3731 100644 --- a/src/convert.h +++ b/src/convert.h @@ -17,7 +17,7 @@ // C headers extern "C" { #include "py/runtime.h" -#include "ulab/code/ndarray.h" +#include "ndarray.h" } // extern "C" using namespace cv; diff --git a/src/core.cpp b/src/core.cpp index 56065ed..2e7c95b 100644 --- a/src/core.cpp +++ b/src/core.cpp @@ -19,7 +19,7 @@ // C headers extern "C" { #include "core.h" -#include "ulab/code/ndarray.h" +#include "ndarray.h" } // extern "C" using namespace cv; diff --git a/src/highgui.cpp b/src/highgui.cpp index 06945f8..645dac6 100644 --- a/src/highgui.cpp +++ b/src/highgui.cpp @@ -18,7 +18,7 @@ // C headers extern "C" { #include "highgui.h" -#include "ulab/code/ndarray.h" +#include "ndarray.h" #include "py/mphal.h" } // extern "C" diff --git a/src/imgcodecs.cpp b/src/imgcodecs.cpp index 4f15851..c624f89 100644 --- a/src/imgcodecs.cpp +++ b/src/imgcodecs.cpp @@ -19,7 +19,7 @@ // C headers extern "C" { #include "imgcodecs.h" -#include "ulab/code/ndarray.h" +#include "ndarray.h" #include "py/builtin.h" } // extern "C" diff --git a/src/imgproc.cpp b/src/imgproc.cpp index 2cbf30e..72fd805 100644 --- a/src/imgproc.cpp +++ b/src/imgproc.cpp @@ -19,7 +19,7 @@ // C headers extern "C" { #include "imgproc.h" -#include "ulab/code/ndarray.h" +#include "ndarray.h" } // extern "C" using namespace cv; diff --git a/src/numpy.h b/src/numpy.h index 
9f58248..ccee157 100644 --- a/src/numpy.h +++ b/src/numpy.h @@ -29,7 +29,7 @@ // C headers extern "C" { #include "py/runtime.h" -#include "ulab/code/ndarray.h" +#include "ndarray.h" } // extern "C" using namespace cv; diff --git a/src/opencv/Makefile b/src/opencv/Makefile deleted file mode 100644 index c26c20e..0000000 --- a/src/opencv/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -ifndef PLATFORM -$(error PLATFORM not specified. Use 'make PLATFORM=rp2350' or similar.) -endif - -TOOLCHAIN_FILE = ../../platforms/${PLATFORM}.toolchain.cmake - -# TODO: For some reason, specifying this in the toolchain file doesn't work -CMAKE_ARGS += -DBUILD_LIST=core,imgproc,imgcodecs - -# Generic build -all: - cd opencv && mkdir -p build && cmake -S . -B build -DPICO_BUILD_DOCS=0 -DCMAKE_TOOLCHAIN_FILE=${TOOLCHAIN_FILE} ${CMAKE_ARGS} && make -C build -f Makefile $(MAKEFLAGS) $(MAKE_ARGS) - -# Clean the OpenCV build -clean: - cd opencv && rm -rf build diff --git a/src/opencv/platforms/include/rp2350_unsafe_cv_xadd.h b/src/opencv/platforms/include/rp2350_unsafe_cv_xadd.h deleted file mode 100644 index 43234d7..0000000 --- a/src/opencv/platforms/include/rp2350_unsafe_cv_xadd.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef RP2350_UNSAFE_CV_XADD_H -#define RP2350_UNSAFE_CV_XADD_H - -// Fix for https://github.com/raspberrypi/pico-sdk/issues/2505 -static inline int unsafe_cv_xadd(int* addr, int delta) -{ - int tmp = *addr; - *addr += delta; - return tmp; -} -#define CV_XADD(addr, delta) unsafe_cv_xadd(addr, delta) - -#endif \ No newline at end of file diff --git a/src/ulab b/ulab similarity index 100% rename from src/ulab rename to ulab