diff --git a/drivers/display/st7789_spi.py b/drivers/display/st7789_spi.py
new file mode 100644
index 0000000..2927422
--- /dev/null
+++ b/drivers/display/st7789_spi.py
@@ -0,0 +1,373 @@
+# Modified from:
+# https://github.com/easytarget/st7789-framebuffer/blob/main/st7789_purefb.py
+
+import struct
+from time import sleep_ms
+from machine import Pin, SPI
+from ulab import numpy as np
+import cv2
+
+# ST7789 commands
+_ST7789_SWRESET = b"\x01"
+_ST7789_SLPIN = b"\x10"
+_ST7789_SLPOUT = b"\x11"
+_ST7789_NORON = b"\x13"
+_ST7789_INVOFF = b"\x20"
+_ST7789_INVON = b"\x21"
+_ST7789_DISPOFF = b"\x28"
+_ST7789_DISPON = b"\x29"
+_ST7789_CASET = b"\x2a"
+_ST7789_RASET = b"\x2b"
+_ST7789_RAMWR = b"\x2c"
+_ST7789_VSCRDEF = b"\x33"
+_ST7789_COLMOD = b"\x3a"
+_ST7789_MADCTL = b"\x36"
+_ST7789_VSCSAD = b"\x37"
+_ST7789_RAMCTL = b"\xb0"
+
+# MADCTL bits
+_ST7789_MADCTL_MY = const(0x80)
+_ST7789_MADCTL_MX = const(0x40)
+_ST7789_MADCTL_MV = const(0x20)
+_ST7789_MADCTL_ML = const(0x10)
+_ST7789_MADCTL_BGR = const(0x08)
+_ST7789_MADCTL_MH = const(0x04)
+_ST7789_MADCTL_RGB = const(0x00)
+
+RGB = 0x00
+BGR = 0x08
+
+# 8 basic color definitions
+BLACK = const(0x0000)
+BLUE = const(0x001F)
+RED = const(0xF800)
+GREEN = const(0x07E0)
+CYAN = const(0x07FF)
+MAGENTA = const(0xF81F)
+YELLOW = const(0xFFE0)
+WHITE = const(0xFFFF)
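+
+# The color constants above are RGB565 values: bits 15-11 are red, bits 10-5
+# are green, and bits 4-0 are blue. For reference, an 8-bit (r, g, b) triple
+# maps to RGB565 as ((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3),
+# e.g. (255, 0, 0) -> 0xF800 (RED above).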
+
+_ENCODE_POS = const(">HH")
+
+# Rotation tables
+# (madctl, width, height, xstart, ystart)[rotation % 4]
+
+_DISPLAY_240x320 = (
+    (0x00, 240, 320, 0, 0),
+    (0x60, 320, 240, 0, 0),
+    (0xc0, 240, 320, 0, 0),
+    (0xa0, 320, 240, 0, 0))
+
+_DISPLAY_170x320 = (
+    (0x00, 170, 320, 35, 0),
+    (0x60, 320, 170, 0, 35),
+    (0xc0, 170, 320, 35, 0),
+    (0xa0, 320, 170, 0, 35))
+
+_DISPLAY_240x240 = (
+    (0x00, 240, 240, 0, 0),
+    (0x60, 240, 240, 0, 0),
+    (0xc0, 240, 240, 0, 80),
+    (0xa0, 240, 240, 80, 0))
+
+_DISPLAY_135x240 = (
+    (0x00, 135, 240, 52, 40),
+    (0x60, 240, 135, 40, 53),
+    (0xc0, 135, 240, 53, 40),
+    (0xa0, 240, 135, 40, 52))
+
+_DISPLAY_128x128 = (
+    (0x00, 128, 128, 2, 1),
+    (0x60, 128, 128, 1, 2),
+    (0xc0, 128, 128, 2, 1),
+    (0xa0, 128, 128, 1, 2))
+
+# Supported displays (physical width, physical height, rotation table)
+_SUPPORTED_DISPLAYS = (
+    (240, 320, _DISPLAY_240x320),
+    (170, 320, _DISPLAY_170x320),
+    (240, 240, _DISPLAY_240x240),
+    (135, 240, _DISPLAY_135x240),
+    (128, 128, _DISPLAY_128x128))
+
+# init tuple format (b'command', b'data', delay_ms)
+_ST7789_INIT_CMDS = (
+    ( b'\x11', b'\x00', 120),               # Exit sleep mode
+    ( b'\x13', b'\x00', 0),                 # Turn on normal display mode
+    ( b'\xb6', b'\x0a\x82', 0),             # Set display function control
+    ( b'\x3a', b'\x55', 10),                # Set pixel format to 16 bits per pixel (RGB565)
+    ( b'\xb2', b'\x0c\x0c\x00\x33\x33', 0), # Set porch control
+    ( b'\xb7', b'\x35', 0),                 # Set gate control
+    ( b'\xbb', b'\x28', 0),                 # Set VCOMS setting
+    ( b'\xc0', b'\x0c', 0),                 # Set power control 1
+    ( b'\xc2', b'\x01\xff', 0),             # Set power control 2
+    ( b'\xc3', b'\x10', 0),                 # Set power control 3
+    ( b'\xc4', b'\x20', 0),                 # Set power control 4
+    ( b'\xc6', b'\x0f', 0),                 # Set VCOM control 1
+    ( b'\xd0', b'\xa4\xa1', 0),             # Set power control A
+    # Set gamma curve positive polarity
+    ( b'\xe0', b'\xd0\x00\x02\x07\x0a\x28\x32\x44\x42\x06\x0e\x12\x14\x17', 0),
+    # Set gamma curve negative polarity
+    ( b'\xe1', b'\xd0\x00\x02\x07\x0a\x28\x31\x54\x47\x0e\x1c\x17\x1b\x1e', 0),
+    ( b'\x21', b'\x00', 0),                 # Enable display inversion
+    ( b'\x29', b'\x00', 120)                # Turn on the display
+)
+
+class ST7789_SPI():
+    """
+    OpenCV SPI driver for ST7789 displays
+
+    Args:
+        width (int): display width **Required**
+        height (int): display height **Required**
+        spi_id (int): SPI bus ID
+        spi_baudrate (int): SPI baudrate, default 24MHz
+        pin_sck (pin): SCK pin number
+        pin_mosi (pin): MOSI pin number
+        pin_miso (pin): MISO pin number
+        pin_cs (pin): Chip Select pin number
+        pin_dc (pin): Data/Command pin number
+        rotation (int): Orientation of display
+            - 0-Portrait, default
+            - 1-Landscape
+            - 2-Inverted Portrait
+            - 3-Inverted Landscape
+        color_order (int):
+            - RGB: Red, Green, Blue, default
+            - BGR: Blue, Green, Red
+        reverse_bytes_in_word (bool):
+            - Enable if the display uses LSB byte order for color words
+    """
+    def __init__(
+        self,
+        width,
+        height,
+        spi_id,
+        spi_baudrate=24000000,
+        pin_sck=None,
+        pin_mosi=None,
+        pin_miso=None,
+        pin_cs=None,
+        pin_dc=None,
+        rotation=0,
+        color_order=BGR,
+        reverse_bytes_in_word=True,
+    ):
+        # Set up the SPI bus and control pins
+        self.spi = SPI(spi_id, baudrate=spi_baudrate,
+                       sck=Pin(pin_sck, Pin.OUT) if pin_sck else None,
+                       mosi=Pin(pin_mosi, Pin.OUT) if pin_mosi else None,
+                       miso=Pin(pin_miso, Pin.IN) if pin_miso else None)
+        self.cs = Pin(pin_cs, Pin.OUT, value=1) if pin_cs else None
+        self.dc = Pin(pin_dc, Pin.OUT, value=1) if pin_dc else None
+        # Initial dimensions and offsets; will be overridden when rotation applied
+        self.width = width
+        self.height = height
+        self.xstart = 0
+        self.ystart = 0
+        # Check display is known and get rotation table
+        self.rotations = self._find_rotations(width, height)
+        if not self.rotations:
+            supported_displays = ", ".join(
+                [f"{display[0]}x{display[1]}" for display in _SUPPORTED_DISPLAYS])
+            raise ValueError(
+                f"Unsupported {width}x{height} display. Supported displays: {supported_displays}")
+        # Colors
+        self.color_order = color_order
+        self.needs_swap = reverse_bytes_in_word
+        # Reset the display
+        self.soft_reset()
+        # Yes, send init twice, once is not always enough
+        self.send_init(_ST7789_INIT_CMDS)
+        self.send_init(_ST7789_INIT_CMDS)
+        # Initial rotation
+        self._rotation = rotation % 4
+        # Apply rotation
+        self.rotation(self._rotation)
+        # Create the framebuffer for the correct rotation
+        self.buffer = np.zeros((self.height, self.width, 2), dtype=np.uint8)
+
+    def send_init(self, commands):
+        """
+        Send initialisation commands to display.
+        """
+        for command, data, delay in commands:
+            self._write(command, data)
+            sleep_ms(delay)
+
+    def soft_reset(self):
+        """
+        Soft reset display.
+        """
+        self._write(_ST7789_SWRESET)
+        sleep_ms(150)
+
+    def _find_rotations(self, width, height):
+        """ Find the correct rotation table for our display or return None """
+        for display in _SUPPORTED_DISPLAYS:
+            if display[0] == width and display[1] == height:
+                return display[2]
+        return None
+
+    def rotation(self, rotation):
+        """
+        Set display rotation.
+
+        Args:
+            rotation (int):
+                - 0-Portrait
+                - 1-Landscape
+                - 2-Inverted Portrait
+                - 3-Inverted Landscape
+        """
+        if ((rotation % 2) != (self._rotation % 2)) and (self.width != self.height):
+            # non-square displays can currently only be rotated by 180 degrees
+            # TODO: can the framebuffer of the super class be destroyed and re-created
+            # to match the new dimensions? or its width/height changed?
+            return
+
+        # find rotation parameters and send command
+        rotation %= len(self.rotations)
+        (madctl,
+         self.width,
+         self.height,
+         self.xstart,
+         self.ystart) = self.rotations[rotation]
+        if self.color_order == BGR:
+            madctl |= _ST7789_MADCTL_BGR
+        else:
+            madctl &= ~_ST7789_MADCTL_BGR
+        self._write(_ST7789_MADCTL, bytes([madctl]))
+        # Set window for writing into
+        self._write(_ST7789_CASET,
+                    struct.pack(_ENCODE_POS, self.xstart, self.width + self.xstart - 1))
+        self._write(_ST7789_RASET,
+                    struct.pack(_ENCODE_POS, self.ystart, self.height + self.ystart - 1))
+        self._write(_ST7789_RAMWR)
+        # TODO: Can we swap (modify) framebuffer width/height in the super() class?
+        self._rotation = rotation
+
+    def _get_common_roi_with_buffer(self, image):
+        """
+        Get the common region of interest (ROI) between the image and the
+        display's internal buffer.
+
+        Args:
+            image (ndarray): Image to display
+
+        Returns:
+            tuple: (image_roi, buffer_roi)
+        """
+        # Ensure image is a NumPy ndarray
+        if type(image) is not np.ndarray:
+            raise TypeError("Image must be a NumPy ndarray")
+
+        # Determine the number of rows and columns in the image
+        image_rows = image.shape[0]
+        if len(image.shape) < 2:
+            image_cols = 1
+        else:
+            image_cols = image.shape[1]
+
+        # Get the common ROI between the image and the buffer
+        row_max = min(image_rows, self.height)
+        col_max = min(image_cols, self.width)
+        img_roi = image[:row_max, :col_max]
+        buffer_roi = self.buffer[:row_max, :col_max]
+        return img_roi, buffer_roi
+
+    def _convert_image_to_uint8(self, image):
+        """
+        Convert the image to uint8 format if necessary.
+
+        Args:
+            image (ndarray): Image to convert
+
+        Returns:
+            Image: Converted image
+        """
+        # Check if the image is already in uint8 format
+        if image.dtype == np.uint8:
+            return image
+
+        # Convert to uint8 format. This unfortunately requires creating a new
+        # buffer for the converted image, which takes more memory
+        if image.dtype == np.int8:
+            return cv2.convertScaleAbs(image, alpha=1, beta=127)
+        elif image.dtype == np.int16:
+            return cv2.convertScaleAbs(image, alpha=1/255, beta=127)
+        elif image.dtype == np.uint16:
+            return cv2.convertScaleAbs(image, alpha=1/255)
+        elif image.dtype == np.float:
+            # This implementation creates an additional buffer from np.clip()
+            # TODO: Find another solution that avoids an additional buffer
+            return cv2.convertScaleAbs(np.clip(image, 0, 1), alpha=255)
+        else:
+            raise ValueError(f"Unsupported image dtype: {image.dtype}")
+
+    def _write_image_to_buffer_bgr565(self, image_roi, buffer_roi):
+        """
+        Convert the image ROI to BGR565 format and write it to the buffer ROI.
+
+        Args:
+            image_roi (ndarray): Image region of interest
+            buffer_roi (ndarray): Buffer region of interest
+        """
+        # Determine the number of channels in the image
+        if len(image_roi.shape) < 3:
+            ch = 1
+        else:
+            ch = image_roi.shape[2]
+
+        if ch == 1:  # Grayscale
+            buffer_roi = cv2.cvtColor(image_roi, cv2.COLOR_GRAY2BGR565, buffer_roi)
+        elif ch == 2:  # Already in BGR565 format
+            buffer_roi[:] = image_roi
+        elif ch == 3:  # BGR
+            buffer_roi = cv2.cvtColor(image_roi, cv2.COLOR_BGR2BGR565, buffer_roi)
+        else:
+            raise ValueError("Image must be 1, 2 or 3 channels (grayscale, BGR565, or BGR)")
+
+    def imshow(self, image):
+        """
+        Display a NumPy image on the screen.
+
+        Args:
+            image (ndarray): Image to display
+        """
+        # Get the common ROI between the image and internal display buffer
+        image_roi, buffer_roi = self._get_common_roi_with_buffer(image)
+
+        # Ensure the image is in uint8 format
+        image_roi = self._convert_image_to_uint8(image_roi)
+
+        # Convert the image to BGR565 format and write it to the buffer
+        self._write_image_to_buffer_bgr565(image_roi, buffer_roi)
+
+        # Write buffer to display. Swap bytes if needed
+        if self.needs_swap:
+            self._write(None, self.buffer[:, :, ::-1])
+        else:
+            self._write(None, self.buffer)
+
+    def clear(self):
+        """
+        Clear the display by filling it with black color.
+        """
+        # Clear the buffer by filling it with zeros (black)
+        self.buffer[:] = 0
+        # Write the buffer to the display
+        self._write(None, self.buffer)
+
+    def _write(self, command=None, data=None):
+        """SPI write to the device: commands and data."""
+        if self.cs:
+            self.cs.off()
+        if command is not None:
+            self.dc.off()
+            self.spi.write(command)
+        if data is not None:
+            self.dc.on()
+            self.spi.write(data)
+        if self.cs:
+            self.cs.on()
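A quick illustration of the conversion path above (a hedged sketch, not part of the driver file): `imshow()` is not limited to `uint8` BGR images, because `_convert_image_to_uint8()` rescales other dtypes first. For example, a single-channel float image with values in [0, 1] is clipped, scaled to 0-255, and shown as grayscale:

    from ulab import numpy as np

    # Assumes `display` is an already-constructed ST7789_SPI instance
    img = np.ones((240, 320), dtype=np.float) * 0.5   # mid-grey, float values in [0, 1]
    display.imshow(img)                               # rescaled to uint8 and shown as grayscale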
diff --git a/examples/ex01_hello_opencv.py b/examples/ex01_hello_opencv.py
new file mode 100644
index 0000000..288fb32
--- /dev/null
+++ b/examples/ex01_hello_opencv.py
@@ -0,0 +1,64 @@
+# Import OpenCV, just as you would in any other Python environment!
+import cv2
+
+# Import NumPy, almost like any other Python environment! The only difference is
+# the addition of `from ulab` since MicroPython does not have a full NumPy
+# implementation; ulab NumPy is a lightweight version of standard NumPy
+from ulab import numpy as np
+
+# Standard OpenCV leverages the host operating system to display images, but we
+# don't have that luxury in MicroPython. Instead, we need to import a display
+# driver. Any display driver can be used, as long as it implements an `imshow()`
+# method that takes a NumPy array as input
+from st7789_spi import ST7789_SPI
+
+# Create a display object. This will depend on the display driver you are using,
+# and you may need to adjust the parameters based on your specific display and
+# board configuration
+display = ST7789_SPI(width=240,
+                     height=320,
+                     spi_id=0,
+                     pin_cs=17,
+                     pin_dc=16,
+                     rotation=1)
+
+# Initialize an image (NumPy array) to be displayed, just like in any other
+# Python environment! Here we create a 240x320 pixel image with 3 color channels
+# (BGR order, like standard OpenCV) and a data type of `uint8` (you should
+# always specify the data type, because NumPy defaults to `float`)
+img = np.zeros((240, 320, 3), dtype=np.uint8)
+
+# Images can be accessed and modified directly if desired with array slicing.
+# Here we set the top 50 rows of the image to blue (remember, BGR order!)
+img[0:50, :] = (255, 0, 0)
+
+# OpenCV's drawing functions can be used to modify the image as well. For
+# example, we can draw a green ellipse at the center of the image
+img = cv2.ellipse(img, (160, 120), (100, 50), 0, 0, 360, (0, 255, 0), -1)
+
+# Note - Most OpenCV functions return the resulting image. For the drawing
+# functions the return value is redundant and often ignored, but if you call
+# them from the REPL without assigning the result to a variable, the entire
+# array will be printed. To avoid this, you can simply re-assign the image as
+# above, which has no effect other than preventing the output from being printed
+
+# And the obligatory "Hello OpenCV" text! This time in red
+img = cv2.putText(img, "Hello OpenCV!", (50, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+
+# Once we have an image ready to show, just call `cv2.imshow()`, almost like any
+# other Python environment! The only difference is that we need to pass the
+# display object we created earlier as the first argument, instead of a window
+# name string. Alternatively, you can call `display.imshow(img)` directly
+cv2.imshow(display, img)
+
+# Standard OpenCV requires a call to `cv2.waitKey()` to process events and
+# actually display the image. However, the display driver shows the image
+# immediately, so it's not necessary to call `cv2.waitKey()` in MicroPython.
+# But it is available, and behaves almost like any other Python environment! The
+# only difference is that it requires a key to be pressed in the REPL instead of
+# a window. It will wait for up to the specified number of milliseconds (0 for
+# indefinite), and return the ASCII code of the key pressed (-1 if no key press)
+#
+# Note - Some MicroPython IDEs (like Thonny) don't actually send any key presses
+# until you hit Enter on your keyboard
+key = cv2.waitKey(1)  # Not necessary to display image, can remove if desired
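As a follow-on sketch (hypothetical, not part of the example file), the same `display`, `img`, and `cv2.waitKey()` pieces combine into a typical update loop: redraw, show, then let the wait pace the loop while polling the REPL for a quit key:

    angle = 0
    while True:
        img[:] = 0                            # clear to black
        img = cv2.ellipse(img, (160, 120), (100, 50), angle, 0, 360, (0, 255, 0), -1)
        cv2.imshow(display, img)
        angle = (angle + 10) % 360
        if cv2.waitKey(100) == ord('q'):      # 'q' pressed in the REPL exits the loop
            break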
diff --git a/src/core.cpp b/src/core.cpp
index 5ac0d82..23238c8 100644
--- a/src/core.cpp
+++ b/src/core.cpp
@@ -11,6 +11,54 @@ extern "C" {
 
 using namespace cv;
 
+// Fix for https://github.com/sparkfun/micropython-opencv/issues/13
+//
+// TL;DR: The CoreTLSData object gets allocated once, whenever the first OpenCV
+// function that needs it happens to be called. That will only happen from the
+// user's code, after the GC has been initialized, meaning it gets allocated on
+// the GC heap (see `__wrap_malloc()`). If a soft reset occurs, the GC gets
+// reset and overwrites the memory location, but the same memory location is
+// still referenced for the CoreTLSData object, resulting in bogus values, and
+// subsequent `CV_Assert()` calls fail
+//
+// The solution here is to create a global variable that subsequently calls
+// `getCoreTlsData()` to allocate the CoreTLSData object before the GC has
+// been initialized, so it gets allocated on the C heap and persists through
+// soft resets. `getCoreTlsData()` is not publicly exposed, but `theRNG()` is
+// exposed, which just runs `return getCoreTlsData().rng`
+volatile RNG rng = theRNG();
+
+mp_obj_t cv2_core_convertScaleAbs(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_src, ARG_dst, ARG_alpha, ARG_beta };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_src, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_dst, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_alpha, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+        { MP_QSTR_beta, MP_ARG_OBJ, { .u_obj = mp_const_none } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    Mat src = mp_obj_to_mat(args[ARG_src].u_obj);
+    Mat dst = mp_obj_to_mat(args[ARG_dst].u_obj);
+    mp_float_t alpha = args[ARG_alpha].u_obj == mp_const_none ? 1.0 : mp_obj_get_float(args[ARG_alpha].u_obj);
+    mp_float_t beta = args[ARG_beta].u_obj == mp_const_none ? 0.0 : mp_obj_get_float(args[ARG_beta].u_obj);
+
+    // Call the corresponding OpenCV function
+    try {
+        convertScaleAbs(src, dst, alpha, beta);
+    } catch(Exception& e) {
+        mp_raise_msg(&mp_type_Exception, MP_ERROR_TEXT(e.what()));
+    }
+
+    // Return the result
+    return mat_to_mp_obj(dst);
+}
+
 mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
     // Define the arguments
     enum { ARG_src, ARG_lower, ARG_upper, ARG_dst };
diff --git a/src/core.h b/src/core.h
index 6f4b062..71d922f 100644
--- a/src/core.h
+++ b/src/core.h
@@ -1,4 +1,5 @@
 // C headers
 #include "py/runtime.h"
 
+extern mp_obj_t cv2_core_convertScaleAbs(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
 extern mp_obj_t cv2_core_inRange(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
diff --git a/src/highgui.cpp b/src/highgui.cpp
new file mode 100644
index 0000000..dbeb957
--- /dev/null
+++ b/src/highgui.cpp
@@ -0,0 +1,122 @@
+// C++ headers
+#include "opencv2/core.hpp"
+#include "convert.h"
+#include "numpy.h"
+
+// C headers
+extern "C" {
+#include "highgui.h"
+#include "ulab/code/ndarray.h"
+#include "py/mphal.h"
+} // extern "C"
+
+extern const mp_obj_type_t cv2_display_type;
+
+using namespace cv;
+
+mp_obj_t cv2_highgui_imshow(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_display, ARG_img };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_display, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+        { MP_QSTR_img, MP_ARG_REQUIRED | MP_ARG_OBJ, { .u_obj = MP_OBJ_NULL } },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Assume the display object has an `imshow` method and load it. The method
+    // array should be loaded with method[0] as the method object and method[1]
+    // as the self object.
+    mp_obj_t method[3];
+    mp_load_method_maybe(args[ARG_display].u_obj, MP_QSTR_imshow, method);
+
+    // Check if the method was found
+    if(method[0] == MP_OBJ_NULL) {
+        // Method not found, raise an AttributeError
+        mp_raise_msg(&mp_type_AttributeError, MP_ERROR_TEXT("`cv2.imshow()` requires a display object with its own 'imshow()' method, not a window name string."));
+    }
+
+    // Add the image object to the method arguments
+    method[2] = args[ARG_img].u_obj;
+
+    // Call the method with one positional argument (the image we just added)
+    return mp_call_method_n_kw(1, 0, method);
+}
+
+mp_obj_t cv2_highgui_waitKey(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    // Define the arguments
+    enum { ARG_delay };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_delay, MP_ARG_INT, {.u_int = 0} },
+    };
+
+    // Parse the arguments
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    // Convert arguments to required types
+    int delay = args[ARG_delay].u_int;
+
+    // Derived from:
+    // https://github.com/orgs/micropython/discussions/11448
+
+    // Import `sys` and `select` modules
+    mp_obj_t sys_module = mp_import_name(MP_QSTR_sys, mp_const_none, MP_OBJ_NEW_SMALL_INT(0));
+    mp_obj_t select_module = mp_import_name(MP_QSTR_select, mp_const_none, MP_OBJ_NEW_SMALL_INT(0));
+
+    // Get the `sys.stdin` object
+    mp_obj_t stdin_obj = mp_load_attr(sys_module, MP_QSTR_stdin);
+
+    // Get the `select.POLLIN` constant
+    mp_obj_t pollin_obj = mp_load_attr(select_module, MP_QSTR_POLLIN);
+
+    // Call `select.poll()` function to create a poll object
+    mp_obj_t select_poll_method[2];
+    mp_load_method(select_module, MP_QSTR_poll, select_poll_method);
+    mp_obj_t poll_obj = mp_call_method_n_kw(0, 0, select_poll_method);
+
+    // Call `poll.register(sys.stdin, select.POLLIN)`
+    mp_obj_t poll_register_method[4];
+    mp_load_method(poll_obj, MP_QSTR_register, poll_register_method);
+    poll_register_method[2] = stdin_obj;
+    poll_register_method[3] = pollin_obj;
+    mp_call_method_n_kw(2, 0, poll_register_method);
+
+    // Create timeout integer object for next method call. OpenCV uses a delay
+    // of 0 to wait indefinitely, whereas `select.poll` uses -1
+    mp_obj_t timeout = MP_OBJ_NEW_SMALL_INT(delay <= 0 ? -1 : delay);
+
+    // TODO: Some key presses return multiple characters (e.g. the up arrow key
+    // returns 3 characters: "\x1b[A"). Need to handle this case properly.
+    // Should also look into implementing waitKeyEx() for these extra cases
+
+    // Call `poll.poll(timeout)`
+    mp_obj_t poll_poll_method[3];
+    mp_load_method(poll_obj, MP_QSTR_poll, poll_poll_method);
+    poll_poll_method[2] = timeout;
+    mp_obj_t result = mp_call_method_n_kw(1, 0, poll_poll_method);
+
+    // Extract the items from the result list
+    mp_obj_t *items;
+    size_t len;
+    mp_obj_list_get(result, &len, &items);
+
+    // Check if any items were returned
+    if(len == 0) {
+        // If no items were returned, return -1 to indicate no key was pressed
+        return MP_OBJ_NEW_SMALL_INT(-1);
+    }
+
+    // Since something was returned, a key was pressed. We need to extract it
+    // with `sys.stdin.read(1)`
+    mp_obj_t read_method[3];
+    mp_load_method(stdin_obj, MP_QSTR_read, read_method);
+    read_method[2] = MP_OBJ_NEW_SMALL_INT(1);
+    mp_obj_t key_str = mp_call_method_n_kw(1, 0, read_method);
+
+    // Convert the key character to an integer and return it
+    const char *key_chars = mp_obj_str_get_str(key_str);
+    return MP_OBJ_NEW_SMALL_INT(key_chars[0]);
+}
diff --git a/src/highgui.h b/src/highgui.h
new file mode 100644
index 0000000..92f5ed5
--- /dev/null
+++ b/src/highgui.h
@@ -0,0 +1,5 @@
+// C headers
+#include "py/runtime.h"
+
+extern mp_obj_t cv2_highgui_imshow(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
+extern mp_obj_t cv2_highgui_waitKey(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args);
diff --git a/src/opencv_upy.c b/src/opencv_upy.c
index b63a18d..06ffc8c 100644
--- a/src/opencv_upy.c
+++ b/src/opencv_upy.c
@@ -1,4 +1,5 @@
 #include "core.h"
+#include "highgui.h"
 #include "imgproc.h"
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -6,8 +7,13 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 // OpenCV core module
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_convertScaleAbs_obj, 1, cv2_core_convertScaleAbs);
 static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_core_inRange_obj, 3, cv2_core_inRange);
 
+// OpenCV highgui module
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_imshow_obj, 2, cv2_highgui_imshow);
+static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_highgui_waitKey_obj, 0, cv2_highgui_waitKey);
+
 // OpenCV imgproc module
 static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_adaptiveThreshold_obj, 6, cv2_imgproc_adaptiveThreshold);
 static MP_DEFINE_CONST_FUN_OBJ_KW(cv2_imgproc_arrowedLine_obj, 4, cv2_imgproc_arrowedLine);
@@ -208,8 +214,16 @@ static const mp_rom_map_elem_t cv2_module_globals_table[] = {
     // OpenCV core functions
     ////////////////////////////////////////////////////////////////////////////
 
+    { MP_ROM_QSTR(MP_QSTR_convertScaleAbs), MP_ROM_PTR(&cv2_core_convertScaleAbs_obj) },
     { MP_ROM_QSTR(MP_QSTR_inRange), MP_ROM_PTR(&cv2_core_inRange_obj) },
 
+    ////////////////////////////////////////////////////////////////////////////
+    // OpenCV highgui functions
+    ////////////////////////////////////////////////////////////////////////////
+
+    { MP_ROM_QSTR(MP_QSTR_imshow), MP_ROM_PTR(&cv2_highgui_imshow_obj) },
+    { MP_ROM_QSTR(MP_QSTR_waitKey), MP_ROM_PTR(&cv2_highgui_waitKey_obj) },
+
     ////////////////////////////////////////////////////////////////////////////
     // OpenCV imgproc functions
     ////////////////////////////////////////////////////////////////////////////
diff --git a/src/opencv_upy.cmake b/src/opencv_upy.cmake
index f2ea973..66367e6 100644
--- a/src/opencv_upy.cmake
+++ b/src/opencv_upy.cmake
@@ -6,6 +6,7 @@ target_sources(usermod_cv2 INTERFACE
     ${CMAKE_CURRENT_LIST_DIR}/alloc.c
    ${CMAKE_CURRENT_LIST_DIR}/convert.cpp
     ${CMAKE_CURRENT_LIST_DIR}/core.cpp
+    ${CMAKE_CURRENT_LIST_DIR}/highgui.cpp
     ${CMAKE_CURRENT_LIST_DIR}/imgproc.cpp
     ${CMAKE_CURRENT_LIST_DIR}/numpy.cpp
     ${CMAKE_CURRENT_LIST_DIR}/opencv_upy.c
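For reference on the new `cv2.convertScaleAbs()` binding above: it follows the standard OpenCV behavior of computing `saturate(|src * alpha + beta|)` and returning a `uint8` result, which is what `st7789_spi.py` relies on to rescale non-`uint8` images. A minimal usage sketch (values shown are approximate):

    from ulab import numpy as np
    import cv2

    img16 = np.zeros((4, 4), dtype=np.int16)
    img16[0, 0] = -1000
    img8 = cv2.convertScaleAbs(img16, alpha=1/255, beta=127)
    # img8 is uint8; img8[0, 0] comes out around |(-1000 / 255) + 127| ~= 123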