authorjonas <schmid@stettbacher.ch>2020-10-12 08:58:53 +0200
committerjonas <schmid@stettbacher.ch>2020-10-12 08:58:53 +0200
commita5e7e6f616fe301210fea33ec7ff88b7eca60bdb (patch)
tree7d4cb3671e470fb8c2a178d517963e822ee8c581
parentAdd FreeBSD License (2-clause license) with disclaimer. (diff)
downloado3000-python-binding-a5e7e6f616fe301210fea33ec7ff88b7eca60bdb.tar.gz
o3000-python-binding-a5e7e6f616fe301210fea33ec7ff88b7eca60bdb.zip
Simple Python application that uses the O-3000 driver and the Color Image
Pipeline through a Python C extension.
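
A minimal usage sketch of the resulting 'o3000' module (assuming the extension
has been built with setup_c_extension.py and a camera is connected; see
image-record-annotate.py for the full GUI application):

    # build first, e.g.: python3 setup_c_extension.py build
    # then make the resulting build directory visible to Python (PYTHONPATH)
    import time
    import o3000

    o3000.video_init()                                # start the USB video thread
    o3000.video_xml_send("<camera><get><model_id></model_id></get></camera>")
    time.sleep(1)                                     # give the pipeline time to process a first frame
    raw, rgb, calibrated = o3000.video_images_get()   # three numpy arrays
    o3000.video_deinit()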
-rw-r--r--  .gitignore                   2
-rw-r--r--  c_extension.c              425
-rw-r--r--  helpers.c                  407
-rw-r--r--  helpers.h                   60
-rw-r--r--  image-record-annotate.py   332
-rw-r--r--  setup_c_extension.py         8
6 files changed, 1234 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..be1a124
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+# python c-extension build directory
+build
diff --git a/c_extension.c b/c_extension.c
new file mode 100644
index 0000000..00da503
--- /dev/null
+++ b/c_extension.c
@@ -0,0 +1,425 @@
+/*
+Copyright 2020 - Stettbacher Signal Processing AG
+
+Author: Jonas Schmid
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or other
+materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#define PY_SSIZE_T_CLEAN  /* make the "s#" format in PyArg_ParseTuple() use Py_ssize_t lengths */
+#include <Python.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <o3000/o3000.h>
+#include <o3000/color_pipe.h>
+
+#include "helpers.h"
+
+#include <numpy/arrayobject.h>  /* include path is provided by numpy.get_include() in setup_c_extension.py */
+
+
+
+#define DEFAULT_FRAME_RATE 10 ///< default frame rate
+#define MAX_FRAME_WIDTH 1280 ///< O-3000 image width in pixels
+#define MAX_FRAME_HEIGHT 960 ///< O-3000 image height in pixels
+
+
+/**
+ * default video cache size in bytes
+ * use multiple of maximum image size for better performance
+ */
+#define DEFAULT_VIDEO_CACHE_SIZE (MAX_FRAME_WIDTH*MAX_FRAME_HEIGHT*5)
+
+#define MAX_REC_THREAD 8 ///< maximum possible threads
+
+
+
+/**
+ * thread data definition structure
+ */
+struct recpthread_data_t {
+ int is_running; ///< flag indicating whether thread is running or not
+ int id; ///< thread data ID
+ unsigned long long frame_cnt; ///< frame counter of the processed image
+ pthread_cond_t cond; ///< thread condition variable used for synchronization
+ pthread_mutex_t mutex; ///< mutex used by pthread_cond_wait()
+ struct color_pipe_t *color_pipe; ///< allocated color pipeline for this thread
+ unsigned char *img_raw; ///< pointer to raw input image (from sensor)
+ struct img_header_t img_header; ///< image header corresponding to raw input image
+ char filename[256]; ///< image filename without extensions
+};
+
+static struct recpthread_data_t recthread_data[MAX_REC_THREAD]; ///< recording thread array
+static struct recpthread_data_t *recthread_data_recent; ///< most recently processed thread data (used by video_images_get)
+
+static pthread_t pthread_video; ///< video handling thread running video_main() and o3000_connect()
+
+static int cam_session; ///< camera session ID
+static unsigned long long frame_cnt; ///< image counter used to number processed frames
+static int is_color; ///< not 0 if color camera is connected
+
+
+static int log_level = O3000_LOG_ERROR;
+static int video_cache_size = DEFAULT_VIDEO_CACHE_SIZE;
+static int num_recthread = MAX_REC_THREAD-1;
+static int prio_rec = PRIO_TIME_SLICED;
+static int prio_video = PRIO_TIME_SLICED;
+static float fps = DEFAULT_FRAME_RATE;
+static int awb_enable = 0;
+static int ccm_enable = 0;
+static int lense_enable = 1;
+static int lense_nr = 2; // C-Mount 6mm 1/2.7" IR MP
+static int sharp_enable = 0;
+static int gamma_enable = 0;
+static float gamma_fact = 1.0;
+static int img_width = MAX_FRAME_WIDTH;
+static int img_height = MAX_FRAME_HEIGHT;
+
+
+/**
+ * O-3000 log handler
+ *
+ * @param id session ID
+ * @param msg logging message from O-3000 driver
+ */
+static void log_handling(int id, char* msg) {
+ printf("%s: %s", __func__, msg);
+}
+
+/**
+ * XML handler
+ * This function is called when the host receives an XML message from the camera.
+ *
+ * @param id session ID
+ * @param msg message string
+ * @param len string length
+ */
+static void xml_handling(int id, char* msg, int len) {
+ printf("%s: %s", __func__, msg);
+}
+
+/**
+ * Thread used for image processing
+ *
+ * @param ptr Pointer to thread ID
+ */
+static void *rec_thread(void *ptr) {
+
+	int id = *((int*)ptr);
+	int err;
+	struct recpthread_data_t *data = &recthread_data[id];
+
+	// pthread_cond_wait() requires the associated mutex to be locked by the caller;
+	// it is released while waiting and re-acquired on wake-up
+	pthread_mutex_lock(&(data->mutex));
+
+	while(1) {
+
+		data->is_running = 0;
+
+		err = pthread_cond_wait(&(data->cond), &(data->mutex));
+		if(err) {
+			printf("pthread_cond_wait: %s\n", strerror(err));
+			// TODO do something
+			continue;
+		}
+ recthread_data_recent = data; // TODO TODO replace by looking for the oldest frame
+ // TODO copy data to cache
+
+ color_pipe_process(data->color_pipe, data->img_raw, &(data->img_header));
+ is_color = data->color_pipe->is_color;
+ }
+ pthread_exit(NULL);
+}
+
+/**
+ * This callback is called by the underlying O-3000 driver after a complete image frame has been received.
+ * It should finish quickly, before the next image arrives. Therefore, CPU intensive work
+ * is done in the recording threads.
+ *
+ * @param id session ID
+ * @param buf image frame data
+ * @param img_header image header
+ */
+static void video_handling(int id, unsigned char *buf, struct img_header_t *img_header) {
+
+ int i, thdrec_index;
+
+ /*
+ * Find next thread ready for processing image data.
+ */
+ for(i = 0; i < num_recthread; i++) {
+ if(!recthread_data[i].is_running) {
+ // at least one thread is not running
+ break;
+ }
+ }
+
+ // TODO TODO look for a recthread with an older frame
+
+ if(i >= num_recthread) {
+ printf("%s: All image processing threads are busy --> skip image frame.\n", __func__);
+ return;
+ }
+
+ // save next free thread ID
+ thdrec_index = i;
+
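+	// hand the frame over to the selected worker thread: mark it busy, pass the raw
+	// buffer and its header, then wake it up via its condition variable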
+ recthread_data[thdrec_index].is_running = 1;
+ recthread_data[thdrec_index].img_raw = buf;
+ memcpy(&recthread_data[thdrec_index].img_header, img_header, sizeof(struct img_header_t));
+ recthread_data[thdrec_index].frame_cnt = frame_cnt;
+ frame_cnt++;
+ pthread_cond_signal(&recthread_data[thdrec_index].cond);
+}
+
+
+/**
+ * Video thread handling images received by USB.
+ *
+ * @param ptr not used
+ * @return not used
+ */
+static void *video_main(void *ptr) {
+
+ int ret, num_camera, i, msg_len, num_recthread = 2; // TODO TODO
+ char msg[2048];
+ pthread_t pthread_imgproc[MAX_REC_THREAD];
+
+ // create bunch of image recording thread
+// printf("%s: create %d threads for processing and recording\n", __func__, num_recthread);
+ for(i = 0; i < num_recthread; i++) {
+
+ recthread_data[i].id = i;
+
+		ret = pthread_cond_init(&recthread_data[i].cond, NULL);
+		if(ret) {
+			printf("pthread_cond_init: %s\n", strerror(ret));
+			goto _abort_video_main1;
+		}
+
+		ret = pthread_mutex_init(&recthread_data[i].mutex, NULL);
+		if(ret) {
+			printf("pthread_mutex_init: %s\n", strerror(ret));
+			goto _abort_video_main1;
+		}
+
+ // setup color image processing pipeline with 12 bits per channel
+ if(color_pipe_open(&recthread_data[i].color_pipe, MAX_FRAME_HEIGHT, MAX_FRAME_WIDTH, 12)) {
+ printf("%s: Initializing pipeline for thread %d failed\n", __func__, i);
+ goto _abort_video_main1;
+ }
+
+ color_pipe_stageconf_awb(recthread_data[i].color_pipe, awb_enable, 0.3, 0.01);
+ color_pipe_stageconf_cam_calib(recthread_data[i].color_pipe, lense_enable, (enum o3000_lenses_t)lense_nr);
+ color_pipe_stageconf_color_calib(recthread_data[i].color_pipe, ccm_enable, CCM_PRESET_O3020);
+ color_pipe_stageconf_sharp(recthread_data[i].color_pipe, sharp_enable, 5, SHARP_ALG_LOCAL, 94.0);
+ color_pipe_stageconf_gamma(recthread_data[i].color_pipe, gamma_enable, gamma_fact);
+
+ // start image recording thread
+ ret = generic_start_thread(&pthread_imgproc[i], rec_thread, prio_rec, (void*)&recthread_data[i].id);
+ if(ret) {
+ printf("%s: starting recording thread %d failed\n", __func__, i);
+ goto _abort_video_main1;
+ }
+ }
+
+
+ /*
+ * Setup camera session
+ */
+ cam_session = o3000_init(O3000_VID, O3000_PID, video_cache_size, xml_handling, video_handling, log_handling, log_level);
+ if(cam_session < 0) {
+ printf("%s: Error opening new camera session (code %d)\n", __func__, cam_session);
+ goto _abort_video_main2;
+ }
+
+ printf("%s: new session ID %d\n", __func__, cam_session);
+
+ num_camera = o3000_device_discovery(cam_session);
+ if(num_camera < 0) {
+ printf("%s: device discovery error (code %d)\n", __func__, num_camera);
+ goto _abort_video_main2;
+ }
+
+ if(num_camera == 0) {
+ printf("%s: no camera connected to the system\n", __func__);
+ goto _abort_video_main2;
+ }
+
+ printf("%s: %d cameras connected to the system\n", __func__, num_camera);
+
+ // prepare configuration message
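+	// the XML below sets the sensor window to the full frame, sets the frame rate
+	// and finally starts the image stream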
+ sprintf(msg, "<camera>"
+ "<set>"
+ "<window>(0 %d 0 %d)</window>"
+ "</set>"
+ "</camera>"
+ "<camera>"
+ "<set>"
+ "<frame_rate>%f</frame_rate>"
+ "</set>"
+ "</camera>"
+ "<camera>"
+ "<stream></stream>"
+ "</camera>",
+ img_width-1, img_height-1, fps);
+
+ msg_len = strlen(msg);
+
+ // establish connection to first camera which is not in use
+ for(i = 0; i < num_camera; i++) {
+ printf("%s: establish connection to camera %d\n", __func__, i);
+ ret = o3000_connect(cam_session, i, msg, msg_len);
+ if(ret == O3000_ERROR_BUSY) {
+ printf("%s: device %d is already in use\n", __func__, i);
+ }
+		else {
+			// o3000_connect() returns when the streaming connection ends or on error
+			printf("%s: o3000_connect returned %d\n", __func__, ret);
+			break;
+		}
+ }
+
+ // cleanup
+ for(i = 0; i < num_recthread; i++) {
+ pthread_cancel(pthread_imgproc[i]);
+ pthread_join(pthread_imgproc[i], NULL);
+ printf("%s: recording thread %d joined\n", __func__, i);
+ }
+
+ for(i = 0; i < num_recthread; i++) {
+ pthread_cond_destroy(&recthread_data[i].cond);
+ pthread_mutex_destroy(&recthread_data[i].mutex);
+ }
+
+_abort_video_main2:
+ o3000_exit(cam_session);
+
+_abort_video_main1:
+ for(i = 0; i < num_recthread; i++) {
+ if(recthread_data[i].color_pipe != NULL) {
+ color_pipe_close(recthread_data[i].color_pipe);
+ recthread_data[i].color_pipe = NULL;
+ }
+ }
+
+ // TODO TODO do_exit = 1;
+ //kill(0, SIGTERM);
+ printf("video thread exits...\n");
+ pthread_exit(NULL);
+}
+
+static PyObject* video_images_get(PyObject* self, PyObject* args) {
+
+	npy_intp dims[3];
+
+	if (recthread_data_recent == NULL) {
+		PyErr_SetString(PyExc_RuntimeError, "no image frame has been processed yet");
+		return NULL;
+	}
+
+	struct color_pipe_t *cp = recthread_data_recent->color_pipe;
+
+	if (!is_color) {
+		PyErr_SetString(PyExc_NotImplementedError, "not implemented for gray images");
+		return NULL;
+	}
+
+ dims[0] = cp->height;
+ dims[1] = cp->width;
+ dims[2] = 3;
+
+
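+	// NOTE: the arrays below wrap the pipeline buffers directly (no copy) and assume
+	// 8 bit per channel data; their content may change as soon as the next frame is processed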
+ PyObject *raw = PyArray_SimpleNewFromData(2, dims, NPY_UINT8, recthread_data_recent->img_raw);
+ PyObject *rgb = PyArray_SimpleNewFromData(3, dims, NPY_UINT8, cp->debayer_data.img_rgb);
+ PyObject *dist = PyArray_SimpleNewFromData(3, dims, NPY_UINT8, cp->img_out);
+
+ PyObject *list = PyList_New(3);
+ PyList_SetItem(list, 0, raw);
+ PyList_SetItem(list, 1, rgb);
+ PyList_SetItem(list, 2, dist);
+
+ return list;
+
+ //return PyArray_SimpleNewFromData(3, dims, NPY_UINT8, cp->img_out);
+}
+
+/**
+ * Start the video handling thread and with it the camera session.
+ *
+ * @param self module object (unused)
+ * @param args no arguments expected
+ * @return None on success, NULL with an exception set on error
+ */
+static PyObject* video_init(PyObject* self, PyObject* args) {
+ int ret;
+ // create video handling thread
+ ret = generic_start_thread(&pthread_video, video_main, prio_video, NULL);
+	if(ret) {
+		PyErr_SetString(PyExc_RuntimeError, "starting video handling thread failed");
+		return NULL;
+	}
+ Py_RETURN_NONE;
+}
+
+static PyObject* video_deinit(PyObject* self, PyObject* args) {
+ o3000_disconnect(cam_session);
+ pthread_join(pthread_video, NULL);
+ o3000_exit(cam_session);
+ cam_session = -1;
+
+ sync();
+ Py_RETURN_NONE;
+}
+
+static PyObject* video_xml_send(PyObject* self, PyObject* args) {
+ Py_ssize_t count;
+	const char* str;
+ if (!PyArg_ParseTuple(args, "s#", &str, &count)) {
+ return NULL;
+ }
+ printf("video send xml: %s\n", str);
+ o3000_send_xml(cam_session, str, count);
+ Py_RETURN_NONE;
+}
+
+/** module's function definition struct */
+static PyMethodDef myMethods[] = {
+ { "video_init", video_init, METH_NOARGS, "Initialize and start the O-3000 pipeline" },
+ { "video_deinit", video_deinit, METH_NOARGS, "Deinitialize the O-3000 pipeline" },
+	{ "video_images_get", video_images_get, METH_NOARGS, "Get the most recently captured images: raw (not debayered), color (debayered), lens-calibrated color" },
+	{ "video_xml_send", video_xml_send, METH_VARARGS, "Send an XML message to the camera, e.g. for configuration" },
+ { NULL, NULL, 0, NULL } // to signal the end of our method list
+};
+
+
+/** module definition struct */
+static struct PyModuleDef o3000 = {
+ PyModuleDef_HEAD_INIT,
+ "o3000",
+ "O-3000 Python API",
+ -1,
+ myMethods
+};
+
+/* module initialization */
+PyMODINIT_FUNC PyInit_o3000(void) {
+	import_array();  // initialize the numpy C API; returns NULL from this function on failure
+ return PyModule_Create(&o3000);
+}
diff --git a/helpers.c b/helpers.c
new file mode 100644
index 0000000..533769d
--- /dev/null
+++ b/helpers.c
@@ -0,0 +1,407 @@
+/*
+Copyright 2019 - Stettbacher Signal Processing AG
+
+Author: Patrick Roth
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or other
+materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <string.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <errno.h>
+#include <tiffio.h>
+#include <stdlib.h>
+
+#include "helpers.h"
+
+
+
+/**
+ * Save image as JPEG format
+ *
+ * @param filename filename as string
+ * @param format save image with this format
+ * @param color_pipe piped out image data
+ * @param compress_img 0 if no compression is used
+ * @return 0 on success, -1 on error
+ */
+static int save_jpeg(char *filename, enum frame_format_t format, struct color_pipe_t *__restrict__ color_pipe, int compress_img) {
+ printf("%s: saving JPEG image failed --> not implemented yet!!\n", __func__);
+ return -1;
+}
+
+
+/**
+ * Save image with TIFF format
+ *
+ * @param filename filename as string
+ * @param format save image with this format
+ * @param color_pipe piped out image data
+ * @param compress_img 0 if no compression is used
+ * @return 0 on success, -1 on error
+ */
+static int save_tiff(char *filename, enum frame_format_t format, struct color_pipe_t *__restrict__ color_pipe, int compress_img) {
+
+ TIFF *tif;
+ int samples_per_pixel;
+ int i, u, height, width;
+	uint8_t *in_img8 = (uint8_t*)color_pipe->img_out;
+	uint16_t *in_img16 = (uint16_t*)color_pipe->img_out;
+ uint16_t *linebuf16;
+ int bitpersample;
+
+
+ if(color_pipe->is_color) {
+ samples_per_pixel = 3;
+ }
+ else {
+ samples_per_pixel = 1;
+ }
+ height = color_pipe->height;
+ width = color_pipe->width;
+
+ if(color_pipe->bit_channel == 8) {
+ bitpersample = 8;
+ }
+ else if(color_pipe->bit_channel == 12) {
+ bitpersample = 16;
+ }
+ else {
+ printf("%s: %d bits per channel is not supported yet\n", __func__, color_pipe->bit_channel);
+ return -1;
+ }
+
+	tif = TIFFOpen(filename, "w");
+	if(tif == NULL) {
+		printf("%s: opening '%s' for writing failed\n", __func__, filename);
+		return -1;
+	}
+ TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, width);
+ TIFFSetField(tif, TIFFTAG_IMAGELENGTH, height);
+ TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, samples_per_pixel);
+ TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, bitpersample);
+ TIFFSetField(tif, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
+ TIFFSetField(tif, TIFFTAG_ROWSPERSTRIP, width*samples_per_pixel);
+ TIFFSetField(tif, TIFFTAG_XRESOLUTION, (float)width);
+ TIFFSetField(tif, TIFFTAG_YRESOLUTION, (float)height);
+ TIFFSetField(tif, TIFFTAG_RESOLUTIONUNIT, RESUNIT_NONE);
+ TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, 0);
+ TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
+ if(compress_img) {
+ TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_LZW);
+ }
+ else {
+ TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
+ }
+ TIFFSetField(tif, TIFFTAG_FILLORDER, FILLORDER_MSB2LSB);
+
+ if(color_pipe->is_color) {
+ TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
+ }
+ else {
+ TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_MINISBLACK);
+ }
+
+ if(bitpersample == 8) {
+ // 8 bit per channel RGB image
+ for(i = 0; i < height; i++) {
+ TIFFWriteScanline(tif, (void*)&(in_img8[i*width*samples_per_pixel]), i, 0);
+ }
+ }
+ else if(bitpersample == 16) {
+ // 12 bit per channel RGB image (upscaling from 12 to 16 bit needed)
+ linebuf16 = malloc(width*samples_per_pixel*2);
+		if(linebuf16 == NULL) {
+			printf("%s: allocating memory failed: %s\n", __func__, strerror(errno));
+			TIFFClose(tif);
+			return -1;
+		}
+
+ for(i = 0; i < height; i++) {
+ // upscale 12 to 16 bit
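+			// shifting left by 4 maps the 12 bit range 0..4095 onto the 16 bit range 0..65520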
+ for(u = 0; u < width*samples_per_pixel; u++) {
+ linebuf16[u] = in_img16[i*width*samples_per_pixel+u] << 4;
+ }
+ TIFFWriteScanline(tif, (void*)linebuf16, i, 0);
+ }
+ free(linebuf16);
+ }
+
+ TIFFClose(tif);
+ return 0;
+}
+
+
+/**
+ * Save image frame with given file format.
+ *
+ * @param filename filename as string
+ * @param format save image with this format
+ * @param color_pipe piped out image data
+ * @param compress 0 if no compression is used
+ * @return 0 on success, -1 on error
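+ *
+ * Example (sketch): save_imgframe("frame_0001", IMGFRAME_FORMAT_TIF, pipe, 1)
+ * writes the piped-out image to 'frame_0001.tif' using LZW compression.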
+ */
+int save_imgframe(char *filename, enum frame_format_t format, struct color_pipe_t *color_pipe, int compress) {
+
+ char filename_ext[256];
+
+ switch(format) {
+ case IMGFRAME_FORMAT_TIF:
+ sprintf(filename_ext, "%s.tif", filename);
+ save_tiff(filename_ext, format, color_pipe, compress);
+ break;
+
+ case IMGFRAME_FORMAT_JPEG:
+ sprintf(filename_ext, "%s.jpeg", filename);
+ save_jpeg(filename_ext, format, color_pipe, compress);
+ break;
+
+ default:
+			printf("%s: saving image with format %d is not implemented yet\n", __func__, format);
+ return -1;
+ }
+ return 0;
+}
+
+
+/**
+ * Tokenise (split) string
+ *
+ * This function uses strtok_r and therefore it's thread safe.
+ *
+ * @param s input string
+ * @param argv On return: array of string pointers containing the substrings
+ * @param argv_len string array length
+ * @param delim string delimiter used to tokenize input string ('\0' terminated)
+ * @return number of substrings split off, or -1 on error
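+ *
+ * Example (sketch): tokenise(line, argv, 8, " ") splits 'line' at spaces into at most
+ * 8 substrings and returns their count; note that the input string is modified in place.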
+ */
+
+int tokenise(char *s, char **argv, int argv_len, char *delim) {
+
+ char *str;
+ int argc=0, i;
+ char *save_ptr;
+
+ for(i = 0; i < argv_len; i++) {
+ argv[i] = NULL;
+ }
+
+ str = strtok_r(s, delim, &save_ptr);
+
+ while((str != NULL) && (argc < argv_len)) {
+ argv[argc++] = str;
+ str = strtok_r(NULL,delim,&save_ptr);
+ }
+
+ if(argc > argv_len) {
+ return -1;
+ }
+ return argc;
+}
+
+
+
+/**
+ * generic function to start thread
+ * This function is used to start a new thread. Normally all threads are scheduled with real time policy. If the
+ * priority is defined as @ref PRIO_TIME_SLICED a normal scheduling policy is used (no real time).
+ *
+ * @param thr On return: The ID of the new thread.
+ * @param func The start function.
+ * @param prio The thread priority.
+ * @param param The thread parameters.
+ * @return 0 on success, otherwise -1.
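+ *
+ * Example (sketch): generic_start_thread(&tid, rec_thread, PRIO_TIME_SLICED, &arg) starts
+ * rec_thread() with the normal time-sliced policy, whereas a priority of e.g. 5 requests
+ * SCHED_FIFO with priority sched_get_priority_max(SCHED_FIFO) - 5.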
+ */
+int generic_start_thread(pthread_t *thr, void*(*func)(void*), int prio, void *param) {
+ int r;
+ pthread_attr_t attr;
+ sigset_t bmask, oldmask;
+ struct sched_param sp;
+
+ r = pthread_attr_init(&attr); // create attribute object
+ if(r) {
+ printf("%s: pthread_attr_init failed: %s\n", __func__, strerror(r));
+ return -1;
+ }
+
+ if(prio == PRIO_TIME_SLICED) {
+ r = pthread_attr_setschedpolicy(&attr, SCHED_OTHER);
+ sp.sched_priority = 0;
+ }
+ else {
+ r = pthread_attr_setschedpolicy(&attr, SCHED_FIFO); // set realtime scheduling policy
+ sp.sched_priority = sched_get_priority_max(SCHED_FIFO) - prio;
+ }
+ if(r) {
+ printf("%s: pthread_attr_setschedpolicy failed: %s\n", __func__, strerror(r));
+ goto _generic_start_thread_abort;
+ }
+ r = pthread_attr_setschedparam(&attr, &sp);
+ if(r) {
+ printf("%s: pthread_attr_setschedparam failed: %s\n", __func__, strerror(r));
+ goto _generic_start_thread_abort;
+ }
+ r = pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
+ if(r) {
+ printf("%s: pthread_attr_setinheritsched failed: %s\n", __func__, strerror(r));
+ goto _generic_start_thread_abort;
+ }
+
+	// temporarily block some signals so that the new thread inherits a mask with them blocked
+ if(sigemptyset(&bmask)) {
+ printf("%s: sigemptyset failed\n", __func__);
+ goto _generic_start_thread_abort;
+ }
+ if(sigaddset(&bmask, SIGTERM)) {
+ printf("%s: sigaddset (SIGTERM) failed\n", __func__);
+ goto _generic_start_thread_abort;
+ }
+ if(sigaddset(&bmask, SIGINT)) {
+ printf("%s: sigaddset (SIGINT) failed\n", __func__);
+ goto _generic_start_thread_abort;
+ }
+ if(sigaddset(&bmask, SIGALRM)) {
+		printf("%s: sigaddset (SIGALRM) failed\n", __func__);
+ goto _generic_start_thread_abort;
+ }
+ if(sigaddset(&bmask, SIGCHLD)) {
+ printf("%s: sigaddset (SIGCHLD) failed\n", __func__);
+ goto _generic_start_thread_abort;
+ }
+ if(sigaddset(&bmask, SIGUSR1)) {
+ printf("%s: sigaddset (SIGUSR1) failed\n", __func__);
+ goto _generic_start_thread_abort;
+ }
+ if(sigaddset(&bmask, SIGUSR2)) {
+ printf("%s: sigaddset (SIGUSR2) failed\n", __func__);
+ goto _generic_start_thread_abort;
+ }
+ if(sigprocmask(SIG_BLOCK, &bmask, &oldmask)) {
+ printf("%s: sigprocmask failed: %s\n", __func__, strerror(errno));
+ goto _generic_start_thread_abort;
+ }
+
+ //create thread
+ r = pthread_create(thr, &attr, func, param);
+ if(r) {
+ printf("%s: pthread_create failed: %s\n", __func__, strerror(r));
+ goto _generic_start_thread_abort;
+ }
+
+	// restore the old signal mask of the calling thread
+ if(sigprocmask (SIG_SETMASK, &oldmask, NULL)) {
+ printf("%s: sigprocmask failed: %s\n", __func__, strerror(errno));
+ goto _generic_start_thread_abort;
+ }
+
+ pthread_attr_destroy(&attr); //release attribute object
+ return 0;
+
+_generic_start_thread_abort:
+ pthread_attr_destroy(&attr); // release attribute object
+ return -1;
+}
+
+
+/**
+ * Execute a shell command.
+ * This function is thread safe.
+ *
+ * @param cmd shell command to be executed
+ * @param retstr On return: the output of the shell command. If NULL, no output is captured.
+ * @param len maximum length of return string
+ * @return -1 if an error is detected
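+ *
+ * Example (sketch): exec_shell_command("uname -r", buf, sizeof(buf)) stores the kernel
+ * release string in buf.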
+ */
+int exec_shell_command(char *cmd, char *retstr, int len) {
+ FILE *substream;
+ char buf[512];
+ int ret, len_tmp;
+
+ if(retstr == NULL) {
+ ret = system(cmd);
+ }
+ else {
+ substream = popen(cmd, "r"); // start subprocess with streampipe connection
+ if (substream == NULL) {
+ printf("%s: popen failed: %s\n", __func__, strerror(errno));
+ return -1;
+ }
+
+		if(setvbuf(substream, NULL, _IOLBF, 0)) {  // set line buffering for the stream
+			printf("%s: setvbuf failed: %s\n", __func__, strerror(errno));
+			pclose(substream);
+			return -1;
+		}
+
+ if (len > 0) {
+ retstr[0] = '\0';
+ }
+ while(fgets(buf, sizeof(buf), substream) != NULL) {
+ len_tmp = strlen(buf);
+ if(len > 0) {
+ strncpy(retstr, buf, len);
+ retstr[len-1] = '\0';
+ retstr += len_tmp;
+ len -= len_tmp;
+ }
+ }
+ ret = pclose(substream);
+ }
+ return ret;
+}
+
+
+/**
+ * Set the process with the given name to real-time scheduling with the defined priority.
+ * 0 means highest priority.
+ *
+ * NOTE
+ * Time critical scheduling policy SCHED_RR (real-time round-robin) is used!
+ *
+ * @param p_name process name (as reported by the 'ps' command)
+ * @param prio priority
+ * @return 0 on success, -1 on error
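+ *
+ * Example (sketch): setRealtimeScheduling("myapp", 0) gives the process named 'myapp'
+ * the highest SCHED_RR priority.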
+ */
+int setRealtimeScheduling(char *p_name, int prio) {
+
+ char shell_cmd[256];
+ int pid;
+ char str[256];
+ struct sched_param sp;
+
+ // check string length of given process name
+ if(strlen(p_name) > (sizeof(shell_cmd)-30)) {
+ printf("%s: process name is too long\n", __func__);
+ return -1;
+ }
+
+ sprintf(shell_cmd, "pidof %s", p_name);
+ if(exec_shell_command(shell_cmd, str, sizeof(str))) {
+ printf("%s: Setting realtime scheduling for '%s' failed: '%s'\n", __func__, p_name, str);
+ return -1;
+ }
+
+ pid = atoi(str);
+ sp.sched_priority = sched_get_priority_max(SCHED_RR) - prio;
+ if(sched_setscheduler(pid, SCHED_RR, &sp)) {
+ printf("%s: sched_setscheduler failed: %s\n", __func__, strerror(errno));
+ return -1;
+ }
+ return 0;
+}
diff --git a/helpers.h b/helpers.h
new file mode 100644
index 0000000..5ade442
--- /dev/null
+++ b/helpers.h
@@ -0,0 +1,60 @@
+/*
+Copyright 2019 - Stettbacher Signal Processing AG
+
+Author: Patrick Roth
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or other
+materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef _HELPERS_H
+#define _HELPERS_H
+
+#include <pthread.h>
+#include <o3000/color_pipe.h>
+
+
+/**
+ * Use this priority to define a time sliced scheduling policy (no real time)
+ */
+#define PRIO_TIME_SLICED 1000
+
+enum frame_format_t {
+ IMGFRAME_FORMAT_TIF = 0,
+ IMGFRAME_FORMAT_JPEG,
+};
+
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+int tokenise(char *s, char **argv, int argv_len, char *delim);
+int save_imgframe(char *filename, enum frame_format_t format, struct color_pipe_t *color_pipe, int compress);
+int generic_start_thread(pthread_t *thr, void*(*func)(void*), int prio, void *param);
+int exec_shell_command(char *cmd, char *retstr, int len);
+int setRealtimeScheduling(char *p_name, int prio);
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
+
+#endif // _HELPERS_H
diff --git a/image-record-annotate.py b/image-record-annotate.py
new file mode 100644
index 0000000..d206bca
--- /dev/null
+++ b/image-record-annotate.py
@@ -0,0 +1,332 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import sys
+#from tkinter import *
+import tkinter as tk
+from tkinter import ttk
+from PIL import ImageTk, Image
+import numpy as np
+import o3000
+import cv2
+from datetime import datetime
+
+
+import code  # for dropping into an interactive shell at any point: code.interact(local=dict(globals(), **locals())); press Ctrl+D to continue the script
+
+
+IMAGE_WIDTH = 1280
+IMAGE_HEIGHT = 960
+
+
+class ImageLabel(tk.Label):
+ def __init__(self, *args, **kwargs):
+ super(ImageLabel,self).__init__(*args, **kwargs)
+ self.image_update(255*np.ones([IMAGE_HEIGHT,IMAGE_WIDTH,3], dtype=np.uint8))
+
+ def image_update(self, image):
+ '''
+        :param image: a numpy array or a PIL image object, refer to https://pillow.readthedocs.io/en/stable/reference/Image.html
+ '''
+ if isinstance(image, Image.Image):
+ pass
+ elif isinstance(image, np.ndarray):
+ #code.interact(local=dict(globals(), **locals()))
+ image = Image.fromarray(image)
+ else:
+            raise Exception("illegal image format, must be either a numpy array or a PIL image object")
+ self._image = ImageTk.PhotoImage(image)
+ self.configure(image=self._image)
+
+
+class StringEntry(tk.Entry):
+ def __init__(self, master, *args, **kwargs):
+ self._variable = tk.StringVar()
+ self.name = ''
+        super(StringEntry,self).__init__(master=master, textvariable=self._variable, *args, **kwargs)
+
+ @property
+ def name(self):
+ return self._variable.get()
+
+ @name.setter
+ def name(self, name):
+ self._variable.set(name)
+
+class FloatEntry(tk.Entry):
+ def __init__(self, master, scaling = 1.0, *args, **kwargs):
+ self._scaling = scaling
+ self._variable = tk.StringVar()
+ self.value = ''
+ vcmd = (master.register(self.validate_digit))
+        super(FloatEntry,self).__init__(master=master, textvariable=self._variable, *args, **kwargs,
+                                        validate='all', validatecommand=(vcmd, '%P'))
+ @property
+ def scaling(self):
+ return self._scaling
+ @property
+ def value(self):
+ try:
+ return float(self._variable.get())/self.scaling
+ except ValueError:
+ return None
+ @value.setter
+ def value(self, value):
+ if self.validate_digit(value):
+            try: self._variable.set(float(value)*self.scaling)
+            except ValueError: print('FloatEntry ignores value {}, it is not a float'.format(value))
+        else:
+            print('FloatEntry ignores value {}'.format(value))
+ def validate_digit(self, P):
+ if P == '': return True
+ try:
+ _val = float(P)
+ if _val < 0.0: return False
+ else: return True
+ except ValueError: return False
+
+
+class ImageControlPanel(tk.Frame):
+ def __init__(self, save_fcn, *args, **kwargs):
+ super(ImageControlPanel,self).__init__(*args, **kwargs)
+
+ tk.Label(self,text='Filename: ').grid(column=0, row=0)
+ self.filename_entry = StringEntry(self, bg='white')
+ self.filename_entry.grid(column=1, row=0)
+ tk.Label(self,text='.tiff').grid(column=2, row=0)
+
+ def btn_save_fcn():
+ save_fcn(self.filename_entry.name)
+
+ self.save_btn = tk.Button(self, text="save", command=btn_save_fcn)
+ self.save_btn.grid(column=1, row=1)
+
+
+class CamControlPanel(tk.Frame):
+ def __init__(self, *args, **kwargs):
+ super(CamControlPanel,self).__init__(*args, **kwargs)
+
+ tk.Label(self, text="Sensitivity:").grid(column=0, row=0)
+ self.sensitivity = FloatEntry(self, width=7, bg='white')
+ self.sensitivity.grid(column=1, row=0)
+ tk.Label(self, text="%").grid(column=2, row=0)
+
+
+ tk.Label(self, text="Exposure Time:").grid(column=0, row=1)
+ self.exposure_time = FloatEntry(self, scaling=1000.0, width=7, bg='white')
+ self.exposure_time.grid(column=1, row=1)
+ tk.Label(self, text="ms").grid(column=2, row=1)
+
+ def configure():
+ o3000.video_xml_send("<camera><set><acquisition><mode>time</mode><time>{:f}</time></acquisition></set></camera>".format(self.exposure_time.value))
+ o3000.video_xml_send("<camera><set><acquisition><mode>sensitivity</mode><sensitivity>{:f}</sensitivity></acquisition></set></camera>".format(self.sensitivity.value))
+
+ self.btn_configure = tk.Button(self, text="send configuration", command=configure)
+ self.btn_configure.grid(column=0, row=3, columnspan=3)
+
+ # TODO TODO these are hardcoded default values
+ self.exposure_time.value = 0.02
+ self.sensitivity.value = 1.5
+ configure()
+
+
+
+
+
+class AnnotationPanel(tk.Frame):
+ _PERSONS = ["nobody", "kris", "stefan", "sophia", "jonas", "juerg", "petar"]
+    _POSES = ["no pose", "sleeping", "typing", "writing", "tinkering"]
+
+ def __init__(self, *args, **kwargs):
+ super(AnnotationPanel,self).__init__(*args, **kwargs)
+
+
+ tk.Label(self,text='Date:').grid(column=0, row=0)
+ self.date_var = tk.StringVar()
+ tk.Label(self,textvariable=self.date_var).grid(column=1, row=0)
+
+ tk.Label(self,text='Time:').grid(column=0, row=1)
+ self.time_var = tk.StringVar()
+ tk.Label(self,textvariable=self.time_var).grid(column=1, row=1)
+
+
+ tk.Label(self,text='Person:').grid(column=0, row=2, sticky='w')
+ self.person_list = tk.Listbox(self, exportselection=False)
+ self.person_list.grid(column=0, row=3)
+
+ [self.person_list.insert(tk.END, item) for item in self._PERSONS]
+
+ tk.Label(self,text='Pose:').grid(column=1, row=2, sticky='w')
+ self.pose_list = tk.Listbox(self, selectmode=tk.SINGLE, exportselection=False)
+ self.pose_list.grid(column=1, row=3)
+ [self.pose_list.insert(tk.END, item) for item in self._POSES]
+
+ tk.Label(self,text='Comment: ').grid(column=0, row=4, columnspan=2)
+ self.comment_text = tk.Text(self, bg='white', width=50, height=5)
+ self.comment_text.grid(column=0, row=5, columnspan=2)
+
+
+
+ def _list_selection_get(self, listbox):
+ '''
+ :param: a tkinter Listbox widget
+ :return: text of selected line in the given Listbox widget
+ '''
+ sel = listbox.curselection()
+ if len(sel) == 0:
+            return 'unknown'
+ elif len(sel) == 1:
+ return listbox.get(sel[0],sel[0])[0]
+ else:
+ raise Exception('only single selection allowed')
+
+ def update_date_time(self):
+ '''
+ Updates annotation information to the current date and time
+ '''
+ now = datetime.now()
+ self.date_var.set(now.strftime("%Y-%m-%d"))
+ self.time_var.set(now.strftime("%H:%M:%S"))
+
+ @property
+ def pose(self):
+ '''
+ :return: the selected pose name
+ '''
+ return self._list_selection_get(self.pose_list)
+
+ @property
+ def person(self):
+ '''
+        :return: the selected person name
+ '''
+ return self._list_selection_get(self.person_list)
+
+ @property
+ def date(self):
+ return self.date_var.get()
+
+ @property
+ def time(self):
+ return self.time_var.get()
+
+ @property
+ def comment(self):
+ return self.comment_text.get(1.0,tk.END)
+
+
+
+def annotation_text(filenames, annotation_panel, cam_control_panel):
+ '''
+ build the annotation text for the annotation file
+ '''
+
+ ap = annotation_panel
+
+    _str = 'raw camera data (not debayered): {:s}\n'.format(filenames[0])
+    _str += 'color image (without lens calibration): {:s}\n'.format(filenames[1])
+    _str += 'calibrated color image (lens calibrated): {:s}\n'.format(filenames[2])
+
+ _str += '\n'
+
+ _str += 'camera: O-3020 (color, rolling shutter, serial no.: 10030)\n'
+    _str += 'lens: C-Mount 6mm 1/2.7" IR MP, with IR cutoff filter\n'
+ _str += 'exposure time: {:f}s\n'.format(cam_control_panel.exposure_time.value)
+ _str += 'sensitivity: {:f}%'.format(cam_control_panel.sensitivity.value)
+
+ _str += '\n'
+
+ _str += 'date: {:s}\n'.format(ap.date)
+ _str += 'time: {:s}\n'.format(ap.time)
+
+ _str += '\n'
+
+ _str += 'person: {:s}\n'.format(ap.person)
+ _str += 'pose: {:s}\n'.format(ap.pose)
+ _str += 'comment:\n'
+ _str += ap.comment[:-1]
+
+ return _str
+
+
+imgs = [None, None, None]
+
+def main():
+ o3000.video_init()
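+    # query the camera model; the reply is printed by the XML handler in the C extension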
+    o3000.video_xml_send("<camera><get><model_id></model_id></get></camera>")
+
+
+ root = tk.Tk()
+    root.title("O-3000 Image Recorder")
+
+ # the geometry of the box which will be displayed on the screen
+ root.geometry("1700x1000")
+
+ image_label = ImageLabel()
+ image_label.pack(side=tk.LEFT)
+
+
+ control_panel = tk.Frame(root)
+ control_panel.pack(side=tk.LEFT)
+
+    tk.Label(control_panel, text='').pack(pady=20) # separator, vertical space
+
+
+    tk.Label(control_panel, text='').pack(pady=20) # separator, vertical space
+
+ tk.Label(control_panel, text='Camera Configuration').pack(anchor='w')
+ cam_control_panel = CamControlPanel(master=control_panel)
+ cam_control_panel.pack()
+
+    tk.Label(control_panel, text='').pack(pady=20) # separator, vertical space
+
+ tk.Label(control_panel, text='Annotation').pack(anchor='w')
+ annotation_panel = AnnotationPanel(master=control_panel)
+ annotation_panel.pack()
+
+
+ def record_image():
+ global imgs
+ imgs = o3000.video_images_get()
+ image = imgs[2]
+ image_label.image_update(image)
+ annotation_panel.update_date_time()
+
+
+ btn_record_image = tk.Button(control_panel, text="record/update image", command=record_image)
+ btn_record_image.pack()
+
+
+ def save_image():
+ ap = annotation_panel
+        filenamebase = '{:s}_{:s}'.format(ap.date.replace('-',''), ap.time.replace(':',''))
+
+ filenames = [
+ '{:s}_raw.tiff'.format(filenamebase),
+ '{:s}_rgb.tiff'.format(filenamebase),
+ '{:s}.tiff'.format(filenamebase),
+ ]
+ annotation_filename = '{:s}.txt'.format(filenamebase)
+
+ _annotation_text = annotation_text(filenames, annotation_panel, cam_control_panel)
+
+ print('save images and annotation file {:s}, annotation:'.format(annotation_filename))
+ print(_annotation_text)
+
+        for filename, image in zip(filenames, imgs):
+ im = Image.fromarray(image)
+ im.save(filename)
+
+ with open(annotation_filename,'w') as f:
+ f.write(_annotation_text)
+
+
+ btn_save_image = tk.Button(control_panel, text="save image", command=save_image)
+ btn_save_image.pack()
+
+
+ root.mainloop()
+
+if __name__ == '__main__':
+ main()
diff --git a/setup_c_extension.py b/setup_c_extension.py
new file mode 100644
index 0000000..695c88c
--- /dev/null
+++ b/setup_c_extension.py
@@ -0,0 +1,8 @@
+from distutils.core import setup, Extension
+import numpy
+
+setup(name = 'o3000', version = '1.0',
+    ext_modules = [Extension(
+        'o3000',
+        sources = ['c_extension.c', 'helpers.c'],
+        include_dirs = [numpy.get_include()],
+        libraries = ['o3000', 'o3000_imgpipe', 'tiff'],
+        #extra_compile_args = ['-Wextra']
+    )])