aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPatrick Roth <roth@stettbacher.ch>2019-10-04 11:51:48 +0200
committerPatrick Roth <roth@stettbacher.ch>2019-10-04 11:51:48 +0200
commita0f501fa5650d0b6062cc8f26b34bce11137643d (patch)
tree8e31c5ac3409d4ce48887d88d4530b88a02c2660
downloado3000-color-pipe-a0f501fa5650d0b6062cc8f26b34bce11137643d.tar.gz
o3000-color-pipe-a0f501fa5650d0b6062cc8f26b34bce11137643d.zip
initial commit
import from github
-rw-r--r--CMakeLists.txt23
-rw-r--r--ChangeLog6
-rw-r--r--LICENSE165
-rw-r--r--README.md39
-rw-r--r--TODO10
-rw-r--r--alg_ccm.h88
-rw-r--r--alg_debayer_bilinear.h271
-rw-r--r--alg_gamma.h59
-rw-r--r--alg_interpolate_mono_scalar.h84
-rw-r--r--alg_interpolate_rgb_scalar.h100
-rw-r--r--alg_rgb_to_yuv.h78
-rw-r--r--alg_yuv_to_rgb.h79
-rw-r--r--camera_calib.c769
-rw-r--r--cmake/toolchain_file_template.cmake17
-rw-r--r--color.c422
-rw-r--r--color.h64
-rw-r--r--color_pipe.c969
-rw-r--r--color_pipe.h328
-rw-r--r--color_pipe_private.h57
-rw-r--r--cpu_feature.c122
-rw-r--r--cpu_feature.h48
-rw-r--r--debayer.c243
-rw-r--r--filter.c532
-rw-r--r--filter.h52
-rw-r--r--gamma_corr.c113
-rw-r--r--sharpening.c482
-rw-r--r--white_balance.c285
27 files changed, 5505 insertions, 0 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..3875638
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,23 @@
+#
+# Color Image Processing Pipeline
+#
+
+cmake_minimum_required(VERSION 2.4)
+
+include_directories(.)
+
+file(GLOB colorpipesources *.c *.h)
+
+add_library(o3000_imgpipe SHARED ${colorpipesources})
+
+set_target_properties (o3000_imgpipe PROPERTIES
+ OUTPUT_NAME "o3000_imgpipe"
+ VERSION 1.0.0
+ SOVERSION 1
+ LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib"
+ )
+
+target_compile_options(o3000_imgpipe PRIVATE -Wall -g -O3 -std=c11 -ggdb -D_XOPEN_SOURCE=500 -fPIC)
+
+install(TARGETS o3000_imgpipe DESTINATION lib)
+install(FILES "color_pipe.h" DESTINATION "include/o3000")
diff --git a/ChangeLog b/ChangeLog
new file mode 100644
index 0000000..800f2fd
--- /dev/null
+++ b/ChangeLog
@@ -0,0 +1,6 @@
+-------------------------------------------------------------------------------
+- ChangeLog Color Image Processing Pipeline
+-------------------------------------------------------------------------------
+
+Version 1.0.0 - 2015-10-16 (PR)
+	Initial version
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..65c5ca8
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,165 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6e6fe08
--- /dev/null
+++ b/README.md
@@ -0,0 +1,39 @@
+# Why O-3000
+O-3000 is our answer to rigid and inflexible vision solutions. Maybe you know that. The camera you'd like to use
+does not support your (embedded) operating system. The interface specification is top secret and certainly you won't
+get any driver source code. Due to such experiences we developed the O-3000 cameras. In the meantime, O-3000
+has grown into a comprehensive vision system. Everything is open-source and all interface specifications are
+freely available. These cameras are developed and produced in Switzerland.
+
+## Color Image Processing Pipeline
+The pipeline applies several algorithms to captured images from the O-3000 camera. The following pipeline
+stages are implemented:
+<ol>
+<li>Debayering</li>
+<li>Auto white balancing</li>
+<li>Color Correction</li>
+<li>Lens correction</li>
+<li>Sharpening</li>
+<li>Gamma correction</li>
+</ol>
+
+
+### Build and install
+```
+git clone https://stettbacher.ch/gitlab/o-3000/driver.git
+cd color-pipe
+mkdir build
+cd build
+cmake ..
+sudo make install
+```
+
+
+### Cross-Compile
+A toolchain file is needed to specify the cross-compiler environment. As a starting point,
+use the template 'cmake/toolchain_file_template.cmake'. Specify the cross-compiler and path.
+Run cmake within the build directory as follows:
+
+```
+cmake -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchain_file_template.cmake ..
+```
diff --git a/TODO b/TODO
new file mode 100644
index 0000000..039186d
--- /dev/null
+++ b/TODO
@@ -0,0 +1,10 @@
+-------------------------------------------------------------------------------
+- TODO Color Image Processing Pipeline
+-------------------------------------------------------------------------------
+
+Add new image processing pipeline stages:
+ - Aperture correction depending on selected lens
+ - Defective pixel correction
+
+Define setpoint value (red, green, blue) for Auto-White-Balance algorithm
+to control image color temperature.
diff --git a/alg_ccm.h b/alg_ccm.h
new file mode 100644
index 0000000..24d37bc
--- /dev/null
+++ b/alg_ccm.h
@@ -0,0 +1,88 @@
+/**
+* @file alg_ccm.h
+* @brief color correction algorithm definition
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-08-26
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+
+/*
+ * This code implements the color correction algorithm and is pixel size independent. Including this code in the
+ * C-Source file will define the pixel-bit-depth (see color.c).
+ */
+// static void rgb_color_correction8(uint8_t *img_calib, const uint8_t *img_uncalib, const int color_bit_depth, const int height, const int width, float a[3][3])
+// static void rgb_color_correction16(uint16_t *img_calib, const uint16_t *img_uncalib, const int color_bit_depth, const int height, const int width, float a[3][3])
+{
+ int y, x;
+ int uncal_red, uncal_green, uncal_blue;
+ int cal_red, cal_green, cal_blue;
+ int index;
+ int pix_max_val;
+ const int shift_fact = 16;
+
+ const int a11 = (int) (a[0][0]*(1<<shift_fact));
+ const int a12 = (int) (a[0][1]*(1<<shift_fact));
+ const int a13 = (int) (a[0][2]*(1<<shift_fact));
+ const int a21 = (int) (a[1][0]*(1<<shift_fact));
+ const int a22 = (int) (a[1][1]*(1<<shift_fact));
+ const int a23 = (int) (a[1][2]*(1<<shift_fact));
+ const int a31 = (int) (a[2][0]*(1<<shift_fact));
+ const int a32 = (int) (a[2][1]*(1<<shift_fact));
+ const int a33 = (int) (a[2][2]*(1<<shift_fact));
+
+ index = 0;
+ pix_max_val = (1<<color_bit_depth)-1;
+
+ for(y = 0; y < height; y++) {
+ for(x = 0; x < width; x++) {
+ uncal_red = img_uncalib[index];
+ uncal_green = img_uncalib[index+1];
+ uncal_blue = img_uncalib[index+2];
+
+ // apply color correction matrix
+ cal_red = (uncal_red*a11 + uncal_green*a12 + uncal_blue*a13) >> shift_fact;
+ cal_green = (uncal_red*a21 + uncal_green*a22 + uncal_blue*a23) >> shift_fact;
+ cal_blue = (uncal_red*a31 + uncal_green*a32 + uncal_blue*a33) >> shift_fact;
+
+ // range check
+ if(cal_red > pix_max_val) cal_red = pix_max_val;
+ else if(cal_red < 0) cal_red = 0;
+
+ if(cal_green > pix_max_val) cal_green = pix_max_val;
+ else if(cal_green < 0) cal_green = 0;
+
+ if(cal_blue > pix_max_val) cal_blue = pix_max_val;
+ else if(cal_blue < 0) cal_blue = 0;
+
+ // save calibrated color values at output image
+ img_calib[index] = cal_red;
+ img_calib[index+1] = cal_green;
+ img_calib[index+2] = cal_blue;
+
+ index += 3;
+
+
+ }
+ }
+} \ No newline at end of file
diff --git a/alg_debayer_bilinear.h b/alg_debayer_bilinear.h
new file mode 100644
index 0000000..45c8a8d
--- /dev/null
+++ b/alg_debayer_bilinear.h
@@ -0,0 +1,271 @@
+/**
+* @file alg_debayer_bilinear.h
+* @brief bilinear demosaicing algorithm
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-08-20
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+
+
+/*
+ * This code implements a bilinear demosaicing algorithm and is pixel size independent. Including this code in the
+ * C-Source file will define the pixel-bit-depth (see debayer.c).
+ */
+// static void bilinear8(uint8_t *img_rgb, const uint8_t *img_bayer, const int height, const int width, const enum enumBayerPattern_t bayer_pattern)
+// static void bilinear16(uint16_t *img_rgb, const uint16_t *img_bayer, const int height, const int width, const enum enumBayerPattern_t bayer_pattern)
+{
+ int x, y, red, green, blue;
+ int index_rgb, index_upper, index_center, index_lower;
+ enum enumBayerPattern_t pattern, pattern_nextline;
+
+
+ red = 0;
+ green = 0;
+ blue = 0;
+ pattern_nextline = bayer_pattern;
+ pattern = bayer_pattern;
+
+ /*
+ * figure out pattern color with coordinate 1/1 (second line / second row)
+ */
+ switch(bayer_pattern) {
+ case BP_GR: // green-red-green-red-...
+ pattern = BP_GB;
+ pattern_nextline = BP_RG;
+ break;
+
+ case BP_RG: // red-green-red-green-...
+ pattern = BP_BG;
+ pattern_nextline = BP_GR;
+ break;
+
+ case BP_BG: // blue-green-blue-green-...
+ pattern = BP_RG;
+ pattern_nextline = BP_GB;
+ break;
+
+ case BP_GB: // green-blue-green-blue-...
+ pattern = BP_GR;
+ pattern_nextline = BP_BG;
+ break;
+ }
+
+ /*
+ * loop through image without border area
+ */
+ for(y = 1; y < (height-1); y++) {
+
+ // initialize line indices used to reference pixel at 3x3 kernel
+ index_upper = (y-1)*width;
+ index_center = y*width;
+ index_lower = (y+1)*width;
+
+ // initialize index used to reference pixel at RGB image
+ index_rgb = y*width*3+3;
+
+
+ for(x = 1; x < (width-1); x++) {
+
+ calc_bilinear(img_bayer[index_upper], img_bayer[index_upper+1], img_bayer[index_upper+2],
+ img_bayer[index_center], img_bayer[index_center+1], img_bayer[index_center+2],
+ img_bayer[index_lower], img_bayer[index_lower+1], img_bayer[index_lower+2],
+ &pattern, &red, &green, &blue);
+
+ img_rgb[index_rgb] = red;
+ img_rgb[index_rgb+1] = green;
+ img_rgb[index_rgb+2] = blue;
+
+ index_rgb += 3;
+ index_upper++;
+ index_center++;
+ index_lower++;
+ }
+
+ pattern = pattern_nextline;
+ pattern_nextline = getBayerType(pattern, 1, 0);
+ }
+
+ /*
+ * handle pixels at horizontal upper border line
+ */
+ pattern = getBayerType(bayer_pattern, 0, 1);
+ index_center = 0;
+ index_upper = width;
+ index_lower = width;
+ index_rgb = 3;
+ for(x = 1; x < (width-1); x++) {
+
+ calc_bilinear(img_bayer[index_upper], img_bayer[index_upper+1], img_bayer[index_upper+2],
+ img_bayer[index_center], img_bayer[index_center+1], img_bayer[index_center+2],
+ img_bayer[index_lower], img_bayer[index_lower+1], img_bayer[index_lower+2],
+ &pattern, &red, &green, &blue);
+
+ img_rgb[index_rgb] = red;
+ img_rgb[index_rgb+1] = green;
+ img_rgb[index_rgb+2] = blue;
+
+ index_rgb += 3;
+ index_upper++;
+ index_center++;
+ index_lower++;
+ }
+
+ /*
+ * handle pixels at horizontal lower border line
+ */
+ pattern = getBayerType(bayer_pattern, height-1, 1);
+ index_center = (height-1)*width;
+ index_upper = index_center-width;
+ index_lower = index_center-width;
+ index_rgb = ((height-1)*width*3)+3;
+ for(x = 1; x < (width-1); x++) {
+
+ calc_bilinear(img_bayer[index_upper], img_bayer[index_upper+1], img_bayer[index_upper+2],
+ img_bayer[index_center], img_bayer[index_center+1], img_bayer[index_center+2],
+ img_bayer[index_lower], img_bayer[index_lower+1], img_bayer[index_lower+2],
+ &pattern, &red, &green, &blue);
+
+ img_rgb[index_rgb] = red;
+ img_rgb[index_rgb+1] = green;
+ img_rgb[index_rgb+2] = blue;
+
+ index_rgb += 3;
+ index_upper++;
+ index_center++;
+ index_lower++;
+ }
+
+ /*
+ * handle pixels at vertical left border line
+ */
+ index_upper = 0;
+ index_center = width;
+ index_lower = 2*width;
+ index_rgb = width*3;
+ for(y = 1; y < (height-1); y++) {
+ pattern = getBayerType(bayer_pattern, y, 0);
+ calc_bilinear(img_bayer[index_upper+1], img_bayer[index_upper], img_bayer[index_upper+1],
+ img_bayer[index_center+1], img_bayer[index_center], img_bayer[index_center+1],
+ img_bayer[index_lower+1], img_bayer[index_lower], img_bayer[index_lower+1],
+ &pattern, &red, &green, &blue);
+
+ img_rgb[index_rgb] = red;
+ img_rgb[index_rgb+1] = green;
+ img_rgb[index_rgb+2] = blue;
+
+ index_rgb += 3*width;
+ index_upper += width;
+ index_center += width;
+ index_lower += width;
+ }
+
+ /*
+ * handle pixels at vertical right border line
+ */
+ index_upper = width-2;
+ index_center = index_upper+width;
+ index_lower = index_upper+2*width;
+ index_rgb = width*3*2-3;
+ for(y = 1; y < (height-1); y++) {
+ pattern = getBayerType(bayer_pattern, y, width-1);
+ calc_bilinear(img_bayer[index_upper], img_bayer[index_upper+1], img_bayer[index_upper],
+ img_bayer[index_center], img_bayer[index_center+1], img_bayer[index_center],
+ img_bayer[index_lower], img_bayer[index_lower+1], img_bayer[index_lower],
+ &pattern, &red, &green, &blue);
+
+ img_rgb[index_rgb] = red;
+ img_rgb[index_rgb+1] = green;
+ img_rgb[index_rgb+2] = blue;
+
+ index_rgb += 3*width;
+ index_upper += width;
+ index_center += width;
+ index_lower += width;
+ }
+
+
+ /*
+ * Handle upper left corner
+ */
+ pattern = getBayerType(bayer_pattern, 0, 0);
+ index_upper = width;
+ index_lower = width;
+ calc_bilinear(img_bayer[index_upper+1], img_bayer[index_upper], img_bayer[index_upper+1],
+ img_bayer[1], img_bayer[0], img_bayer[1],
+ img_bayer[index_lower+1], img_bayer[index_lower], img_bayer[index_lower+1],
+ &pattern, &red, &green, &blue);
+ img_rgb[0] = red;
+ img_rgb[1] = green;
+ img_rgb[2] = blue;
+
+ /*
+ * Handle upper right corner
+ */
+ pattern = getBayerType(bayer_pattern, 0, width-1);
+ index_upper = 2*width-2;
+ index_center = width-2;
+ index_lower = 2*width-2;
+ calc_bilinear(img_bayer[index_upper], img_bayer[index_upper+1], img_bayer[index_upper],
+ img_bayer[index_center], img_bayer[index_center+1], img_bayer[index_center],
+ img_bayer[index_lower], img_bayer[index_lower+1], img_bayer[index_lower],
+ &pattern, &red, &green, &blue);
+ index_rgb = 3*width-3;
+ img_rgb[index_rgb] = red;
+ img_rgb[index_rgb+1] = green;
+ img_rgb[index_rgb+2] = blue;
+
+ /*
+ * Handle lower left corner
+ */
+ pattern = getBayerType(bayer_pattern, height-1, 0);
+ index_upper = (height-2)*width;
+ index_center = (height-1)*width;
+ index_lower = (height-2)*width;
+ calc_bilinear(img_bayer[index_upper+1], img_bayer[index_upper], img_bayer[index_upper+1],
+ img_bayer[index_center+1], img_bayer[index_center], img_bayer[index_center+1],
+ img_bayer[index_lower+1], img_bayer[index_lower], img_bayer[index_lower+1],
+ &pattern, &red, &green, &blue);
+ index_rgb = ((height-1)*width)*3;
+ img_rgb[index_rgb] = red;
+ img_rgb[index_rgb+1] = green;
+ img_rgb[index_rgb+2] = blue;
+
+ /*
+ * Handle lower right corner
+ */
+ pattern = getBayerType(bayer_pattern, height-1, width-1);
+ index_upper = (height-2)*width-2;
+ index_center = (height-1)*width-2;
+ index_lower = (height-2)*width-2;
+ calc_bilinear(img_bayer[index_upper], img_bayer[index_upper+1], img_bayer[index_upper],
+ img_bayer[index_center], img_bayer[index_center+1], img_bayer[index_center],
+ img_bayer[index_lower], img_bayer[index_lower+1], img_bayer[index_lower],
+ &pattern, &red, &green, &blue);
+ index_rgb = height*width*3-3;
+ img_rgb[index_rgb] = red;
+ img_rgb[index_rgb+1] = green;
+ img_rgb[index_rgb+2] = blue;
+
+
+} \ No newline at end of file
diff --git a/alg_gamma.h b/alg_gamma.h
new file mode 100644
index 0000000..2f9a7ae
--- /dev/null
+++ b/alg_gamma.h
@@ -0,0 +1,59 @@
+/**
+* @file alg_gamma.h
+* @brief gamma correction algorithm definition
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-09-08
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+
+/*
+ * This code implements the gamma correction algorithm and is pixel size independent. Including this code in the
+ * C-Source file will define the pixel-bit-depth (see gamma_corr.c).
+ */
+// static void gamma_corr8(uint8_t *img_rgb, const uint8_t *img_in, const int height, const int width, const int *gamma_table, const int is_color)
+// static void gamma_corr16(uint8_t *img_rgb, const uint8_t *img_in, const int height, const int width, const int *gamma_table, const int is_color)
+{
+ int index, num_pixel;
+
+ if(is_color) {
+ num_pixel = height*width*3;
+ }
+ else {
+ num_pixel = height*width;
+ }
+
+ for(index = 0; index < num_pixel; index++) {
+ img_rgb[index] = gamma_table[img_in[index]];
+ }
+}
+
+
+
+
+
+
+
+
+
+
diff --git a/alg_interpolate_mono_scalar.h b/alg_interpolate_mono_scalar.h
new file mode 100644
index 0000000..9adda8b
--- /dev/null
+++ b/alg_interpolate_mono_scalar.h
@@ -0,0 +1,84 @@
+/**
+* @file alg_interpolate_mono_scalar.h
+* @brief Monochrome pixel interpolation (scalar code)
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-11-09
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+
+/*
+ * This code implements a monochrome pixel interpolator and is independent of the pixel-bit-depth. Including this code in the
+ * C-Source file will define the pixel-bit-depth (see camera_calib.c).
+ */
+// static void interpolate_mono8_scalar(uint8_t *img_out, const int x, const int y, const int height, const int width,
+// const uint8_t *img_in, const int coord_x, const int coord_y, const int scale_fact)
+// static void interpolate_mono16_scalar(uint16_t *img_out, const int x, const int y, const int height, const int width,
+// const uint16_t *img_in, const int coord_x, const int coord_y, const int scale_fact)
+{
+ int x_start, y_start, x_end, y_end, index_start, index_end;
+ int wheight_x, wheight_y, a11, a12, a21, a22;
+ int mono;
+ int index;
+ const int max_val = (1<<scale_fact);
+
+
+ // calculate pixel index of destination image (calibrated image)
+ index = ((y*width)+x);
+
+ if((coord_x>>scale_fact) > width || (coord_y>>scale_fact) > height || coord_x < 0 || coord_y < 0) {
+ // out of range --> return black value
+ img_out[index] = 0;
+ return;
+ }
+
+ mono = 0;
+
+ x_start = coord_x>>scale_fact;
+ y_start = coord_y>>scale_fact;
+ x_end = x_start + 1;
+ y_end = y_start + 1;
+ index_start = (y_start*width + x_start);
+ index_end = (y_end*width + x_end);
+
+	// calculate weights
+ wheight_x = coord_x % max_val;
+ wheight_y = coord_y % max_val;
+ a11 = (max_val - wheight_x)*(max_val - wheight_y);
+ a12 = wheight_x*(max_val - wheight_y);
+ a21 = (max_val - wheight_x)*wheight_y;
+ a22 = wheight_x*wheight_y;
+
+ /*
+ * handle border region separately
+ */
+ if(x_end < width || y_end < height) {
+ // pixels are not lying on border region
+ mono = img_in[index_start]*a11 +
+ img_in[index_start+1]*a12 +
+ img_in[index_end-1]*a21 +
+ img_in[index_end]*a22;
+ }
+
+ img_out[index] = mono >> (2*scale_fact);
+}
diff --git a/alg_interpolate_rgb_scalar.h b/alg_interpolate_rgb_scalar.h
new file mode 100644
index 0000000..aa17c54
--- /dev/null
+++ b/alg_interpolate_rgb_scalar.h
@@ -0,0 +1,100 @@
+/**
+* @file alg_interpolate_rgb_scalar.h
+* @brief RGB pixel interpolation (scalar code)
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-11-09
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+
+/*
+ * This code implements an RGB pixel interpolator and is independent of the pixel-bit-depth. Including this code in the
+ * C-Source file will define the pixel-bit-depth (see camera_calib.c).
+ */
+// static void interpolate_rgb8_scalar(uint8_t *img_out, const int x, const int y, const int height, const int width,
+// const uint8_t *img_in, const int coord_x, const int coord_y, const int scale_fact)
+// static void interpolate_rgb16_scalar(uint16_t *img_out, const int x, const int y, const int height, const int width,
+// const uint16_t *img_in, const int coord_x, const int coord_y, const int scale_fact)
+{
+ int x_start, y_start, x_end, y_end, index_start, index_end;
+ int wheight_x, wheight_y, a11, a12, a21, a22;
+ int red, green, blue;
+ int index;
+ const int max_val = (1<<scale_fact);
+
+
+ // calculate pixel index of destination image (calibrated image)
+ index = ((y*width)+x)*3;
+
+ if((coord_x>>scale_fact) > width || (coord_y>>scale_fact) > height || coord_x < 0 || coord_y < 0) {
+ // out of range --> return black value
+ img_out[index] = 0;
+ img_out[index+1] = 0;
+ img_out[index+2] = 0;
+ return;
+ }
+
+ red = 0;
+ green = 0;
+ blue = 0;
+
+ x_start = coord_x>>scale_fact;
+ y_start = coord_y>>scale_fact;
+ x_end = x_start + 1;
+ y_end = y_start + 1;
+ index_start = (y_start*width + x_start)*3;
+ index_end = (y_end*width + x_end)*3;
+
+	// calculate weights
+ wheight_x = coord_x % max_val;
+ wheight_y = coord_y % max_val;
+ a11 = (max_val - wheight_x)*(max_val - wheight_y);
+ a12 = wheight_x*(max_val - wheight_y);
+ a21 = (max_val - wheight_x)*wheight_y;
+ a22 = wheight_x*wheight_y;
+
+ /*
+ * handle border region separately
+ */
+ if(x_end < width || y_end < height) {
+ // pixels are not lying on border region
+ red = img_in[index_start]*a11 +
+ img_in[index_start+3]*a12 +
+ img_in[index_end-3]*a21 +
+ img_in[index_end]*a22;
+
+ green = img_in[index_start+1]*a11 +
+ img_in[index_start+1+3]*a12 +
+ img_in[index_end+1-3]*a21 +
+ img_in[index_end+1]*a22;
+
+ blue = img_in[index_start+2]*a11 +
+ img_in[index_start+2+3]*a12 +
+ img_in[index_end+2-3]*a21 +
+ img_in[index_end+2]*a22;
+ }
+
+ img_out[index] = red >> (2*scale_fact);
+ img_out[index+1] = green >> (2*scale_fact);
+ img_out[index+2] = blue >> (2*scale_fact);
+}
diff --git a/alg_rgb_to_yuv.h b/alg_rgb_to_yuv.h
new file mode 100644
index 0000000..f75df6a
--- /dev/null
+++ b/alg_rgb_to_yuv.h
@@ -0,0 +1,78 @@
+/**
+* @file alg_rgb_to_yuv.h
+* @brief color space conversion definitions
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-08-20
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+
+/*
+ * This code implements an RGB to YUV color space conversion and is independent of the pixel size. Including this code in the
+ * C source file defines the pixel bit depth (see color.c).
+ */
+// static void rgb8_to_yuv(int32_t *img_yuv, const uint8_t *img_rgb, const int height, const int width)
+// static void rgb16_to_yuv(int32_t *img_yuv, const uint16_t *img_rgb, const int height, const int width)
+{
+
+ int x, y, index;
+ int blue, green, red;
+ int pixel_y, pixel_u, pixel_v;
+ const int scale_fact = 10;
+
+ const int y_r = (int)roundf(RGB2YUV_COEFF_Y_RED*(1<<scale_fact));
+ const int y_g = (int)roundf(RGB2YUV_COEFF_Y_GREEN*(1<<scale_fact));
+ const int y_b = (int)roundf(RGB2YUV_COEFF_Y_BLUE*(1<<scale_fact));
+
+ const int u_r = (int)roundf(RGB2YUV_COEFF_U_RED*(1<<scale_fact));
+ const int u_g = (int)roundf(RGB2YUV_COEFF_U_GREEN*(1<<scale_fact));
+ const int u_b = (int)roundf(RGB2YUV_COEFF_U_BLUE*(1<<scale_fact));
+
+ const int v_r = (int)roundf(RGB2YUV_COEFF_V_RED*(1<<scale_fact));
+ const int v_g = (int)roundf(RGB2YUV_COEFF_V_GREEN*(1<<scale_fact));
+ const int v_b = (int)roundf(RGB2YUV_COEFF_V_BLUE*(1<<scale_fact));
+
+
+
+ index = 0;
+ for(y = 0; y < height; y++) {
+ for(x = 0; x < width; x++) {
+
+ // put each RGB color on stack
+ red = img_rgb[index];
+ green = img_rgb[index + 1];
+ blue = img_rgb[index + 2];
+
+ // color space conversion from RGB to YUV
+ pixel_y = (y_r*red + y_g*green + y_b*blue) >> scale_fact;
+ pixel_u = (u_r*red + u_g*green + u_b*blue) >> scale_fact;
+ pixel_v = (v_r*red + v_g*green + v_b*blue) >> scale_fact;
+
+ img_yuv[index] = pixel_y;
+ img_yuv[index+1] = pixel_u;
+ img_yuv[index+2] = pixel_v;
+
+ index += 3;
+ }
+ }
+}
diff --git a/alg_yuv_to_rgb.h b/alg_yuv_to_rgb.h
new file mode 100644
index 0000000..c608a6d
--- /dev/null
+++ b/alg_yuv_to_rgb.h
@@ -0,0 +1,79 @@
+/**
+* @file alg_yuv_to_rgb.h
+* @brief color space conversion definitions
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-08-27
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+
+/*
+ * This code implements a YUV to RGB color space conversion and is independent of the pixel size. Including this code in the
+ * C source file defines the pixel bit depth (see color.c).
+ */
+// static void yuv_to_rgb8(uint8_t *img_rgb, const int32_t *img_yuv, const int height, const int width, const int pix_max)
+// static void yuv_to_rgb16(uint16_t *img_rgb, const int32_t *img_yuv, const int height, const int width, const int pix_max)
+{
+ int x, y, index;
+ int blue, green, red;
+ int val_y, val_u, val_v;
+ const int scale_fact = 10;
+
+
+ const int green_y = (int)roundf(1.0*(1<<scale_fact));
+ const int green_u = (int)roundf(-0.1942*(1<<scale_fact));
+ const int green_v = (int)roundf(-0.5094*(1<<scale_fact));
+
+ index = 0;
+ for(y = 0; y < height; y++) {
+ for(x = 0; x < width; x++) {
+
+ // save YUV values on stack
+ val_y = img_yuv[index];
+ val_u = img_yuv[index+1];
+ val_v = img_yuv[index+2];
+
+ red = val_y + val_v;
+ green = (green_y*val_y + green_u*val_u + green_v*val_v) >> scale_fact;
+ blue = val_y + val_u;
+
+ // range check red
+ if(red < 0) red = 0;
+ else if(red > pix_max) red = pix_max;
+
+ // range check green
+ if(green < 0) green = 0;
+ else if(green > pix_max) green = pix_max;
+
+ // range check blue
+ if(blue < 0) blue = 0;
+ else if(blue > pix_max) blue = pix_max;
+
+ img_rgb[index] = red;
+ img_rgb[index+1] = green;
+ img_rgb[index+2] = blue;
+
+ index += 3;
+ }
+ }
+}
diff --git a/camera_calib.c b/camera_calib.c
new file mode 100644
index 0000000..78362bd
--- /dev/null
+++ b/camera_calib.c
@@ -0,0 +1,769 @@
+/**
+* @file camera_calib.c
+* @brief camera calibration algorithm
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-08-20
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+#include <stdio.h>
+#include <math.h>
+
+#if (WITH_SIMD == 1)
+#include <immintrin.h> // see /usr/lib64/gcc/x86_64-suse-linux/4.7/include/immintrin.h
+#endif // WITH_SIMD
+
+#include "color_pipe_private.h"
+
+
+#if 0
+
+not used anymore!!
+
+
+#ifdef __AVX__
/**
 * Bilinear interpolation of one RGB pixel (8 or 16 bit per channel), scalar
 * tail of the disabled AVX calibration path.
 *
 * The source position is given as an integer start coordinate plus fixed-point
 * X/Y weights (scaled by 2^shift_fact); the four neighbour pixels are blended
 * and the result written at (x, y) of the destination image.
 *
 * NOTE(review): dead code (enclosing '#if 0'). Unlike the scalar variants,
 * there is no special case for source pixels on the right/bottom border:
 * index_end can then address memory past the last valid pixel -- confirm
 * before re-enabling.
 *
 * @param bit_channel bits per color channel (<=8: 8-bit buffers, <=16: 16-bit buffers)
 * @param img_out destination RGB image (same pixel type as img_in)
 * @param x destination x-coordinate
 * @param y destination y-coordinate
 * @param height image height in number of pixels
 * @param width image width in number of pixels
 * @param img_in source RGB image
 * @param x_corr_start integer part of the source x-coordinate
 * @param y_corr_start integer part of the source y-coordinate
 * @param x_weight fractional x part, scaled by 2^shift_fact
 * @param y_weight fractional y part, scaled by 2^shift_fact
 * @param shift_fact fixed-point scaling shift
 */
static inline void interpolate_avx(const int bit_channel, void *img_out, const int x, const int y, const int height, const int width,
	const void *img_in, const int32_t x_corr_start, const int32_t y_corr_start, const int32_t x_weight, const int32_t y_weight,
	const int shift_fact) {

	int x_coord_end, y_coord_end, index_start, index_end;
	int a11, a12, a21, a22;
	int red, green, blue;
	int index;
	uint8_t *out8 = img_out;
	uint16_t *out16 = img_out;
	const uint8_t *in8 = img_in;
	const uint16_t *in16= img_in;

	// init pixel value to black
	red = 0;
	green = 0;
	blue = 0;

	// calculate pixel index of destination image (calibrated image)
	index = ((y*width)+x)*3;

	if(x_corr_start >= width || y_corr_start >= height || x_corr_start < 0 || y_corr_start < 0) {
		// out of range --> return black value
		goto _end_interpolate;
	}


	x_coord_end = x_corr_start + 1;
	y_coord_end = y_corr_start + 1;
	index_start = (y_corr_start*width + x_corr_start)*3;
	index_end = (y_coord_end*width + x_coord_end)*3;

	// calculate weights used for interpolation
	a11 = ((1<<shift_fact) - x_weight)*((1<<shift_fact) - y_weight);
	a12 = x_weight*((1<<shift_fact) - y_weight);
	a21 = ((1<<shift_fact) - x_weight)*y_weight;
	a22 = x_weight*y_weight;

	// apply interpolation weight on input image
	if(bit_channel <= 8) {
		red = in8[index_start]*a11 +
			in8[index_start+3]*a12 +
			in8[index_end-3]*a21 +
			in8[index_end]*a22;

		green = in8[index_start+1]*a11 +
			in8[index_start+1+3]*a12 +
			in8[index_end+1-3]*a21 +
			in8[index_end+1]*a22;

		blue = in8[index_start+2]*a11 +
			in8[index_start+2+3]*a12 +
			in8[index_end+2-3]*a21 +
			in8[index_end+2]*a22;
	}
	else if(bit_channel <= 16) {
		red = in16[index_start]*a11 +
			in16[index_start+3]*a12 +
			in16[index_end-3]*a21 +
			in16[index_end]*a22;

		green = in16[index_start+1]*a11 +
			in16[index_start+1+3]*a12 +
			in16[index_end+1-3]*a21 +
			in16[index_end+1]*a22;

		blue = in16[index_start+2]*a11 +
			in16[index_start+2+3]*a12 +
			in16[index_end+2-3]*a21 +
			in16[index_end+2]*a22;
	}

_end_interpolate:
	// a weight product carries 2*shift_fact fractional bits; shift them back out
	if(bit_channel <= 8) {
		out8[index] = red >> (2*shift_fact);
		out8[index+1] = green >> (2*shift_fact);
		out8[index+2] = blue >> (2*shift_fact);
	}
	else if(bit_channel <= 16) {
		out16[index] = red >> (2*shift_fact);
		out16[index+1] = green >> (2*shift_fact);
		out16[index+2] = blue >> (2*shift_fact);
	}
}
+
+
+/**
+ * Calculate camera calibration lookup-table (by using AVX vector instructions).
+ *
+ * @param data required camera calibration data
+ */
static void calc_calib_avx(struct cam_calib_data_t *data) {
	int v, u, i, height, width, fov_x_start, fov_x_end, fov_y_start, fov_y_end;
	float k1, k2, p1, p2, k3;
	float fx, fy, cx, cy;
	float _y;
	void *img_calib, *img_uncalib;
	int bit_channel;
	const int shift_fact = 9;

	// align variable to 32-byte boundary (required by the aligned AVX load/store below)
	float _x[8] __attribute__((aligned(32)));
	int32_t x_corr_start[8] __attribute__((aligned(32)));
	int32_t y_corr_start[8] __attribute__((aligned(32)));
	int32_t x_weight[8] __attribute__((aligned(32)));
	int32_t y_weight[8] __attribute__((aligned(32)));


	__m256 _y_vect, _y_sq_vect;
	__m256 _x_vect, _x_sq_vect;
	__m256 r_sq_vect, r_sqx2_vect, r_sqx3_vect;
	__m256 dist_radial_vect, k1_vect, k2_vect, k3_vect, dist_radial_offset;
	__m256 dist_tang_x_vect, dist_tang_y_vect, p1_vect, p2_vect, a_p1, a_p2;
	__m256 dist_x_vect, dist_y_vect;
	__m256 const_2_vect;
	__m256 x_corr_vect, y_corr_vect;
	__m256 fx_vect, cx_vect, fov_x_start_vect;
	__m256 fy_vect, cy_vect, fov_y_start_vect;
	__m256 _x_fact_y_vect;
	__m256i x_corr_start_vect, y_corr_start_vect;
	__m256 shift_fact_vect;
	__m256i x_weight_vect, y_weight_vect;

	img_calib = data->img_calib;
	img_uncalib = data->img_in;
	bit_channel = data->bit_channel;
	height = data->tot_height;
	width = data->tot_width;

	fov_x_start = data->fov_x_start;
	fov_x_end = data->fov_x_end;
	fov_y_start = data->fov_y_start;
	fov_y_end = data->fov_y_end;
	fov_x_start_vect = _mm256_set1_ps(fov_x_start);
	fov_y_start_vect = _mm256_set1_ps(fov_y_start);


	// radial distortion coefficients, broadcast to all 8 vector lanes
	k1 = data->dist_coeff.k1;
	k2 = data->dist_coeff.k2;
	k3 = data->dist_coeff.k3;
	k1_vect = _mm256_set1_ps(k1);
	k2_vect = _mm256_set1_ps(k2);
	k3_vect = _mm256_set1_ps(k3);

	// tangential distortion coefficients
	p1 = data->dist_coeff.p1;
	p2 = data->dist_coeff.p2;
	p1_vect = _mm256_set1_ps(p1);
	p2_vect = _mm256_set1_ps(p2);

	// camera intrinsics: focal lengths and optical center
	fx = data->camera_matrix.a11;
	fy = data->camera_matrix.a22;
	cx = data->camera_matrix.a13;
	cy = data->camera_matrix.a23;
	fx_vect = _mm256_set1_ps(fx);
	fy_vect = _mm256_set1_ps(fy);
	cx_vect = _mm256_set1_ps(cx);
	cy_vect = _mm256_set1_ps(cy);

	dist_radial_offset = _mm256_set1_ps(1.0);
	const_2_vect = _mm256_set1_ps(2.0);
	a_p1 = _mm256_mul_ps(_mm256_set1_ps(2.0), p1_vect);
	a_p2 = _mm256_mul_ps(_mm256_set1_ps(2.0), p2_vect);

	shift_fact_vect = _mm256_set1_ps((float)(1<<shift_fact));


	/*
	 * NOTE(review): dead code (enclosing '#if 0'). The inner loop advances by
	 * 8 columns per iteration; it appears to assume the FOV width is a
	 * multiple of 8, otherwise the last iteration processes columns beyond
	 * fov_x_end -- confirm before re-enabling.
	 */
	for(v = fov_y_start; v <= fov_y_end; v++) {

		_y = (v-cy)/fy;
		_y_vect = _mm256_set1_ps(_y);
		_y_sq_vect = _mm256_mul_ps(_y_vect, _y_vect);	// _y_sq_vect = _y_vect*_y_vect

		for(u = fov_x_start; u <= fov_x_end; u+=8) {

			// normalized x coordinate for 8 consecutive columns
			for(i = 0; i < 8; i++) {
				_x[i] = (u+i-cx)/fx;
			}
			_x_vect = _mm256_load_ps(_x);	// load 8 float values
			_x_sq_vect = _mm256_mul_ps(_x_vect, _x_vect);	// _x_sq_vect = _x_vect*_x_vect
			r_sq_vect = _mm256_add_ps(_y_sq_vect, _x_sq_vect);	// r_sq_vect = _y_sq_vect*_x_sq_vect

			/*
			 * dist_radial = (1 + k1*r_sq + k2*r_sq*r_sq + k3*r_sq*r_sq*r_sq);
			 */
			r_sqx2_vect = _mm256_mul_ps(r_sq_vect, r_sq_vect);	// r_sqx2_vect = r_sq_vect*r_sq_vect
			r_sqx3_vect = _mm256_mul_ps(r_sqx2_vect, r_sq_vect);	// r_sqx3_vect = r_sqx2_vect*r_sq_vect

			dist_radial_vect = _mm256_add_ps(dist_radial_offset, _mm256_mul_ps(k1_vect, r_sq_vect));	// dist_radial_vect = 1 + k1*r_sq
			dist_radial_vect = _mm256_add_ps(dist_radial_vect, _mm256_mul_ps(k2_vect, r_sqx2_vect));	// dist_radial_vect += k2*r_sq*r_sq
			dist_radial_vect = _mm256_add_ps(dist_radial_vect, _mm256_mul_ps(k3_vect, r_sqx3_vect));	// dist_radial_vect += k3*r_sq*r_sq*r_sq


			/*
			 * dist_tang_x = 2*p1*_x*_y + p2*(r_sq + 2*_x_sq)
			 * dist_tang_x = a_p1*_x*_y + p2*(r_sq + 2*_x_sq) where a_p1 = 2*p1
			 */
			dist_tang_x_vect = _mm256_mul_ps(p2_vect, _mm256_add_ps(r_sq_vect, _mm256_mul_ps(const_2_vect, _x_sq_vect)));	// dist_tang_x_vect = p2*(r_sq + 2*_x_sq)
			_x_fact_y_vect = _mm256_mul_ps(_x_vect, _y_vect);	// _x_fact_y_vect = _x*_y
			dist_tang_x_vect = _mm256_add_ps(dist_tang_x_vect, _mm256_mul_ps(_x_fact_y_vect, a_p1));	// dist_tang_x_vect += a_p1*_x*_y


			/*
			 * dist_x = _x*dist_radial + dist_tang_x;
			 */
			dist_x_vect = _mm256_add_ps(_mm256_mul_ps(_x_vect, dist_radial_vect), dist_tang_x_vect);


			/*
			 * dist_tang_y = p1*(r_sq + 2*_y_sq) + 2*p2*_x*_y
			 * dist_tang_y = p1*(r_sq + 2*_y_sq) + a_p2*_x*_y where a_p2 = 2*p2
			 */
			dist_tang_y_vect = _mm256_mul_ps(p1_vect, _mm256_add_ps(r_sq_vect, _mm256_mul_ps(const_2_vect, _y_sq_vect)));	// dist_tang_y_vect = p1*(r_sq + 2*_y_sq)
			dist_tang_y_vect = _mm256_add_ps(dist_tang_y_vect, _mm256_mul_ps(_x_fact_y_vect, a_p2));	// dist_tang_y_vect += a_p2*_x*_y


			/*
			 * dist_y = _y*dist_radial + dist_tang_y
			 */
			dist_y_vect = _mm256_add_ps(_mm256_mul_ps(_y_vect, dist_radial_vect), dist_tang_y_vect);


			/*
			 * x_corr = fx*dist_x + cx - fov_x_start
			 * y_corr = fy*dist_y + cy - fov_y_start;
			 */
			x_corr_vect = _mm256_sub_ps(_mm256_add_ps(_mm256_mul_ps(fx_vect, dist_x_vect), cx_vect), fov_x_start_vect);
			y_corr_vect = _mm256_sub_ps(_mm256_add_ps(_mm256_mul_ps(fy_vect, dist_y_vect), cy_vect), fov_y_start_vect);


			/*
			 * Convert X/Y coordinate from float to fixed-point value. The float value is rounded down to the next integer value
			 * and will be the start coordinate.
			 */
			x_corr_start_vect = _mm256_cvtps_epi32(_mm256_floor_ps(x_corr_vect));
			y_corr_start_vect = _mm256_cvtps_epi32(_mm256_floor_ps(y_corr_vect));


			/*
			 * Calculate X/Y weights as fixed-point value using given shifting factor.
			 *
			 * weight = (coord - floor(coord)) << shift_fact
			 */
			x_weight_vect = _mm256_cvtps_epi32(_mm256_mul_ps(_mm256_sub_ps(x_corr_vect, _mm256_floor_ps(x_corr_vect)), shift_fact_vect));
			y_weight_vect = _mm256_cvtps_epi32(_mm256_mul_ps(_mm256_sub_ps(y_corr_vect, _mm256_floor_ps(y_corr_vect)), shift_fact_vect));


			/*
			 * Now switch to scalar format and get interpolated pixel value from uncalibrated image.
			 */
			_mm256_store_si256((__m256i*)x_corr_start, x_corr_start_vect);
			_mm256_store_si256((__m256i*)y_corr_start, y_corr_start_vect);
			_mm256_store_si256((__m256i*)x_weight, x_weight_vect);
			_mm256_store_si256((__m256i*)y_weight, y_weight_vect);


			for(i = 0; i < 8; i++) {
				interpolate_avx(bit_channel, img_calib, u + i - fov_x_start, v - fov_y_start, height, width, img_uncalib,
						x_corr_start[i], y_corr_start[i],
						x_weight[i], y_weight[i],
						shift_fact);
			}
		}
	}
}
+
+
+#else
+
+
+/**
+ * Pixel value interpolation of RGB image (16 bit per color channel).
+ * If a pixel coordinate with a fraction part is of interest, do interpolate the correct value from their neighbor pixels.
+ *
+ * E. g. the pixel coordinate x/y = 1.8/2.3 gives the following weights:
+ * +------+------+
+ * | | |
+ * | 14% | 56% | 14% = 20%*70%, 56% = 80%*70%
+ * | | |
+ * +------+------+
+ * | | |
+ * | 6% | 24% | 6% = 20%*30%, 24% = 80%*30%
+ * | | |
+ * +------+------+
+ *
+ * The weights are applied to the neighors and the resulting pixel value is saved at the given location.
+ *
+ * NOTE
+ * The input and output image must have the same pixel size.
+ *
+ * @param img_out On return: image with interpolated values
+ * @param x saved interpolated pixel value at this x-coordinate
+ * @param y saved interpolated pixel value at this y-coordinate
+ * @param height image height of input and output image in number of pixels
+ * @param width image width of input and output image in number of pixels
+ * @param img_in input image to interpolate pixel values
+ * @param coord_x x-coordinate to interpolate
+ * @param coord_y y-coordinate to interpolate
+ */
// the function body is textually provided by the included template file below
// NOTE(review): this lives inside the disabled '#if 0' region and
// "alg_interpolate.h" is not part of this commit's file list -- confirm before re-enabling
static void interpolate16(uint16_t *img_out, const int x, const int y, const int height, const int width,
		const uint16_t *img_in, const float coord_x, const float coord_y)
#include "alg_interpolate.h"
+
+
+/**
+ * Pixel value interpolation of RGB image (8 bit per color channel).
+ * If a pixel coordinate with a fraction part is of interest, do interpolate the correct value from their neighbor pixels.
+ *
+ * E. g. the pixel coordinate x/y = 1.8/2.3 gives the following weights:
+ * +------+------+
+ * | | |
+ * | 14% | 56% | 14% = 20%*70%, 56% = 80%*70%
+ * | | |
+ * +------+------+
+ * | | |
+ * | 6% | 24% | 6% = 20%*30%, 24% = 80%*30%
+ * | | |
+ * +------+------+
+ *
+ * The weights are applied to the neighors and the resulting pixel value is saved at the given location.
+ *
+ * NOTE
+ * The input and output image must have the same pixel size.
+ *
+ * @param img_out On return: image with interpolated values
+ * @param x saved interpolated pixel value at this x-coordinate
+ * @param y saved interpolated pixel value at this y-coordinate
+ * @param height image height of input and output image in number of pixels
+ * @param width image width of input and output image in number of pixels
+ * @param img_in input image to interpolate pixel values
+ * @param coord_x x-coordinate to interpolate
+ * @param coord_y y-coordinate to interpolate
+ */
// the function body is textually provided by the included template file below
// NOTE(review): this lives inside the disabled '#if 0' region and
// "alg_interpolate.h" is not part of this commit's file list -- confirm before re-enabling
static void interpolate8(uint8_t *img_out, const int x, const int y, const int height, const int width,
		const uint8_t *img_in, const float coord_x, const float coord_y)
#include "alg_interpolate.h"
+
+
+/**
+ * Calculate camera calibration lookup-table.
+ *
+ * @param data required camera calibration data
+ */
+static void calc_calib_scalar(struct cam_calib_data_t *data) {
+
+ int v, u, height, width, fov_x_start, fov_x_end, fov_y_start, fov_y_end;
+ float k1, k2, p1, p2, k3;
+ float fx, fy, cx, cy;
+ float dist_radial, dist_tang_x, dist_tang_y, dist_x, dist_y;
+ float _x, _y, _y_sq, _x_sq, r_sq;
+ float x_corr, y_corr;
+ void *img_calib, *img_uncalib;
+ int bit_channel;
+
+
+ img_calib = data->img_calib;
+ img_uncalib = data->img_in;
+ bit_channel = data->bit_channel;
+ height = data->tot_height;
+ width = data->tot_width;
+ fov_x_start = data->fov_x_start;
+ fov_x_end = data->fov_x_end;
+ fov_y_start = data->fov_y_start;
+ fov_y_end = data->fov_y_end;
+
+ k1 = data->dist_coeff.k1;
+ k2 = data->dist_coeff.k2;
+ p1 = data->dist_coeff.p1;
+ p2 = data->dist_coeff.p2;
+ k3 = data->dist_coeff.k3;
+
+ fx = data->camera_matrix.a11;
+ fy = data->camera_matrix.a22;
+ cx = data->camera_matrix.a13;
+ cy = data->camera_matrix.a23;
+
+
+ for(v = fov_y_start; v <= fov_y_end; v++) {
+
+ _y = (v-cy)/fy;
+ _y_sq = _y*_y;
+
+ for(u = fov_x_start; u <= fov_x_end; u++) {
+
+ _x = (u-cx)/fx;
+ _x_sq = _x*_x;
+
+ r_sq = _y_sq + _x_sq;
+
+ dist_radial = (1 + k1*r_sq + k2*r_sq*r_sq + k3*r_sq*r_sq*r_sq);
+
+ dist_tang_x = 2*p1*_x*_y + p2*(r_sq + 2*_x_sq);
+ dist_x = _x*dist_radial + dist_tang_x;
+
+ dist_tang_y = p1*(r_sq + 2*_y_sq) + 2*p2*_x*_y;
+ dist_y = _y*dist_radial + dist_tang_y;
+
+ x_corr = fx*dist_x + cx - fov_x_start;
+ y_corr = fy*dist_y + cy - fov_y_start;
+
+ if(bit_channel <= 8) {
+ interpolate8(img_calib, u - fov_x_start, v - fov_y_start, height, width, img_uncalib, x_corr, y_corr);
+ }
+ else if(bit_channel <= 16) {
+ interpolate16(img_calib, u - fov_x_start, v - fov_y_start, height, width, img_uncalib, x_corr, y_corr);
+ }
+ }
+ }
+}
+#endif /* __AVX__ */
+#endif
+
+
+
+/**
+ * Calculate undistortion map used for camera calibration.
+ *
+ * The undistortion map corrects the radial and tangential distortion.
+ * The five distortion coefficients are used: k1, k2, p1, p2, k3
+ *
+ * radial distortion causes barrel or pillow effect:
+ *
+ * x_dist = x*(1 + k1*r² + k2*r⁴ + k3*r⁶)
+ * y_dist = y*(1 + k1*r² + k2*r⁴ + k3*r⁶)
+ *
+ *
+ * tangention distortion (lense is not perfectly aligned):
+ *
+ * x_dist = x + (2*p1*x*y + p2*(r² + 2*x²))
+ * y_dist = y + (p1*(r²+2*y²) + 2*p2*x*y)
+ *
+ * In addition, the intrincic parameters of the camera are used containing
+ * informations about the focal length (fx, fy) and optical center (cx, cy).
+ *
+ * | fx 0 cx |
+ * A = | 0 fy cy |
+ * | 0 0 1 |
+ *
+ *
+ * @param data required camera calibration data
+ */
+static void init_undistort_map_scalar(struct cam_calib_data_t *data) {
+
+ int v, u, fov_x_start, fov_x_end, fov_y_start, fov_y_end;
+ float k1, k2, p1, p2, k3;
+ float fx, fy, cx, cy;
+ float dist_radial, dist_tang_x, dist_tang_y, dist_x, dist_y;
+ float _x, _y, _y_sq, _x_sq, r_sq;
+ float x_corr, y_corr;
+ struct lense_undistort_coord_t *map;
+ const int scale_fact = (1 << (data->calib_map_scale_fact));
+
+
+ fov_x_start = data->fov_x_start;
+ fov_x_end = data->fov_x_end;
+ fov_y_start = data->fov_y_start;
+ fov_y_end = data->fov_y_end;
+
+ k1 = data->dist_coeff.k1;
+ k2 = data->dist_coeff.k2;
+ p1 = data->dist_coeff.p1;
+ p2 = data->dist_coeff.p2;
+ k3 = data->dist_coeff.k3;
+
+ fx = data->camera_matrix.a11;
+ fy = data->camera_matrix.a22;
+ cx = data->camera_matrix.a13;
+ cy = data->camera_matrix.a23;
+
+ map = data->calib_map;
+
+ for(v = fov_y_start; v <= fov_y_end; v++) {
+
+ _y = (v-cy)/fy;
+ _y_sq = _y*_y;
+
+ for(u = fov_x_start; u <= fov_x_end; u++) {
+
+ _x = (u-cx)/fx;
+ _x_sq = _x*_x;
+
+ r_sq = _y_sq + _x_sq;
+
+ dist_radial = (1 + k1*r_sq + k2*r_sq*r_sq + k3*r_sq*r_sq*r_sq);
+
+ dist_tang_x = 2*p1*_x*_y + p2*(r_sq + 2*_x_sq);
+ dist_x = _x*dist_radial + dist_tang_x;
+
+ dist_tang_y = p1*(r_sq + 2*_y_sq) + 2*p2*_x*_y;
+ dist_y = _y*dist_radial + dist_tang_y;
+
+ x_corr = fx*dist_x + cx - fov_x_start;
+ y_corr = fy*dist_y + cy - fov_y_start;
+
+ map->coord_x = (int)roundf(x_corr*scale_fact);
+ map->coord_y = (int)roundf(y_corr*scale_fact);
+ map++;
+ }
+ }
+}
+
+
+/**
+ * Pixel value interpolation of RGB image (8 bit per color channel).
+ * If a pixel coordinate with a fraction part is of interest, do interpolate the correct value from their neighbor's pixel.
+ *
+ * E. g. the pixel coordinate x/y = 1.8/2.3 gives the following weights:
+ * +------+------+
+ * | | |
+ * | 14% | 56% | 14% = 20%*70%, 56% = 80%*70%
+ * | | |
+ * +------+------+
+ * | | |
+ * | 6% | 24% | 6% = 20%*30%, 24% = 80%*30%
+ * | | |
+ * +------+------+
+ *
+ * The weights are applied to the neighors and the resulting pixel value is saved at the given location.
+ *
+ * NOTE
+ * The input and output image must have the same pixel size.
+ *
+ * @param img_out On return: image with interpolated values
+ * @param x saved interpolated pixel value at this x-coordinate
+ * @param y saved interpolated pixel value at this y-coordinate
+ * @param height image height of input and output image in number of pixels
+ * @param width image width of input and output image in number of pixels
+ * @param img_in input image to interpolate pixel values
+ * @param coord_x x-coordinate to interpolate
+ * @param coord_y y-coordinate to interpolate
+ * @param scale_fact coordinates are scaled by this factor
+ */
// the function body is textually provided by the shared template included below
// (this signature instantiates the 8-bit-per-channel RGB variant)
static void interpolate_rgb8_scalar(uint8_t *img_out, const int x, const int y, const int height, const int width,
		const uint8_t *img_in, const int coord_x, const int coord_y, const int scale_fact)
#include "alg_interpolate_rgb_scalar.h"
+
+
+/**
+ * Pixel value interpolation of RGB image (16 bit per color channel).
+ * If a pixel coordinate with a fraction part is of interest, do interpolate the correct value from their neighbor's pixel.
+ *
+ * E. g. the pixel coordinate x/y = 1.8/2.3 gives the following weights:
+ * +------+------+
+ * | | |
+ * | 14% | 56% | 14% = 20%*70%, 56% = 80%*70%
+ * | | |
+ * +------+------+
+ * | | |
+ * | 6% | 24% | 6% = 20%*30%, 24% = 80%*30%
+ * | | |
+ * +------+------+
+ *
+ * The weights are applied to the neighors and the resulting pixel value is saved at the given location.
+ *
+ * NOTE
+ * The input and output image must have the same pixel size.
+ *
+ * @param img_out On return: image with interpolated values
+ * @param x saved interpolated pixel value at this x-coordinate
+ * @param y saved interpolated pixel value at this y-coordinate
+ * @param height image height of input and output image in number of pixels
+ * @param width image width of input and output image in number of pixels
+ * @param img_in input image to interpolate pixel values
+ * @param coord_x x-coordinate to interpolate
+ * @param coord_y y-coordinate to interpolate
+ * @param scale_fact coordinates are scaled by this factor
+ */
// the function body is textually provided by the shared template included below
// (this signature instantiates the 16-bit-per-channel RGB variant)
static void interpolate_rgb16_scalar(uint16_t *img_out, const int x, const int y, const int height, const int width,
		const uint16_t *img_in, const int coord_x, const int coord_y, const int scale_fact)
#include "alg_interpolate_rgb_scalar.h"
+
+
+/**
+ * Pixel value interpolation of monochrome image (8 bit per pixel).
+ * If a pixel coordinate with a fraction part is of interest, do interpolate the correct value from their neighbor's pixel.
+ *
+ * E. g. the pixel coordinate x/y = 1.8/2.3 gives the following weights:
+ * +------+------+
+ * | | |
+ * | 14% | 56% | 14% = 20%*70%, 56% = 80%*70%
+ * | | |
+ * +------+------+
+ * | | |
+ * | 6% | 24% | 6% = 20%*30%, 24% = 80%*30%
+ * | | |
+ * +------+------+
+ *
+ * The weights are applied to the neighors and the resulting pixel value is saved at the given location.
+ *
+ * NOTE
+ * The input and output image must have the same pixel size.
+ *
+ * @param img_out On return: image with interpolated values
+ * @param x saved interpolated pixel value at this x-coordinate
+ * @param y saved interpolated pixel value at this y-coordinate
+ * @param height image height of input and output image in number of pixels
+ * @param width image width of input and output image in number of pixels
+ * @param img_in input image to interpolate pixel values
+ * @param coord_x x-coordinate to interpolate
+ * @param coord_y y-coordinate to interpolate
+ * @param scale_fact coordinates are scaled by this factor
+ */
// the function body is textually provided by the shared template included below
// (this signature instantiates the 8-bit monochrome variant)
static void interpolate_mono8_scalar(uint8_t *img_out, const int x, const int y, const int height, const int width,
		const uint8_t *img_in, const int coord_x, const int coord_y, const int scale_fact)
#include "alg_interpolate_mono_scalar.h"
+
+
+/**
+ * Pixel value interpolation of monochrome image (16 bit per pixel).
+ * If a pixel coordinate with a fraction part is of interest, do interpolate the correct value from their neighbor's pixel.
+ *
+ * E. g. the pixel coordinate x/y = 1.8/2.3 gives the following weights:
+ * +------+------+
+ * | | |
+ * | 14% | 56% | 14% = 20%*70%, 56% = 80%*70%
+ * | | |
+ * +------+------+
+ * | | |
+ * | 6% | 24% | 6% = 20%*30%, 24% = 80%*30%
+ * | | |
+ * +------+------+
+ *
+ * The weights are applied to the neighors and the resulting pixel value is saved at the given location.
+ *
+ * NOTE
+ * The input and output image must have the same pixel size.
+ *
+ * @param img_out On return: image with interpolated values
+ * @param x saved interpolated pixel value at this x-coordinate
+ * @param y saved interpolated pixel value at this y-coordinate
+ * @param height image height of input and output image in number of pixels
+ * @param width image width of input and output image in number of pixels
+ * @param img_in input image to interpolate pixel values
+ * @param coord_x x-coordinate to interpolate
+ * @param coord_y y-coordinate to interpolate
+ * @param scale_fact coordinates are scaled by this factor
+ */
// the function body is textually provided by the shared template included below
// (this signature instantiates the 16-bit monochrome variant)
static void interpolate_mono16_scalar(uint16_t *img_out, const int x, const int y, const int height, const int width,
		const uint16_t *img_in, const int coord_x, const int coord_y, const int scale_fact)
#include "alg_interpolate_mono_scalar.h"
+
+
+/**
+ * Lense calibration using bilinear interpolation.
+ *
+ * @param data required camera calibration data
+ */
+static void calib_scalar(struct cam_calib_data_t *data) {
+
+ int v, u, height, width, fov_x_start, fov_x_end, fov_y_start, fov_y_end;
+ int x_corr, y_corr;
+ void *img_calib, *img_uncalib;
+ int bit_channel;
+ struct lense_undistort_coord_t *map;
+ const int scale_fact = data->calib_map_scale_fact;
+ const int is_color = data->is_color;
+
+ img_calib = data->img_calib;
+ img_uncalib = data->img_in;
+ height = data->tot_height;
+ width = data->tot_width;
+ fov_x_start = data->fov_x_start;
+ fov_x_end = data->fov_x_end;
+ fov_y_start = data->fov_y_start;
+ fov_y_end = data->fov_y_end;
+ bit_channel = data->bit_channel;
+ map = data->calib_map;
+
+ for(v = fov_y_start; v <= fov_y_end; v++) {
+ for(u = fov_x_start; u <= fov_x_end; u++) {
+
+ x_corr = map->coord_x;
+ y_corr = map->coord_y;
+ map++;
+
+ if(bit_channel <= 8) {
+ if(is_color) {
+ interpolate_rgb8_scalar(img_calib, u - fov_x_start, v - fov_y_start, height, width, img_uncalib, x_corr, y_corr, scale_fact);
+ }
+ else {
+ interpolate_mono8_scalar(img_calib, u - fov_x_start, v - fov_y_start, height, width, img_uncalib, x_corr, y_corr, scale_fact);
+ }
+ }
+ else if(bit_channel <= 16) {
+ if(is_color) {
+ interpolate_rgb16_scalar(img_calib, u - fov_x_start, v - fov_y_start, height, width, img_uncalib, x_corr, y_corr, scale_fact);
+ }
+ else {
+ interpolate_mono16_scalar(img_calib, u - fov_x_start, v - fov_y_start, height, width, img_uncalib, x_corr, y_corr, scale_fact);
+ }
+ }
+ }
+ }
+}
+
+
+/**
+ * Perform camera calibration by applying following algorithms:
+ * - radial and tangential lens distortion correction
+ * - bilinear interpolation to calibrate image
+ *
+ * The undistorted (calibrated) and distorted (input) image must be the same size.
+ *
+ * @param data required camera calibration data
+ * @return 0 on success otherwise -1
+ */
+int camera_calib(struct cam_calib_data_t *data) {
+
+ /*
+ * Check whether undistortion map (lookup-table) is initialized.
+ */
+ if(data->undistort_map_init == 0) {
+ data->calib_map_scale_fact = 9; // scale by 9 means 2^9 = 512
+ init_undistort_map_scalar(data);
+ data->undistort_map_init = 1;
+ }
+
+ calib_scalar(data);
+ return 0;
+}
+
+
+
+
+
+
diff --git a/cmake/toolchain_file_template.cmake b/cmake/toolchain_file_template.cmake
new file mode 100644
index 0000000..7e781e5
--- /dev/null
+++ b/cmake/toolchain_file_template.cmake
@@ -0,0 +1,17 @@
+# this one is important
+SET(CMAKE_SYSTEM_NAME Linux)
+# this one is less important
+SET(CMAKE_SYSTEM_VERSION 1)
+
+# specify the cross compiler
+SET(CMAKE_C_COMPILER /opt/eldk-2007-01-19/usr/bin/ppc_74xx-gcc)
+SET(CMAKE_CXX_COMPILER /opt/eldk-2007-01-19/usr/bin/ppc_74xx-g++)
+
+# where is the target environment
+SET(CMAKE_FIND_ROOT_PATH /opt/eldk-2007-01-19/ppc_74xx /home/alex/eldk-ppc74xx-inst)
+
+# search for programs in the build host directories
+SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+# for libraries and headers in the target directories
+SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
diff --git a/color.c b/color.c
new file mode 100644
index 0000000..4802083
--- /dev/null
+++ b/color.c
@@ -0,0 +1,422 @@
+/**
+* @file color.c
+* @brief color space conversion utilities
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-08-20
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+
+#if (WITH_SIMD == 1)
+#include <immintrin.h> // see /usr/lib64/gcc/x86_64-suse-linux/4.7/include/immintrin.h
+#endif // WITH_SIMD
+
+#include "color.h"
+#include "color_pipe_private.h"
+
+
+/**
+ * RGB image (16 bit per color channel) to YUV color space conversion (scalar code).
+ *
+ * The function body is shared with the 8 bit variant and is pulled in from the
+ * algorithm template header included directly below the signature.
+ *
+ * @param img_yuv On return: image in YUV color space (this buffer must be allocated externly)
+ * @param img_rgb RGB image
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ */
+static void rgb16_to_yuv_scalar(int16_t *img_yuv, const uint16_t *img_rgb, const int height, const int width)
+#include "alg_rgb_to_yuv.h"
+
+
+#ifdef __SSSE3__
+/**
+ * RGB image (8 bit per color channel) to YUV color space conversion (SSSE3 vector code).
+ *
+ * Eight pixels are converted per loop iteration using fixed-point arithmetic
+ * (scale factor 2^7); the remaining (width*height % 8) pixels are handled by a
+ * scalar tail loop.
+ *
+ * NOTE(review): img_yuv is written with _mm_store_si128 and must therefore be
+ * 16-byte aligned -- the pipeline allocates buffers with ALIGNMENT_SIZE (32),
+ * verify this holds for every caller. Also, <immintrin.h> is only included
+ * when WITH_SIMD == 1 while this code is compiled when __SSSE3__ is defined --
+ * confirm the two guards always agree.
+ *
+ * @param img_yuv On return: image in YUV color space (this buffer must be allocated externly)
+ * @param img_rgb RGB image
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ */
+static void rgb8_to_yuv_vector(int16_t *img_yuv, const uint8_t *img_rgb, const int height, const int width) {
+	int i, size_mod8;
+	uint8_t *input_img;
+	int px_blue, px_green, px_red;
+	int px_y, px_u, px_v;
+
+	const int scale_fact = 7; // more than 7 is not possible due to overflow
+
+	const int y_r = (int)roundf(RGB2YUV_COEFF_Y_RED*(1<<scale_fact));
+	const int y_g = (int)roundf(RGB2YUV_COEFF_Y_GREEN*(1<<scale_fact));
+	const int y_b = (int)roundf(RGB2YUV_COEFF_Y_BLUE*(1<<scale_fact));
+
+	const int u_r = (int)roundf(RGB2YUV_COEFF_U_RED*(1<<scale_fact));
+	const int u_g = (int)roundf(RGB2YUV_COEFF_U_GREEN*(1<<scale_fact));
+	const int u_b = (int)roundf(RGB2YUV_COEFF_U_BLUE*(1<<scale_fact));
+
+	const int v_r = (int)roundf(RGB2YUV_COEFF_V_RED*(1<<scale_fact));
+	const int v_g = (int)roundf(RGB2YUV_COEFF_V_GREEN*(1<<scale_fact));
+	const int v_b = (int)roundf(RGB2YUV_COEFF_V_BLUE*(1<<scale_fact));
+
+	__m128i px_buf1, px_buf2;
+	__m128i mask_red1, mask_red2, red1, red2, red;
+	__m128i mask_green1, mask_green2, green1, green2, green;
+	__m128i mask_blue1, mask_blue2, blue1, blue2, blue;
+
+	__m128i ch_y, coeff_y_red, coeff_y_green, coeff_y_blue;
+	__m128i ch_u, coeff_u_red, coeff_u_green, coeff_u_blue;
+	__m128i ch_v, coeff_v_red, coeff_v_green, coeff_v_blue;
+	__m128i red_tmp, green_tmp, blue_tmp;
+
+	__m128i mask_y1, mask_y2, mask_y3, y1, y2, y3;
+	__m128i mask_u1, mask_u2, mask_u3, u1, u2, u3;
+	__m128i mask_v1, mask_v2, mask_v3, v1, v2, v3;
+
+	__m128i yuv1, yuv2, yuv3;
+
+
+	input_img = (uint8_t*)img_rgb;
+	size_mod8 = (width*height/8)*8;	// image size must be divisible by 8 because 8 pixel are processed in parallel
+
+	mask_red1 = _mm_set_epi8(-1, -1, -1, -1, -1, 15, -1, 12, -1, 9, -1, 6, -1, 3, -1, 0);
+	mask_red2 = _mm_set_epi8(-1, 5, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+
+	mask_green1 = _mm_set_epi8(-1, -1, -1, -1, -1, -1, -1, 13, -1, 10, -1, 7, -1, 4, -1, 1);
+	mask_green2 = _mm_set_epi8(-1, 6, -1, 3, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+
+	mask_blue1 = _mm_set_epi8(-1, -1, -1, -1, -1, -1, -1, 14, -1, 11, -1, 8, -1, 5, -1, 2);
+	mask_blue2 = _mm_set_epi8(-1, 7, -1, 4, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+
+	mask_y1 = _mm_set_epi8(-1, -1, 5, 4, -1, -1, -1, -1, 3, 2, -1, -1, -1, -1, 1, 0);
+	mask_y2 = _mm_set_epi8(11, 10, -1, -1, -1, -1, 9, 8, -1, -1, -1, -1, 7, 6, -1, -1);
+	mask_y3 = _mm_set_epi8(-1, -1, -1, -1, 15, 14, -1, -1, -1, -1, 13, 12, -1, -1, -1, -1);
+
+	mask_u1 = _mm_set_epi8(5, 4, -1, -1, -1, -1, 3, 2, -1, -1, -1, -1, 1, 0, -1, -1);
+	mask_u2 = _mm_set_epi8(-1, -1, -1, -1, 9, 8, -1, -1, -1, -1, 7, 6, -1, -1, -1, -1);
+	mask_u3 = _mm_set_epi8(-1, -1, 15, 14, -1, -1, -1, -1, 13, 12, -1, -1, -1, -1, 11, 10);
+
+	mask_v1 = _mm_set_epi8(-1, -1, -1, -1, 3, 2, -1, -1, -1, -1, 1, 0, -1, -1, -1, -1);
+	mask_v2 = _mm_set_epi8(-1, -1, 9, 8, -1, -1, -1, -1, 7, 6, -1, -1, -1, -1, 5, 4);
+	mask_v3 = _mm_set_epi8(15, 14, -1, -1, -1, -1, 13, 12, -1, -1, -1, -1, 11, 10, -1, -1);
+
+
+	/*
+	 * RGB -> YUV transformation coefficient
+	 * The values are scaled defined by scale_fact.
+	 *
+	 * y = 0.299*red + 0.587*green + 0.114*blue
+	 * u = -0.299*red - 0.587*green + 0.886*blue
+	 * v = 0.701*red - 0.587*green - 0.114*blue
+	 */
+	coeff_y_red = _mm_set1_epi16((short)y_r);
+	coeff_y_green = _mm_set1_epi16((short)y_g);
+	coeff_y_blue = _mm_set1_epi16((short)y_b);
+
+	coeff_u_red = _mm_set1_epi16((short)u_r);
+	coeff_u_green = _mm_set1_epi16((short)u_g);
+	coeff_u_blue = _mm_set1_epi16((short)u_b);
+
+	coeff_v_red = _mm_set1_epi16((short)v_r);
+	coeff_v_green = _mm_set1_epi16((short)v_g);
+	coeff_v_blue = _mm_set1_epi16((short)v_b);
+
+
+	// process 8 RGB-pixel-pair in parallel
+	for(i = 0; i < size_mod8; i+=8) {
+
+		// load 128 bit pixel value into SIMD register
+		px_buf1 = _mm_lddqu_si128((__m128i*)(input_img));
+		input_img += 16;
+		px_buf2 = _mm_lddqu_si128((__m128i*)(input_img));
+		input_img += 8;
+
+		// get first 6 red pixels
+		red1 = _mm_shuffle_epi8(px_buf1, mask_red1);
+
+		// get next 2 red pixels
+		red2 = _mm_shuffle_epi8(px_buf2, mask_red2);
+
+		// combine to 8 red pixels
+		red = _mm_or_si128(red1, red2);
+
+
+		// get first 5 green pixels
+		green1 = _mm_shuffle_epi8(px_buf1, mask_green1);
+
+		// get next 3 green pixels
+		green2 = _mm_shuffle_epi8(px_buf2, mask_green2);
+
+		// combine to 8 green pixels
+		green = _mm_or_si128(green1, green2);
+
+
+		// get first 5 blue pixels
+		blue1 = _mm_shuffle_epi8(px_buf1, mask_blue1);
+
+		// get next 3 blue pixels
+		blue2 = _mm_shuffle_epi8(px_buf2, mask_blue2);
+
+		// combine to 8 blue pixels
+		blue = _mm_or_si128(blue1, blue2);
+
+
+		/*
+		 * calculate 8 Y-channel pixels
+		 */
+		red_tmp = _mm_mullo_epi16(coeff_y_red, red);
+		green_tmp = _mm_mullo_epi16(coeff_y_green, green);
+		blue_tmp = _mm_mullo_epi16(coeff_y_blue, blue);
+		ch_y = _mm_add_epi16(_mm_add_epi16(red_tmp, green_tmp), blue_tmp);
+		ch_y = _mm_srai_epi16(ch_y, scale_fact);
+
+		/*
+		 * calculate 8 U-channel pixels
+		 */
+		red_tmp = _mm_mullo_epi16(coeff_u_red, red);
+		green_tmp = _mm_mullo_epi16(coeff_u_green, green);
+		blue_tmp = _mm_mullo_epi16(coeff_u_blue, blue);
+		ch_u = _mm_add_epi16(_mm_add_epi16(red_tmp, green_tmp), blue_tmp);
+		ch_u = _mm_srai_epi16(ch_u, scale_fact);
+
+		/*
+		 * calculate 8 V-channel pixels
+		 */
+		red_tmp = _mm_mullo_epi16(coeff_v_red, red);
+		green_tmp = _mm_mullo_epi16(coeff_v_green, green);
+		blue_tmp = _mm_mullo_epi16(coeff_v_blue, blue);
+		ch_v = _mm_add_epi16(_mm_add_epi16(red_tmp, green_tmp), blue_tmp);
+		ch_v = _mm_srai_epi16(ch_v, scale_fact);
+
+
+		/*
+		 * Store separate YUV buffer to one YUV image stream
+		 */
+		y1 = _mm_shuffle_epi8(ch_y, mask_y1);
+		y2 = _mm_shuffle_epi8(ch_y, mask_y2);
+		y3 = _mm_shuffle_epi8(ch_y, mask_y3);
+
+		u1 = _mm_shuffle_epi8(ch_u, mask_u1);
+		u2 = _mm_shuffle_epi8(ch_u, mask_u2);
+		u3 = _mm_shuffle_epi8(ch_u, mask_u3);
+
+		v1 = _mm_shuffle_epi8(ch_v, mask_v1);
+		v2 = _mm_shuffle_epi8(ch_v, mask_v2);
+		v3 = _mm_shuffle_epi8(ch_v, mask_v3);
+
+		yuv1 = _mm_or_si128(y1, _mm_or_si128(u1, v1));
+		yuv2 = _mm_or_si128(y2, _mm_or_si128(u2, v2));
+		yuv3 = _mm_or_si128(y3, _mm_or_si128(u3, v3));
+
+		/*
+		 * Store 3 YUV SIMD register into memory
+		 */
+		_mm_store_si128((__m128i*)img_yuv, yuv1);
+		img_yuv += 8;
+		_mm_store_si128((__m128i*)img_yuv, yuv2);
+		img_yuv += 8;
+		_mm_store_si128((__m128i*)img_yuv, yuv3);
+		img_yuv += 8;
+	}
+
+
+	// scalar tail loop for the remaining (width*height % 8) pixels
+	for(i = size_mod8; i < (width*height); i++) {
+
+		// put each RGB color on stack
+		px_red = *input_img;
+		input_img++;
+		px_green = *input_img;
+		input_img++;
+		px_blue = *input_img;
+		input_img++;	// bug fix: advance past blue so the next pixel starts at its red byte
+
+		// color space conversion from RGB to YUV
+		px_y = (y_r*px_red + y_g*px_green + y_b*px_blue) >> scale_fact;
+		px_u = (u_r*px_red + u_g*px_green + u_b*px_blue) >> scale_fact;
+		px_v = (v_r*px_red + v_g*px_green + v_b*px_blue) >> scale_fact;
+
+		*img_yuv = px_y;
+		img_yuv++;
+		*img_yuv = px_u;
+		img_yuv++;
+		*img_yuv = px_v;
+		img_yuv++;
+	}
+}
+
+#else
+
+/**
+ * RGB image (8 bit per color channel) to YUV color space conversion (scalar code).
+ *
+ * Fallback used when the build target has no SSSE3 support; the function body
+ * is pulled in from the algorithm template header included below the signature.
+ *
+ * @param img_yuv On return: image in YUV color space (this buffer must be allocated externly)
+ * @param img_rgb RGB image
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ */
+static void rgb8_to_yuv_scalar(int16_t *img_yuv, const uint8_t *img_rgb, const int height, const int width)
+#include "alg_rgb_to_yuv.h"
+
+#endif // __SSSE3__
+
+
+/**
+ * YUV to RGB (8 bit per color channel) color space conversion.
+ *
+ * The function body is pulled in from the algorithm template header below.
+ *
+ * @param img_rgb On return: image in RGB space (this buffer must be allocated externly)
+ * @param img_yuv YUV image
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param pix_max maximum pixel value used for clamping (e.g. 255 for 8 bit)
+ */
+static void yuv_to_rgb8(uint8_t *img_rgb, const int16_t *img_yuv, const int height, const int width, const int pix_max)
+#include "alg_yuv_to_rgb.h"
+
+
+/**
+ * YUV to RGB (16 bit per color channel) color space conversion.
+ *
+ * The function body is pulled in from the algorithm template header below.
+ *
+ * @param img_rgb On return: image in RGB space (this buffer must be allocated externly)
+ * @param img_yuv YUV image
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param pix_max maximum pixel value used for clamping (e.g. 4095 for 12 bit)
+ */
+static void yuv_to_rgb16(uint16_t *img_rgb, const int16_t *img_yuv, const int height, const int width, const int pix_max)
+#include "alg_yuv_to_rgb.h"
+
+
+/**
+ * Apply color correction matrix on given RGB image (16 bit per color channel).
+ *
+ * The function body is pulled in from the algorithm template header below.
+ *
+ * @param img_calib On return: color calibrated image
+ * @param img_uncalib input image to calibrate
+ * @param color_bit_depth color channel bit depth (all channel have the same bit depth)
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param a 3x3 color correction matrix
+ */
+static void rgb_color_correction16(uint16_t *img_calib, const uint16_t *img_uncalib,
+				   const int color_bit_depth, const int height, const int width, float a[3][3])
+#include "alg_ccm.h"
+
+
+/**
+ * Apply color correction matrix on given RGB image (8 bit per color channel).
+ *
+ * The function body is pulled in from the algorithm template header below.
+ *
+ * @param img_calib On return: color calibrated image
+ * @param img_uncalib input image to calibrate
+ * @param color_bit_depth color channel bit depth (all channel have the same bit depth)
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param a 3x3 color correction matrix
+ */
+static void rgb_color_correction8(uint8_t *img_calib, const uint8_t *img_uncalib,
+				  const int color_bit_depth, const int height, const int width, float a[3][3])
+#include "alg_ccm.h"
+
+
+/**
+ * RGB to YUV color space conversion.
+ *
+ * Dispatches to the vector (SSSE3) or scalar 8 bit implementation, or to the
+ * scalar 16 bit implementation, depending on the channel bit depth.
+ *
+ * @param img_yuv On return: image in YUV color space (this buffer must be allocated externly)
+ * @param img_rgb RGB image
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param bit_channel bits per color channel
+ * @return 0 on success otherwise -1
+ */
+int color_rgb_to_yuv(int16_t *img_yuv, const void *img_rgb, const int height, const int width, const int bit_channel) {
+
+	if(bit_channel <= 8) {
+#ifdef __SSSE3__
+		rgb8_to_yuv_vector(img_yuv, img_rgb, height, width);
+#else
+		rgb8_to_yuv_scalar(img_yuv, img_rgb, height, width);
+#endif
+	}
+	else if(bit_channel <= 16) {
+		rgb16_to_yuv_scalar(img_yuv, img_rgb, height, width);
+	}
+	else {
+		// consistency fix: report unsupported depths instead of silently
+		// returning success (matches color_yuv_to_rgb() behaviour)
+		printf("%s: Color space conversion on images with %d bits per color channel not implemented yet\n", __func__, bit_channel);
+		return -1;
+	}
+	return 0;
+}
+
+
+/**
+ * YUV to RGB color space conversion.
+ *
+ * @param img_rgb On return: image in RGB space (this buffer must be allocated externly)
+ * @param img_yuv YUV image
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param bit_channel bits per color channel of RGB image
+ * @return 0 on success otherwise -1
+ */
+int color_yuv_to_rgb(void *img_rgb, const int16_t *img_yuv, const int height, const int width, const int bit_channel) {
+	int pix_max;
+
+	// validate the bit depth before using it as a shift count:
+	// (1 << bit_channel) is undefined for bit_channel >= 31
+	if(bit_channel > 16) {
+		printf("%s: Color space conversion on images with %d bits per color channel not implemented yet\n", __func__, bit_channel);
+		return -1;
+	}
+
+	// maximum representable pixel value, used for clamping after conversion
+	pix_max = (1<<bit_channel) - 1;
+
+	if(bit_channel <= 8) {
+		yuv_to_rgb8(img_rgb, img_yuv, height, width, pix_max);
+	}
+	else {
+		yuv_to_rgb16(img_rgb, img_yuv, height, width, pix_max);
+	}
+	return 0;
+}
+
+
+/**
+ * Apply color calibration.
+ *
+ * Dispatches to the 8 bit or 16 bit color-correction-matrix implementation
+ * depending on the configured channel bit depth.
+ *
+ * @param color_calib required color calibration data
+ * @return 0 on success otherwise -1
+ */
+int color_calib(struct color_calib_data_t *color_calib) {
+
+	int ret, bit_channel;
+
+
+	bit_channel = color_calib->bit_channel;
+	ret = 0;
+
+	if(bit_channel <= 8) {
+		rgb_color_correction8(color_calib->img_calib, color_calib->img_in, 8, color_calib->height, color_calib->width, color_calib->a);
+	}
+	else if(bit_channel <= 16) {
+		// NOTE(review): the bit depth passed here is hard-coded to 12 (the
+		// sensor delivers 12 bit) although any depth up to 16 is accepted
+		// by this branch -- confirm this is intended for all inputs.
+		rgb_color_correction16(color_calib->img_calib, color_calib->img_in, 12, color_calib->height, color_calib->width, color_calib->a);
+	}
+	else {
+		printf("%s: Color calibration not possible on images with %d bits per color channel\n", __func__, bit_channel);
+		ret = -1;
+	}
+	return ret;
+}
+
+
+
+
+
diff --git a/color.h b/color.h
new file mode 100644
index 0000000..73bff58
--- /dev/null
+++ b/color.h
@@ -0,0 +1,64 @@
+/**
+* @file color.h
+* @brief color utility function definition
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-08-20
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+
+#ifndef _COLOR_H
+#define _COLOR_H
+
+#include "color_pipe.h"
+
+
+#define RGB2YUV_COEFF_Y_RED 0.299 ///< RGB to YUV conversion coefficient for Y-Channel: red
+#define RGB2YUV_COEFF_Y_GREEN 0.587 ///< RGB to YUV conversion coefficient for Y-Channel: green
+#define RGB2YUV_COEFF_Y_BLUE 0.114 ///< RGB to YUV conversion coefficient for Y-Channel: blue
+
+#define RGB2YUV_COEFF_U_RED (-0.299) ///< RGB to YUV conversion coefficient for U-Channel: red
+#define RGB2YUV_COEFF_U_GREEN (-0.587) ///< RGB to YUV conversion coefficient for U-Channel: green
+#define RGB2YUV_COEFF_U_BLUE 0.886 ///< RGB to YUV conversion coefficient for U-Channel: blue
+
+#define RGB2YUV_COEFF_V_RED 0.701 ///< RGB to YUV conversion coefficient for V-Channel: red
+#define RGB2YUV_COEFF_V_GREEN (-0.587) ///< RGB to YUV conversion coefficient for V-Channel: green
+#define RGB2YUV_COEFF_V_BLUE (-0.114) ///< RGB to YUV conversion coefficient for V-Channel: blue
+
+
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+int color_rgb_to_yuv(int16_t *img_yuv, const void *img_rgb, const int height, const int width, const int bit_channel);
+int color_yuv_to_rgb(void *img_rgb, const int16_t *img_yuv, const int height, const int width, const int bit_channel);
+
+int color_calib(struct color_calib_data_t *color_calib);
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
+
+
+#endif // _COLOR_H
diff --git a/color_pipe.c b/color_pipe.c
new file mode 100644
index 0000000..986989f
--- /dev/null
+++ b/color_pipe.c
@@ -0,0 +1,969 @@
+/**
+* @file color_pipe.c
+* @brief Color Image Processing Pipeline with O-3000 USB camera
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-02-10
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <stdio.h>
+
+#include "color_pipe.h"
+#include "color_pipe_private.h"
+#include "color.h"
+
+
+/**
+ * Logging macro for string error logging (libc). This macro inserts strerror(errno) in a suitable way.
+ */
+#define PRINTF_ERRNO(x) \
+ printf(x" in %s() line %d failed: %s\n", __func__, __LINE__-1, strerror(errno));
+
+
+/**
+ * Alignment size in bytes.
+ * Image buffers are aligned to this given boundary.
+ */
+#define ALIGNMENT_SIZE 32
+
+
+/**
+ * Coefficient definition used to undistort lenses.
+ */
+struct o3000_lens_coeffs_t {
+ struct dist_coeff_t dist_coeff; ///< distortion coefficients
+ struct camera_matrix_t camera_matrix; ///< camera matrix
+};
+
+
+/**
+ * Lens undistortion coefficients of various lens types supplied by
+ * Stettbacher Signal Processing.
+ */
+const static struct o3000_lens_coeffs_t o3000_lens_coeffs[] = {
+
+ // S-mount, focal length 2.8m, aperture 2.0 (O3000_LS_F2_8)
+ {
+ .dist_coeff = {
+ .k1 = -1.7989363928888906e+01,
+ .k2 = 4.2371667641386335e+02,
+ .p1 = -5.5177683005299717e-03,
+ .p2 = -1.8027296799469215e-02,
+ .k3 = -5.1212552122750130e+03,
+ },
+
+ .camera_matrix = {
+ .a11 = 5.5641130307342128e+03,
+ .a12 = 0,
+ .a13 = 6.4044160626552366e+02,
+ .a21 = 0,
+ .a22 = 5.5583733034586849e+03,
+ .a23 = 5.3305307740745866e+02,
+ .a31 = 0,
+ .a32 = 0,
+ .a33 = 1.0,
+ },
+ },
+
+ // S-mount, focal length 4.2mm, aperture 1.8 (O3000_LS_F4_2)
+ {
+ .dist_coeff = {
+ .k1 = -5.6100382549536558e+00,
+ .k2 = 3.7504235968196980e+01,
+ .p1 = -1.1849075953406191e-02,
+ .p2 = -2.0833317381133629e-02,
+ .k3 = -1.4657907716904774e+02,
+ },
+
+ .camera_matrix = {
+ .a11 = 3.8385004722247168e+03,
+ .a12 = 0,
+ .a13 = 6.5463814905337483e+02,
+ .a21 = 0,
+ .a22 = 3.8289385545784967e+03,
+ .a23 = 5.4227950629136478e+02,
+ .a31 = 0,
+ .a32 = 0,
+ .a33 = 1.0,
+ },
+ },
+
+ // S-mount, focal length 6.0mm, aperture 1.8 (O3000_LS_F6_0)
+ {
+ .dist_coeff = {
+ .k1 = -3.2037738664730195e+00,
+ .k2 = 1.1127115993662951e+01,
+ .p1 = -1.6455451408872675e-02,
+ .p2 = -2.4114999934222298e-02,
+ .k3 = -1.2882650294739891e+01,
+ },
+
+ .camera_matrix = {
+ .a11 = 3.7083736372135381e+03,
+ .a12 = 0,
+ .a13 = 6.6465346812371035e+02,
+ .a21 = 0,
+ .a22 = 3.6972315248821769e+03,
+ .a23 = 5.5003224793025629e+02,
+ .a31 = 0,
+ .a32 = 0,
+ .a33 = 1.0,
+ },
+ },
+
+ // S-mount, focal length 8.0mm, aperture 1.8 (O3000_LS_F8_0)
+ {
+ .dist_coeff = {
+ .k1 = -2.4661259044966712e+00,
+ .k2 = 1.1778658083457410e+00,
+ .p1 = -8.5928173466905556e-03,
+ .p2 = -1.4375183749585565e-02,
+ .k3 = 1.4290871342330237e+02,
+ },
+
+ .camera_matrix = {
+ .a11 = 4.3637409203781626e+03,
+ .a12 = 0,
+ .a13 = 6.6812858595376599e+02,
+ .a21 = 0,
+ .a22 = 4.3451519470626554e+03,
+ .a23 = 5.5034252965175574e+02,
+ .a31 = 0,
+ .a32 = 0,
+ .a33 = 1.0,
+ },
+ },
+
+ // S-mount, focal length 12.0mm, aperture 1.8 (O3000_LS_F12_0)
+ {
+ .dist_coeff = {
+ .k1 = -5.3454594843785479e+00,
+ .k2 = 6.4871676948306629e+01,
+ .p1 = 1.0455391312916947e-01,
+ .p2 = 4.7057889548236420e-02,
+ .k3 = 1.2045606388669163e+00,
+ },
+
+ .camera_matrix = {
+ .a11 = 1.0122924739235064e+04,
+ .a12 = 0,
+ .a13 = 5.4063808328357356e+02,
+ .a21 = 0,
+ .a22 = 1.0091265861649332e+04,
+ .a23 = 3.2225828876237193e+02,
+ .a31 = 0,
+ .a32 = 0,
+ .a33 = 1.0,
+ },
+ },
+
+ // CS-mount, focal length 2.8mm, aperture 1.6 (O3000_LCS_F2_8)
+ {
+ .dist_coeff = {
+ .k1 = -4.2767583407486480e+00,
+ .k2 = 2.6248731301034013e+01,
+ .p1 = 7.8609123258541538e-03,
+ .p2 = 3.5374054685996053e-03,
+ .k3 = -8.9935343886238059e+01,
+ },
+
+ .camera_matrix = {
+ .a11 = 2.6998000732890600e+03,
+ .a12 = 0,
+ .a13 = 6.3616455649992679e+02,
+ .a21 = 0,
+ .a22 = 2.6987125203839237e+03,
+ .a23 = 4.4895958452543323e+02,
+ .a31 = 0,
+ .a32 = 0,
+ .a33 = 1.0,
+ },
+ },
+
+ // CS-mount, focal length 4.2mm, aperture 1.4 (O3000_LCS_F4_2)
+ {
+ .dist_coeff = {
+ .k1 = -3.7570498088693711e+01,
+ .k2 = 1.5728357422468230e+03,
+ .p1 = 1.1791307984552163e-02,
+ .p2 = -1.3742991959700961e-02,
+ .k3 = 1.0475497983752284e+01,
+ },
+
+ .camera_matrix = {
+ .a11 = 9.9917306224860204e+03,
+ .a12 = 0,
+ .a13 = 6.5441343169200013e+02,
+ .a21 = 0,
+ .a22 = 9.9479425952720158e+03,
+ .a23 = 4.6795575668109700e+02,
+ .a31 = 0,
+ .a32 = 0,
+ .a33 = 1.0,
+ },
+ },
+
+ // CS-mount, focal length 6.0mm, aperture 1.4 (O3000_LCS_F6_0)
+ {
+ .dist_coeff = {
+ .k1 = -2.3964178081799389e+01,
+ .k2 = 4.4902969904416392e+02,
+ .p1 = 2.2481087999585000e-01,
+ .p2 = 1.1427760423539150e-01,
+ .k3 = 1.3202448608914709e+01,
+ },
+
+ .camera_matrix = {
+ .a11 = 1.0267898783331597e+04,
+ .a12 = 0,
+ .a13 = 5.9040975894428607e+02,
+ .a21 = 0,
+ .a22 = 1.0167762137245367e+04,
+ .a23 = 3.7217036432075685e+02,
+ .a31 = 0,
+ .a32 = 0,
+ .a33 = 1.0,
+ },
+ },
+
+ // CS-mount, focal length 8.0mm, aperture 1.4 (O3000_LCS_F8_0)
+ {
+ .dist_coeff = {
+ .k1 = -3.1323351826805144e+01,
+ .k2 = -8.8565542864692248e-01,
+ .p1 = 1.3154594427821961e-01,
+ .p2 = 1.3393386186128195e-01,
+ .k3 = -1.7372379469761756e-03,
+ },
+
+ .camera_matrix = {
+ .a11 = 1.6071195111825766e+04,
+ .a12 = 0,
+ .a13 = 5.9208178498651694e+02,
+ .a21 = 0,
+ .a22 = 1.6265935400534616e+04,
+ .a23 = 4.0867129284489448e+02,
+ .a31 = 0,
+ .a32 = 0,
+ .a33 = 1.0,
+ },
+ },
+
+ // CS-mount, focal length 12.0mm, aperture 1.4 (O3000_LCS_F12_0)
+ {
+ .dist_coeff = {
+ .k1 = -8.7854099735158311e+00,
+ .k2 = 3.0664687310188293e+02,
+ .p1 = -1.5840425493675159e-01,
+ .p2 = -2.4142181141228097e-02,
+ .k3 = 1.4519448386845686e+00,
+ },
+
+ .camera_matrix = {
+ .a11 = 1.2466587046030105e+04,
+ .a12 = 0,
+ .a13 = 6.9244116287526458e+02,
+ .a21 = 0,
+ .a22 = 1.2309699089674952e+04,
+ .a23 = 6.9766565927729926e+02,
+ .a31 = 0,
+ .a32 = 0,
+ .a33 = 1.0,
+ },
+ },
+};
+
+
+/**
+ * Color Correction Matrix for various ambient lights.
+ *
+ * How to get the color correction matrix (CCM):
+ *
+ * 1. Place a 24 patch Macbeth chart in a properly illuminated location. It's recommended to use a
+ * light booth with a normed color temperature (i. g. d65). Otherwise, you can do the
+ * calibration process during a cloudy day because the illument is about d65 (6500 K). Put
+ * the chart in the front of a window and switch off the room light.
+ * 2. Enable auto white balance and camera calibration (lense correction) algorithm. All other algorithms
+ * must be disabled.
+ * 3. Adjust image brightness and make sure that the lower left white patch has a value about 220.
+ * Use the XML-command brightness to reach the defined value.
+ * 4. Save the image and use the software SensorTune from Aptina to get the correction matrix.
+ */
+static const float ccm_presets[][3][3] = {
+
+ // CCM_PRESET_O3020
+ {
+ {1.7392, -0.7660, 0.1968},
+ {-0.2509, 1.5322, -0.1113},
+ {0.0840, -0.4782, 1.5641},
+ },
+};
+
+
+
+
+/**
+ * Initialize pipeline with reasonable default value.
+ *
+ * Every user-tunable parameter is double buffered: the "_new" member holds
+ * the value requested by the user and is copied into the active member at
+ * the start of the next pipeline run.
+ *
+ * @param pipe Pointer to pipeline data.
+ */
+static void set_default_value(struct color_pipe_t *pipe) {
+
+	pipe->debayer_data.alg = BAYER_ALG_BILINEAR;
+	pipe->debayer_data.alg_new = pipe->debayer_data.alg;
+
+	pipe->awb_data.enable = 0;
+	pipe->awb_data.gray_threshold = 0.3;
+	pipe->awb_data.gray_threshold_new = pipe->awb_data.gray_threshold;
+	pipe->awb_data.ctrl_k = 0.01;
+	pipe->awb_data.ctrl_k_new = pipe->awb_data.ctrl_k;
+	pipe->awb_data.gain_red = 1.0;
+	pipe->awb_data.gain_blue = 1.0;
+
+	pipe->cam_calib_data.enable = 0;
+	pipe->cam_calib_data.lense = O3000_LS_F2_8;
+	pipe->cam_calib_data.lense_new = pipe->cam_calib_data.lense;
+	memcpy(&(pipe->cam_calib_data.dist_coeff), &o3000_lens_coeffs[pipe->cam_calib_data.lense].dist_coeff, sizeof(struct dist_coeff_t));
+	memcpy(&(pipe->cam_calib_data.camera_matrix), &o3000_lens_coeffs[pipe->cam_calib_data.lense].camera_matrix, sizeof(struct camera_matrix_t));
+	pipe->cam_calib_data.undistort_map_init = 0;
+
+	pipe->color_calib_data.enable = 0;
+	pipe->color_calib_data.ccm = CCM_PRESET_O3020;
+	pipe->color_calib_data.ccm_new = pipe->color_calib_data.ccm;
+	memcpy(pipe->color_calib_data.a, ccm_presets[pipe->color_calib_data.ccm], sizeof(pipe->color_calib_data.a));
+
+	pipe->sharp_data.enable = 0;
+	pipe->sharp_data.sharp_factor = 5.0;
+	pipe->sharp_data.sharp_factor_new = pipe->sharp_data.sharp_factor;
+	pipe->sharp_data.sharp_alg = SHARP_ALG_LOCAL;
+	pipe->sharp_data.sharp_alg_new = pipe->sharp_data.sharp_alg;
+	pipe->sharp_data.local_sens = 94.0;
+	// bug fix: this used to read local_sens_new (a self-assignment), leaving
+	// the double-buffered user value uninitialized
+	pipe->sharp_data.local_sens_new = pipe->sharp_data.local_sens;
+
+	pipe->gamma_data.enable = 0;
+	pipe->gamma_data.gamma = 1.2;
+	pipe->gamma_data.gamma_new = pipe->gamma_data.gamma;
+}
+
+
+/**
+ * Release memory previously obtained from do_aligned_alloc().
+ *
+ * @param buf Pointer to aligned memory to be freed (NULL is silently ignored)
+ */
+static void do_aligned_free(void *buf) {
+	if(buf == NULL) {
+		return;
+	}
+	ALIGNED_FREE(buf);
+}
+
+
+/**
+ * Allocate aligned memory.
+ *
+ * The requested size is rounded up to the next multiple of the alignment,
+ * as required by aligned allocators.
+ *
+ * @param alignment aligment size in bytes like 8, 16, 32
+ * @param size size in bytes to allocate
+ * @param func for debugging purposes do specify the calling function name
+ * @param line for debugging purposes do specify the line number from calling this function
+ * @return Pointer to aligned allocated memory or NULL on error
+ */
+static void *do_aligned_alloc(size_t alignment, size_t size, const char *func, int line) {
+	void *mem;
+	const size_t rem = size % alignment;
+
+	// round the request up to the next multiple of the alignment size
+	if(rem != 0) {
+		size += alignment - rem;
+	}
+
+	mem = ALIGNED_ALLOC(alignment, size);
+	if(mem != NULL) {
+		return mem;
+	}
+	printf("%s: aligned_alloc() line %d failed: %s\n", func, line, strerror(errno));
+	return NULL;
+}
+
+
+/**
+ * Process raw image at color pipeline.
+ *
+ *
+ * @param color_pipe Pointer to pipeline data.
+ * @param img_buf raw input image
+ * @param img_header image header @see o3000.h
+ */
+void __stdcall color_pipe_process(struct color_pipe_t *__restrict__ color_pipe,
+ void *__restrict__ img_buf,
+ struct img_header_t *__restrict__ img_header) {
+
+ int height, width, bit_channel, is_color;
+ int header_version;
+ enum enumBayerPattern_t bayer_patter;
+ enum enumDataFormat_t raw_format;
+ void *img_out;
+ enum o3000_lenses_t lens_type;
+ enum ccm_preset_t ccm_type;
+
+
+ /*
+ * Extract image header information.
+ */
+ raw_format = (enum enumDataFormat_t) (img_header->format);
+ width = img_header->width;
+ height = img_header->height;
+ bayer_patter = (enum enumBayerPattern_t) (img_header->bayer_pattern);
+ header_version = img_header->version;
+
+ // set bit per pixel
+ if(raw_format == DF_RAW_MONO_8 || raw_format == DF_RAW_BAYER_8) {
+ bit_channel = 8;
+ }
+ else {
+ bit_channel = 12;
+ }
+
+ // set flag to indicate mono or color image
+ if(raw_format == DF_RAW_MONO_8 || raw_format == DF_RAW_MONO_12 || raw_format == DF_HDR_MONO_20_COMP) {
+ is_color = 0;
+ }
+ else {
+ is_color = 1;
+ }
+
+ // set output image to raw image
+ img_out = img_buf;
+
+ /*
+ * Pipeline stage: Demosaicing
+ */
+ if(is_color) {
+ color_pipe->debayer_data.img_raw = img_buf;
+ color_pipe->debayer_data.height = height;
+ color_pipe->debayer_data.width = width;
+ color_pipe->debayer_data.format = raw_format;
+ color_pipe->debayer_data.start_pattern = bayer_patter;
+ debayer(&(color_pipe->debayer_data));
+
+ img_out = color_pipe->debayer_data.img_rgb;
+ }
+
+
+ /*
+ * Pipeline stage: White-Balancing
+ */
+ if(color_pipe->awb_data.enable && is_color) {
+
+ // reset color gains if gray threshold or proportional factor have changed
+ if( color_pipe->awb_data.ctrl_k != color_pipe->awb_data.ctrl_k_new ||
+ color_pipe->awb_data.gray_threshold != color_pipe->awb_data.gray_threshold_new) {
+ color_pipe->awb_data.gain_red = 1;
+ color_pipe->awb_data.gain_blue = 1;
+
+ }
+
+ // apply user parameter (double buffered)
+ color_pipe->awb_data.ctrl_k = color_pipe->awb_data.ctrl_k_new;
+ color_pipe->awb_data.gray_threshold = color_pipe->awb_data.gray_threshold_new;
+
+
+
+ color_pipe->awb_data.img_in = img_out;
+ color_pipe->awb_data.bit_channel = bit_channel;
+ color_pipe->awb_data.height = height;
+ color_pipe->awb_data.width = width;
+ white_balance(&(color_pipe->awb_data));
+
+ img_out = color_pipe->awb_data.img_rgb_balanced;
+ }
+ else {
+ // always reset color gain if stage is disabled
+ color_pipe->awb_data.gain_red = 1;
+ color_pipe->awb_data.gain_blue = 1;
+ }
+
+
+ /*
+ * Pipeline stage: Camera calibration
+ */
+ if(color_pipe->cam_calib_data.enable) {
+
+ // apply user parameter (double buffered)
+ lens_type = color_pipe->cam_calib_data.lense_new;
+ if(color_pipe->cam_calib_data.lense != lens_type) {
+ color_pipe->cam_calib_data.lense = lens_type;
+ memcpy(&(color_pipe->cam_calib_data.dist_coeff), &o3000_lens_coeffs[lens_type].dist_coeff, sizeof(struct dist_coeff_t));
+ memcpy(&(color_pipe->cam_calib_data.camera_matrix), &o3000_lens_coeffs[lens_type].camera_matrix, sizeof(struct camera_matrix_t));
+ }
+
+ color_pipe->cam_calib_data.img_in = img_out;
+ color_pipe->cam_calib_data.is_color = is_color;
+
+ // reninit undistortion map if image format or resolution have changed
+ if( color_pipe->cam_calib_data.bit_channel != bit_channel ||
+ color_pipe->cam_calib_data.tot_width != width ||
+ color_pipe->cam_calib_data.tot_height != height) {
+
+ color_pipe->cam_calib_data.undistort_map_init = 0;
+ }
+
+
+ color_pipe->cam_calib_data.bit_channel = bit_channel;
+ color_pipe->cam_calib_data.tot_width = width;
+ color_pipe->cam_calib_data.tot_height = height;
+
+ // field-of-view available since O-3000 image header version 4
+ if(header_version >= 4) {
+ // reninit undistortion map if field-of-view has changed
+ if( color_pipe->cam_calib_data.fov_x_start != img_header->fov_x_start ||
+ color_pipe->cam_calib_data.fov_x_end != img_header->fov_x_end ||
+ color_pipe->cam_calib_data.fov_y_start != img_header->fov_y_start ||
+ color_pipe->cam_calib_data.fov_y_end != img_header->fov_y_end) {
+
+ color_pipe->cam_calib_data.undistort_map_init = 0;
+ }
+ color_pipe->cam_calib_data.fov_x_start = img_header->fov_x_start;
+ color_pipe->cam_calib_data.fov_x_end = img_header->fov_x_end;
+ color_pipe->cam_calib_data.fov_y_start = img_header->fov_y_start;
+ color_pipe->cam_calib_data.fov_y_end = img_header->fov_y_end;
+ }
+ else {
+ // assume that image is displayed without ROI (region-of-interest)
+ color_pipe->cam_calib_data.fov_x_start = 0;
+ color_pipe->cam_calib_data.fov_x_end = width-1;
+ color_pipe->cam_calib_data.fov_y_start = 0;
+ color_pipe->cam_calib_data.fov_y_end = height-1;
+ }
+ camera_calib(&(color_pipe->cam_calib_data));
+
+ img_out = color_pipe->cam_calib_data.img_calib;
+ }
+
+
+ /*
+ * Pipeline stage: Color Correction
+ */
+ if(color_pipe->color_calib_data.enable && is_color) {
+
+ // apply user parameter (double buffered)
+ ccm_type = color_pipe->color_calib_data.ccm_new;
+ if(color_pipe->color_calib_data.ccm != ccm_type) {
+ color_pipe->color_calib_data.ccm = ccm_type;
+ memcpy(color_pipe->color_calib_data.a, ccm_presets[ccm_type], sizeof(color_pipe->color_calib_data.a));
+ }
+
+ color_pipe->color_calib_data.img_in = img_out;
+ color_pipe->color_calib_data.bit_channel = bit_channel;
+ color_pipe->color_calib_data.width = width;
+ color_pipe->color_calib_data.height = height;
+ color_calib(&(color_pipe->color_calib_data));
+
+ img_out = color_pipe->color_calib_data.img_calib;
+ }
+
+
+ /*
+ * Pipeline stage: Image sharpening
+ */
+ if(color_pipe->sharp_data.enable) {
+
+ // apply user parameter (double buffered)
+ color_pipe->sharp_data.sharp_factor = color_pipe->sharp_data.sharp_factor_new;
+ color_pipe->sharp_data.sharp_alg = color_pipe->sharp_data.sharp_alg_new;
+ color_pipe->sharp_data.local_sens = color_pipe->sharp_data.local_sens_new;
+
+ color_pipe->sharp_data.img_in = img_out;
+ color_pipe->sharp_data.is_color = is_color;
+ color_pipe->sharp_data.bit_channel = bit_channel;
+ color_pipe->sharp_data.width = width;
+ color_pipe->sharp_data.height = height;
+ sharpening(&(color_pipe->sharp_data));
+
+ img_out = color_pipe->sharp_data.img_sharp;
+ }
+
+
+ /*
+ * Pipeline stage: Gamma correction
+ */
+ if(color_pipe->gamma_data.enable) {
+
+ // apply user parameter (double buffered)
+ color_pipe->gamma_data.gamma = color_pipe->gamma_data.gamma_new;
+
+ color_pipe->gamma_data.img_in = img_out;
+ color_pipe->gamma_data.is_color = is_color;
+ color_pipe->gamma_data.bit_channel = bit_channel;
+ color_pipe->gamma_data.width = width;
+ color_pipe->gamma_data.height = height;
+ gamma_corr(&(color_pipe->gamma_data));
+
+ img_out = color_pipe->gamma_data.img_gamma;
+ }
+
+
+ /*
+ * Return processed image depending on active pipeline stages.
+ */
+ color_pipe->img_out = img_out;
+ color_pipe->is_color = is_color;
+ color_pipe->bit_channel = bit_channel;
+ color_pipe->width = width;
+ color_pipe->height = height;
+}
+
+
+/**
+ * Pipeline stage configuration: Demosaicing (debayer)
+ *
+ * NOTE There is no enable flag: the demosaicing stage always runs; only the
+ * algorithm type is selectable. The new value is applied double-buffered on
+ * the next pipeline run (see alg_new vs alg in struct debayer_data_t).
+ *
+ * @param color_pipe Pointer to pipeline context
+ * @param alg demosaicing algorithm type
+ */
+void __stdcall color_pipe_stageconf_debayer(struct color_pipe_t *color_pipe, enum bayer_alg_t alg) {
+
+	// paranoia
+	if(color_pipe == NULL) {
+		printf("%s: Pipeline pointer is NULL!\n", __func__);
+		return;
+	}
+	color_pipe->debayer_data.alg_new = alg;
+}
+
+
+/**
+ * Pipeline stage configuration: Auto-White-Balancing
+ *
+ * @param color_pipe Pointer to pipeline context
+ * @param enable not 0: enable, 0: disable
+ * @param gray_threshold gray threshold (default 0.3)
+ * @param ctrl_gain controller gain (default 0.01)
+ */
+void __stdcall color_pipe_stageconf_awb(struct color_pipe_t *color_pipe, int enable, float gray_threshold, float ctrl_gain) {
+
+	// paranoia
+	if(color_pipe == NULL) {
+		printf("%s: Pipeline pointer is NULL!\n", __func__);
+		return;
+	}
+
+	// parameters take effect double-buffered on the next pipeline run
+	color_pipe->awb_data.enable = enable;
+	color_pipe->awb_data.gray_threshold_new = gray_threshold;
+	color_pipe->awb_data.ctrl_k_new = ctrl_gain;
+}
+
+
+/**
+ * Pipeline stage configuration: Camera Calibration
+ *
+ * Selecting a lense also forces the lens undistortion map to be
+ * re-initialized on the next pipeline run.
+ *
+ * @param color_pipe Pointer to pipeline context
+ * @param enable not 0: enable, 0: disable
+ * @param lense initialize pipeline stage with given lense type
+ */
+void __stdcall color_pipe_stageconf_cam_calib(struct color_pipe_t *color_pipe, int enable, enum o3000_lenses_t lense) {
+
+	// paranoia
+	if(color_pipe == NULL) {
+		printf("%s: Pipeline pointer is NULL!\n", __func__);
+		return;
+	}
+
+	// range check: reject lense types outside the o3000_lens_coeffs table
+	if(lense < 0 || lense >= (sizeof(o3000_lens_coeffs)/sizeof(struct o3000_lens_coeffs_t))) {
+		printf("%s: Invalid lense type %d\n", __func__, lense);
+		return;
+	}
+
+	// lense type is applied double-buffered on the next pipeline run;
+	// invalidate the undistortion map so it gets rebuilt for the new lense
+	color_pipe->cam_calib_data.enable = enable;
+	color_pipe->cam_calib_data.lense_new = lense;
+	color_pipe->cam_calib_data.undistort_map_init = 0;
+}
+
+
+/**
+ * Pipeline stage configuration: Color Calibration
+ *
+ * @param color_pipe Pointer to pipeline context
+ * @param enable not 0: enable, 0: disable
+ * @param ccm_preset initialize pipeline stage with given color correction preset data
+ */
+void __stdcall color_pipe_stageconf_color_calib(struct color_pipe_t *color_pipe, int enable,
+						enum ccm_preset_t ccm_preset) {
+
+	// paranoia
+	if(color_pipe == NULL) {
+		printf("%s: Pipeline pointer is NULL!\n", __func__);
+		return;
+	}
+
+	// range check: reject presets outside the ccm_presets table
+	if(ccm_preset < 0 || ccm_preset >= (sizeof(ccm_presets)/sizeof(ccm_presets[0]))) {
+		printf("%s: Invalid color type %d\n", __func__, ccm_preset);
+		return;
+	}
+
+	// matrix preset is loaded double-buffered on the next pipeline run
+	color_pipe->color_calib_data.enable = enable;
+	color_pipe->color_calib_data.ccm_new = ccm_preset;
+}
+
+
+/**
+ * Pipeline stage configuration: Sharpening
+ *
+ * @param color_pipe Pointer to pipeline context
+ * @param enable not 0: enable, 0: disable
+ * @param factor sharpening factor (default 5.0)
+ * @param alg algorithm type
+ * @param sens sensitivity in per cent (default 94.0), used by the local sharpening algorithm
+ */
+void __stdcall color_pipe_stageconf_sharp(struct color_pipe_t *color_pipe, int enable,
+					  float factor, enum sharp_alg_t alg, float sens) {
+
+	// paranoia
+	if(color_pipe == NULL) {
+		printf("%s: Pipeline pointer is NULL!\n", __func__);
+		return;
+	}
+	// parameters take effect double-buffered on the next pipeline run
+	color_pipe->sharp_data.enable = enable;
+	color_pipe->sharp_data.sharp_factor_new = factor;
+	color_pipe->sharp_data.sharp_alg_new = alg;
+	color_pipe->sharp_data.local_sens_new = sens;
+}
+
+
+/**
+ * Pipeline stage configuration: Gamma Correction
+ *
+ * @param color_pipe Pointer to pipeline context
+ * @param enable not 0: enable, 0: disable
+ * @param gamma gamma factor (1.0 means no gamma correction, default 1.2)
+ */
+void __stdcall color_pipe_stageconf_gamma(struct color_pipe_t *color_pipe, int enable, float gamma) {
+
+	// paranoia
+	if(color_pipe == NULL) {
+		printf("%s: Pipeline pointer is NULL!\n", __func__);
+		return;
+	}
+	// gamma takes effect double-buffered on the next pipeline run
+	color_pipe->gamma_data.enable = enable;
+	color_pipe->gamma_data.gamma_new = gamma;
+}
+
+
+/**
+ * Open color image processing pipeline.
+ * This function allocates memory for the various pipe algorithms. The pipeline is set up for a maximum possible image size defined
+ * by the height, width and bitdepth per color channel.
+ *
+ * NOTE
+ * This function uses dynamic memory allocation. If the pipeline isn't used anymore, close it by calling @ref color_pipe_close.
+ *
+ * @param color_pipe On return: Pointer to pipeline data. Dynamic memory is allocated.
+ * @param max_img_height maximum possible image height in number of pixels
+ * @param max_img_width maximum possible image width in number of pixels
+ * @param bits_per_channel maximum possible number of bits per color channel
+ * @return 0 on success, -1 on error
+ */
+int __stdcall color_pipe_open(struct color_pipe_t **color_pipe, const int max_img_height, const int max_img_width,
+			      const int bits_per_channel) {
+
+	int byte_per_pixel, max_img_size, max_img_size_yuv, max_img_size_binary;
+	struct color_pipe_t *data;
+
+	if(color_pipe == NULL) {
+		printf("%s: Pipeline data pointer is NULL!\n", __func__);
+		return -1;
+	}
+
+	// calloc zero-initializes the context, so every buffer pointer starts out NULL;
+	// the abort path below relies on this when freeing partially allocated buffers
+	data = calloc(1, sizeof(struct color_pipe_t));
+	if(data == NULL) {
+		PRINTF_ERRNO("calloc");
+		return -1;
+	}
+
+
+	/*
+	 * Calculate the number of bytes per pixel that are used for a color image.
+	 * Always, a color image has 3 channels with the given bit-depth max_img_bpp.
+	 *
+	 * e. g. 8 bits-per-channel results to 3 byte per pixel
+	 *       12 bits-per-channel results to 6 byte per pixel
+	 */
+	if((bits_per_channel%8) == 0) {
+		byte_per_pixel = bits_per_channel/8;
+	}
+	else {
+		// round a partial byte (e.g. 12 bit) up to the next whole byte
+		byte_per_pixel = bits_per_channel/8 + 1;
+	}
+	byte_per_pixel *= 3;
+
+
+	/*
+	 * Do calculate the maximum possible image size.
+	 */
+	max_img_size = max_img_height*max_img_width*byte_per_pixel;
+
+
+	/*
+	 * The YUV image uses 16 bit-per-channel always.
+	 */
+	max_img_size_yuv = max_img_height*max_img_width*3*2;
+
+
+	/*
+	 * The binary image uses 8 bit-per-channel always.
+	 */
+	max_img_size_binary = max_img_height*max_img_width*3;
+
+
+	/*
+	 * Important note for dynamic memory allocation:
+	 * Various pipeline algorithms are using SIMD instructions like SSE2 (128 bit register) and AVX (256 bit registers). Therefore any
+	 * image buffer is allocated to a 32-byte boundary. Using SIMD instructions on an unaligned buffer may generate a general-protection exception.
+	 */
+
+	// allocate memory for demosaicing algorithm
+	data->debayer_data.img_rgb = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
+	if(data->debayer_data.img_rgb == NULL) {
+		goto _pipe_open_abort;
+	}
+
+	// allocate memory for auto white balancing algorithm
+	data->awb_data.img_rgb_balanced = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
+	if(data->awb_data.img_rgb_balanced == NULL) {
+		goto _pipe_open_abort;
+	}
+	data->awb_data.img_yuv = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size_yuv, __func__, __LINE__-1);
+	if(data->awb_data.img_yuv == NULL) {
+		goto _pipe_open_abort;
+	}
+
+	// allocate memory for camera calibration algorithm
+	data->cam_calib_data.img_calib = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
+	if(data->cam_calib_data.img_calib == NULL) {
+		goto _pipe_open_abort;
+	}
+	// one undistortion coordinate pair per pixel
+	data->cam_calib_data.calib_map = do_aligned_alloc(ALIGNMENT_SIZE,
+							  sizeof(struct lense_undistort_coord_t)*max_img_height*max_img_width,
+							  __func__, __LINE__-1);
+	if(data->cam_calib_data.calib_map == NULL) {
+		goto _pipe_open_abort;
+	}
+
+	// allocate memory for color calibration algorithm
+	data->color_calib_data.img_calib = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
+	if(data->color_calib_data.img_calib == NULL) {
+		goto _pipe_open_abort;
+	}
+
+	// allocate memory for sharpening algorithm
+	data->sharp_data.img_sharp = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
+	if(data->sharp_data.img_sharp == NULL) {
+		goto _pipe_open_abort;
+	}
+	data->sharp_data.img_yuv = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size_yuv, __func__, __LINE__-1);
+	if(data->sharp_data.img_yuv == NULL) {
+		goto _pipe_open_abort;
+	}
+	data->sharp_data.img_yuv_sharp = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size_yuv, __func__, __LINE__-1);
+	if(data->sharp_data.img_yuv_sharp == NULL) {
+		goto _pipe_open_abort;
+	}
+	data->sharp_data.img_sobel = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size_yuv, __func__, __LINE__-1);
+	if(data->sharp_data.img_sobel == NULL) {
+		goto _pipe_open_abort;
+	}
+	data->sharp_data.img_gauss = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size_yuv, __func__, __LINE__-1);
+	if(data->sharp_data.img_gauss == NULL) {
+		goto _pipe_open_abort;
+	}
+	data->sharp_data.sharp_mask = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size_binary, __func__, __LINE__-1);
+	if(data->sharp_data.sharp_mask == NULL) {
+		goto _pipe_open_abort;
+	}
+
+	// allocate memory for gamma correction algorithm
+	data->gamma_data.img_gamma = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
+	if(data->gamma_data.img_gamma == NULL) {
+		goto _pipe_open_abort;
+	}
+
+	// Lookup table size depends on bits per color channel: one entry per raw channel value.
+	data->gamma_data.gamma_table = do_aligned_alloc(ALIGNMENT_SIZE, (1<<bits_per_channel)*sizeof(int), __func__, __LINE__-1);
+	if(data->gamma_data.gamma_table == NULL) {
+		goto _pipe_open_abort;
+	}
+
+	set_default_value(data);
+	*color_pipe = data;
+
+	// detect CPU features
+#if (WITH_SIMD == 1)
+	if(cpu_feature_init()) {
+		printf("%s: Detecting CPU features failed\n", __func__);
+	}
+#endif // WITH_SIMD
+	return 0;
+
+_pipe_open_abort:
+	// an allocation failed: free whatever was allocated so far and bail out
+	color_pipe_close(data);
+	return -1;
+}
+
+
+/**
+ * Close color image processing pipeline.
+ * This function cleans up the pipeline and is freeing the used memory.
+ *
+ * @param data Pointer to pipeline data
+ * @return 0 on success, -1 if data is NULL
+ */
+int __stdcall color_pipe_close(struct color_pipe_t *data) {
+
+	if(data == NULL) {
+		printf("%s: Pipeline data pointer is NULL!\n", __func__);
+		return -1;
+	}
+
+	// free various image buffers
+	do_aligned_free(data->debayer_data.img_rgb);
+	do_aligned_free(data->awb_data.img_rgb_balanced);
+	do_aligned_free(data->awb_data.img_yuv);
+	do_aligned_free(data->cam_calib_data.img_calib);
+	do_aligned_free(data->cam_calib_data.calib_map);
+	do_aligned_free(data->color_calib_data.img_calib);
+	do_aligned_free(data->sharp_data.img_sharp);
+	do_aligned_free(data->sharp_data.img_yuv);
+	do_aligned_free(data->sharp_data.img_yuv_sharp);
+	do_aligned_free(data->sharp_data.img_sobel);
+	do_aligned_free(data->sharp_data.img_gauss);
+	do_aligned_free(data->sharp_data.sharp_mask);
+	do_aligned_free(data->gamma_data.img_gamma);
+	do_aligned_free(data->gamma_data.gamma_table);
+
+	// free the pipeline context structure itself
+	free(data);
+	return 0;
+}
diff --git a/color_pipe.h b/color_pipe.h
new file mode 100644
index 0000000..9dfcdc0
--- /dev/null
+++ b/color_pipe.h
@@ -0,0 +1,328 @@
+/**
+* @file color_pipe.h
+* @brief Color Processing Pipeline Definitions
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2016-03-01
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+
+#ifndef _COLOR_PIPE_H
+#define _COLOR_PIPE_H
+
+
+#include <o3000/o3000_portable.h>
+#include <o3000/image_header.h>
+
+
+/**
+ * library version
+ */
+#define COLOR_PIPE_VERSION "1.0.0"
+
+
+
+
+/**
+ * demosaicing algorithm definition
+ */
+enum bayer_alg_t {
+ BAYER_ALG_BILINEAR = 0, ///< bilinear interpolation
+};
+
+
+/**
+ * Demosaicing (debayer) data structure
+ * This algorithm is enabled always.
+ */
+struct debayer_data_t {
+ void *img_rgb; ///< RGB output image. This buffer must be allocated externly.
+ void *img_raw; ///< Pointer to input image containing raw bayer image.
+ int height; ///< image height in number of pixels
+ int width; ///< image width in number of pixels
+ enum enumDataFormat_t format; ///< data format of input image
+ enum enumBayerPattern_t start_pattern; ///< first pixel starts with this bayer pattern
+ enum bayer_alg_t alg; ///< debayer algorithm type
+ enum bayer_alg_t alg_new; ///< this debayer algorithm type is changed by API call (use double buffering concept)
+};
+
+
+/**
+ * automatic white balance (AWB) data structure
+ */
+struct awb_data_t {
+ int enable; ///< flag to enable this algorithm
+ void *img_rgb_balanced; ///< White balanced RGB output image. This buffer must be allocated externly.
+ void *img_in; ///< Unbalanced RGB input image.
+ int bit_channel; ///< Bits per color channel.
+ int height; ///< image height in number of pixels
+ int width; ///< image width in number of pixels
+ int16_t *img_yuv; ///< Image buffer holding YUV image. This buffer must be allocated externly.
+ float gray_threshold; ///< gray value threshold
+ float gray_threshold_new; ///< this gray value threshold is changed by API call (use double buffering concept)
+ float ctrl_k; ///< controller gain
+ float ctrl_k_new; ///< this controller gain is changed by API call (use double buffering concept)
+ float gain_red; ///< red color gain
+ float gain_blue; ///< blue color gain
+};
+
+
+
+/**
+ * camera distortion coefficient definition
+ */
+struct dist_coeff_t {
+ float k1; ///< radial distortion factor k1
+ float k2; ///< radial distortion factor k2
+ float p1; ///< tangential distortion factor p1
+ float p2; ///< tangential distortion factor p2
+ float k3; ///< radial distortion factor k3
+};
+
+
+/**
+ * camera matrix definition
+ */
+struct camera_matrix_t {
+ float a11; ///< fx: focal length in x-direction
+ float a12; ///< skew: axis skew
+ float a13; ///< cx: optical center in x-direction
+ float a21; ///< must be 0.0
+ float a22; ///< focal length in y-direction
+ float a23; ///< optical center in y-direction
+ float a31; ///< must be 0.0
+ float a32; ///< must be 0.0
+ float a33; ///< must be 1.0
+};
+
+
+/**
+ * O-3000 lense definitions supplied by Stettbacher Signal Processing.
+ * Lense definitions are used to load lens specific distortion coefficients.
+ */
+enum o3000_lenses_t {
+
+ // S-mount lenses
+ O3000_LS_F2_8 = 0, ///< S-mount, focal length 2.8mm, aperture 2.0
+ O3000_LS_F4_2, ///< S-mount, focal length 4.2mm, aperture 1.8
+ O3000_LS_F6_0, ///< S-mount, focal length 6.0mm, aperture 1.8
+ O3000_LS_F8_0, ///< S-mount, focal length 8.0mm, aperture 1.8
+ O3000_LS_F12_0, ///< S-mount, focal length 12.0mm, aperture 1.8
+
+ // CS-mount lenses
+ O3000_LCS_F2_8, ///< CS-mount, focal length 2.8mm, aperture 1.6
+ O3000_LCS_F4_2, ///< CS-mount, focal length 4.2mm, aperture 1.4
+ O3000_LCS_F6_0, ///< CS-mount, focal length 6.0mm, aperture 1.4
+ O3000_LCS_F8_0, ///< CS-mount, focal length 8.0mm, aperture 1.4
+ O3000_LCS_F12_0, ///< CS-mount, focal length 12.0mm, aperture 1.4
+};
+
+
+/**
+ * Lense undistortion coordinate definition.
+ * This coordinate pair defines the undistorted pixel location.
+ *
+ * NOTE The pixel location may lay between a pixel pair. Therefore this coordinates are
+ * scaled by a defined factor defined at @ref cam_calib_data_t.
+ */
+struct lense_undistort_coord_t {
+ int coord_x; ///< x-coordinate of calibrated pixel
+ int coord_y; ///< y-coordinate of calibrated pixel
+};
+
+
+/**
+ * camera calibration structure
+ *
+ * The total width @ref tot_width and height @ref tot_height define the image size taken during the calibration process.
+ * The field of view defines the current image windows. This window is less or equal to the total image size.
+ */
+struct cam_calib_data_t {
+ int enable; ///< flag to enable this algorithm
+ void *img_calib; ///< Undistorted (calibrated) output image. This buffer must be allocated externly.
+ void *img_in; ///< Uncalibrated distorted input image.
+ int is_color; ///< Not 0 if it's a color image.
+ int bit_channel; ///< Bits per color channel.
+ int tot_width; ///< total image width in pixels used during calibration process
+ int tot_height; ///< total image height in pixels used during calibration process
+ int fov_x_start; ///< field of view start pixel coordinate in x-direction
+ int fov_x_end; ///< field of view end pixel coordinate in x-direction
+ int fov_y_start; ///< field of view start pixel coordinate in y-direction
+ int fov_y_end; ///< field of view end pixel coordinate in y-direction
+ struct dist_coeff_t dist_coeff; ///< distortion coefficients
+ struct camera_matrix_t camera_matrix; ///< camera matrix
+ enum o3000_lenses_t lense; ///< lense type
+ enum o3000_lenses_t lense_new; ///< lense type is changed by API call (use double buffering concept)
+ int undistort_map_init; ///< flag indicating if lens undistortion map is initialized
+ struct lense_undistort_coord_t *calib_map; ///< Lense undistortion map. This buffer must be allocated externly.
+ int calib_map_scale_fact; ///< Bit shifts applied on undistortion map.
+};
+
+
+/**
+ * Color Correction Matrix presets (CCM) for various camera types
+ */
+enum ccm_preset_t {
+ CCM_PRESET_O3020 = 0, ///< O-3020 camera from Stettbacher Signal Processing
+};
+
+
+/**
+ * color calibration structure definition
+ *
+ *
+ * The color correction matrix A maps the uncalibrated image (img_uncal) to the
+ * calibrated image (img_calib) according to the following transformation:
+ *
+ * img_calib = A * img_uncal
+ *
+ * where
+ *
+ * | a11 a12 a13 |
+ * A = | a21 a22 a23 |
+ * | a31 a32 a33 |
+ *
+ *
+ * | red_uncal |
+ * img_uncal = | green_uncal |
+ * | blue_uncal |
+ *
+ * | red_calib |
+ * img_calib = | green_calib |
+ * | blue_calib |
+ *
+ *
+ * red_calib = a11*red_uncal + a12*green_uncal + a13*blue_uncal
+ * green_calib = a21*red_uncal + a22*green_uncal + a23*blue_uncal
+ * blue_calib = a31*red_uncal + a32*green_uncal + a33*blue_uncal
+ *
+ */
+struct color_calib_data_t {
+ int enable; ///< flag to enable this algorithm
+ void *img_calib; ///< Color calibrated output image. This buffer must be allocated externly.
+ void *img_in; ///< Uncalibrated input image
+ int bit_channel; ///< bits per color channel
+ int width; ///< image width in number of pixels
+ int height; ///< image height in number of pixels
+ float a[3][3]; ///< 3x3 color correction matrix
+ enum ccm_preset_t ccm; ///< color correction matrix type loaded
+ enum ccm_preset_t ccm_new; ///< this color correction matrix type is changed by API call (use double buffering concept)
+};
+
+
+/**
+ * Sharpening algorithm definition
+ */
+enum sharp_alg_t {
+ SHARP_ALG_GLOBAL = 0, ///< global sharpening algorithm
+ SHARP_ALG_LOCAL, ///< local sharpening algorithm
+};
+
+
+/**
+ * Sharpening data definition
+ */
+struct sharp_data_t {
+ int enable; ///< flag to enable this algorithm
+ void *img_sharp; ///< Sharpened output RGB image. This buffer must be allocated externly.
+ void *img_in; ///< Unsharp RGB input image.
+ int is_color; ///< Not 0 if it's a color image
+ int bit_channel; ///< Bits per color channel.
+ int width; ///< image width in number of pixels
+ int height; ///< image height in number of pixels
+ int16_t *img_yuv; ///< YUV image buffer. This buffer must be allocated externly.
+ float sharp_factor; ///< Sharpening factor: As higher as stronger sharpening is done.
+ float sharp_factor_new; ///< this sharpening factor is changed by API call (use double buffering concept)
+ enum sharp_alg_t sharp_alg; ///< sharpening algorithm type
+ enum sharp_alg_t sharp_alg_new; ///< this algorithm type is changed by API call (use double buffering concept)
+ float local_sens; ///< Sensitivity setting of local sharpening algorithm in per cent. 100 % means everything is sharpened like the global algorithm does
+ float local_sens_new; ///< this sensitivity setting is changed by API call (use double buffering concept)
+ int16_t *img_yuv_sharp; ///< YUV image buffer holding sharpened Y-channel. This buffer must be allocated externly.
+ int16_t *img_sobel; ///< Sobel image in YUV color space used by local sharpening algorithm. This buffer must be allocated externly.
+ int16_t *img_gauss; ///< Gaussian low-pass filtered image in YUV color space used by local sharpening algorithm. This buffer must be allocated externly.
+ int8_t *sharp_mask; ///< Binary sharpening mask image used by local sharpening algorithm. This buffer must be allocated externly.
+};
+
+
+/**
+ * Gamma correction data definition
+ */
+struct gamma_data_t {
+ int enable; ///< flag to enable this algorithm
+ void *img_gamma; ///< Gamma corrected output image. This buffer must be allocated externly.
+ void *img_in; ///< Input image.
+ int is_color; ///< Not 0 if it's a color image
+ int bit_channel; ///< Bits per color channel.
+ int width; ///< image width in number of pixels
+ int height; ///< image height in number of pixels
+ float gamma; ///< gamma coefficient
+ float gamma_new; ///< this gamma coefficient is changed by API call (use double buffering concept)
+ int gamma_table_bitdepth; ///< gamma table is initialized with this bit-depth
+ float gamma_table_init; ///< gamma table is initialized with this coefficient
+ int *gamma_table; ///< Lookup table containg gamma corrected value. This buffer must be allocated externly.
+};
+
+
+/**
+ * Color pipe definition structure holding all memory data from different pipeline
+ * stages. This structure and image buffers are allocated dynamically.
+ */
+struct color_pipe_t {
+
+ void *img_out; ///< Points to processed output image in RGB format.
+ int is_color; ///< Not 0 if it's a color image
+ int bit_channel; ///< Bits per color channel.
+ int width; ///< image width in number of pixels
+ int height; ///< image height in number of pixels
+
+ struct debayer_data_t debayer_data; ///< demosaicing data
+ struct awb_data_t awb_data; ///< auto white balancing data
+ struct cam_calib_data_t cam_calib_data; ///< camera calibration data used for lens distortion correction
+ struct color_calib_data_t color_calib_data; ///< color calibration data used for color corretion
+ struct sharp_data_t sharp_data; ///< image sharpening data
+ struct gamma_data_t gamma_data; ///< gamma correction data
+};
+
+
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+void __stdcall color_pipe_process(struct color_pipe_t *color_pipe, void *img_buf, struct img_header_t *img_header);
+int __stdcall color_pipe_open(struct color_pipe_t **color_pipe, const int max_img_height, const int max_img_width, const int bits_per_channel);
+int __stdcall color_pipe_close(struct color_pipe_t *data);
+
+void __stdcall color_pipe_stageconf_debayer(struct color_pipe_t *color_pipe, enum bayer_alg_t alg);
+void __stdcall color_pipe_stageconf_awb(struct color_pipe_t *color_pipe, int enable, float gray_threshold, float ctrl_gain);
+void __stdcall color_pipe_stageconf_cam_calib(struct color_pipe_t *color_pipe, int enable, enum o3000_lenses_t lense);
+void __stdcall color_pipe_stageconf_color_calib(struct color_pipe_t *color_pipe, int enable, enum ccm_preset_t ccm_preset);
+void __stdcall color_pipe_stageconf_sharp(struct color_pipe_t *color_pipe, int enable, float factor, enum sharp_alg_t alg, float sens);
+void __stdcall color_pipe_stageconf_gamma(struct color_pipe_t *color_pipe, int enable, float gamma);
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
+
+
+#endif // _COLOR_PIPE_H
diff --git a/color_pipe_private.h b/color_pipe_private.h
new file mode 100644
index 0000000..bf412fe
--- /dev/null
+++ b/color_pipe_private.h
@@ -0,0 +1,57 @@
+/**
+* @file color_pipe_private.h
+* @brief Color Processing Pipeline definitions for internal use only
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-08-20
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+
+#ifndef _COLOR_PROC_PIPE_H
+#define _COLOR_PROC_PIPE_H
+
+#include <o3000/o3000_portable.h>
+#include <o3000/image_header.h>
+#include "color_pipe.h"
+#include "cpu_feature.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+int debayer(struct debayer_data_t *debayer_data);
+
+int white_balance(struct awb_data_t *awb_data);
+
+int camera_calib(struct cam_calib_data_t *data);
+
+int sharpening(struct sharp_data_t *sharp_data);
+
+int gamma_corr(struct gamma_data_t *gamma_data);
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
+
+
+#endif // _COLOR_PROC_PIPE_H
diff --git a/cpu_feature.c b/cpu_feature.c
new file mode 100644
index 0000000..62080a5
--- /dev/null
+++ b/cpu_feature.c
@@ -0,0 +1,122 @@
+/**
+* @file cpu_feature.c
+* @brief CPU feature detection (CPU ID)
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-11-12
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+
+#if __x86_64__
+
+#include <stdio.h>
+#include <cpuid.h>
+#include <stdint.h>
+
+#include "cpu_feature.h"
+
+
+// level 1
+static int cpu_feature_sse;
+static int cpu_feature_sse2;
+static int cpu_feature_sse3;
+static int cpu_feature_ssse3;
+static int cpu_feature_fma;
+static int cpu_feature_avx;
+
+// level 7
+static int cpu_feature_avx2;
+
+
+/**
+ * Check whether given CPU feature is available or not.
+ *
+ * NOTE This compilation unit is built for x86-64 only (see the enclosing
+ * __x86_64__ guard). The feature flags are populated by @ref cpu_feature_init;
+ * until that call succeeds, every feature reports as unavailable.
+ *
+ * @param type CPU feature type to check
+ * @return not 0 if available, 0 if not
+ */
+int cpu_feature_support(enum cpu_feature_t type) {
+
+	/*
+	 * Any CPU feature is disabled on 32 bit machine.
+	 */
+	switch(type) {
+	case CPU_FEATURE_SSE:	return cpu_feature_sse;
+	case CPU_FEATURE_SSE2:	return cpu_feature_sse2;
+	case CPU_FEATURE_SSE3:	return cpu_feature_sse3;
+	case CPU_FEATURE_SSSE3:	return cpu_feature_ssse3;
+	case CPU_FEATURE_FMA:	return cpu_feature_fma;
+	case CPU_FEATURE_AVX:	return cpu_feature_avx;
+	case CPU_FEATURE_AVX2:	return cpu_feature_avx2;
+	}
+	// unknown feature type
+	return 0;
+}
+
+
+/**
+ * Initialize CPU features detection like MMX, SSE, SSE2, AVX etc.
+ * This function read and handles the CPU IDs.
+ *
+ * @return 0 on success otherwise -1
+ */
+int cpu_feature_init(void) {
+
+	unsigned int cpuid_basic, /*cpuid_highest, */sig;
+	unsigned int eax, ebx, ecx, edx;
+
+
+	cpuid_basic = __get_cpuid_max(0, &sig);
+	if(cpuid_basic == 0) {
+		printf("%s: Basic CPUID is not supported\n", __func__);
+		return -1;
+	}
+	// NOTE(review): the extended CPUID leaf base is conventionally 0x80000000,
+	// not 0x8000000 — verify the constant before re-enabling the line below.
+// 	cpuid_highest = __get_cpuid_max(0x8000000, NULL);
+
+	// basic leaf 1: legacy feature flags (SSE/SSE2 in EDX, SSE3/SSSE3/FMA/AVX in ECX)
+	if(cpuid_basic >= 1) {
+		__cpuid_count(1, 0, eax, ebx, ecx, edx);
+
+		cpu_feature_sse = (edx & bit_SSE) != 0 ? 1:0;
+		cpu_feature_sse2 = (edx & bit_SSE2) != 0 ? 1:0;
+
+		cpu_feature_sse3 = (ecx & bit_SSE3) != 0 ? 1:0;
+		cpu_feature_ssse3 = (ecx & bit_SSSE3) != 0 ? 1:0;
+		cpu_feature_fma = (ecx & bit_FMA) != 0 ? 1:0;
+		cpu_feature_avx = (ecx & bit_AVX) != 0 ? 1:0;
+	}
+
+	// structured extended feature leaf 7 (subleaf 0): AVX2 flag in EBX
+	if(cpuid_basic >= 7) {
+		__cpuid_count(7, 0, eax, ebx, ecx, edx);
+
+		cpu_feature_avx2 = (ebx & bit_AVX2) != 0 ? 1:0;
+	}
+
+// 	printf("%s: SSE %s\n", __func__, cpu_feature_sse != 0 ? "yes":"no");
+// 	printf("%s: SSE2 %s\n", __func__, cpu_feature_sse2 != 0 ? "yes":"no");
+// 	printf("%s: SSE3 %s\n", __func__, cpu_feature_sse3 != 0 ? "yes":"no");
+// 	printf("%s: SSSE3 %s\n", __func__, cpu_feature_ssse3 != 0 ? "yes":"no");
+// 	printf("%s: FMA %s\n", __func__, cpu_feature_fma != 0 ? "yes":"no");
+// 	printf("%s: AVX %s\n", __func__, cpu_feature_avx != 0 ? "yes":"no");
+// 	printf("%s: AVX2 %s\n", __func__, cpu_feature_avx2 != 0 ? "yes":"no");
+	return 0;
+}
+
+#endif // __x86_64__
diff --git a/cpu_feature.h b/cpu_feature.h
new file mode 100644
index 0000000..11b5008
--- /dev/null
+++ b/cpu_feature.h
@@ -0,0 +1,48 @@
+/**
+* @file cpu_feature.h
+* @brief CPU feature detection definitions (CPU ID)
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-11-12
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+#ifndef _CPU_FEATURE_H
+#define _CPU_FEATURE_H
+
+/**
+ * CPU feature definitions
+ */
+enum cpu_feature_t {
+	CPU_FEATURE_SSE = 0,	///< streaming SIMD extensions
+	CPU_FEATURE_SSE2,	///< streaming SIMD extensions 2
+	CPU_FEATURE_SSE3,	///< streaming SIMD extensions 3
+	CPU_FEATURE_SSSE3,	///< supplemental streaming SIMD extensions 3
+	CPU_FEATURE_FMA,	///< fused multiply-add
+	CPU_FEATURE_AVX,	///< advanced vector extensions
+	CPU_FEATURE_AVX2,	///< advanced vector extensions 2
+};
+
+int cpu_feature_support(enum cpu_feature_t type);
+int cpu_feature_init(void);
+
+#endif // _CPU_FEATURE_H \ No newline at end of file
diff --git a/debayer.c b/debayer.c
new file mode 100644
index 0000000..1733496
--- /dev/null
+++ b/debayer.c
@@ -0,0 +1,243 @@
+/**
+* @file debayer.c
+* @brief demosaicing algorithms
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-08-20
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+#include <stdio.h>
+#include "color_pipe_private.h"
+
+
+/**
+ * Return bayer pattern type of next line.
+ *
+ * @param pattern current pattern type
+ * @param offset_height height offset in number of lines
+ * @param offset_width width offset in numbers of rows
+ * @return pattern type of next line
+ */
+static inline enum enumBayerPattern_t getBayerType(enum enumBayerPattern_t pattern, int offset_height, int offset_width) {
+
+	const int odd_row = offset_height%2;
+	const int odd_col = offset_width%2;
+
+	/*
+	 * The bayer mosaic repeats every 2 pixels in both directions. An even
+	 * offset therefore keeps the pattern, an odd column offset mirrors it
+	 * horizontally, an odd row offset mirrors it vertically and an odd/odd
+	 * offset applies both mirror operations (diagonal neighbor).
+	 */
+	if(odd_col) {
+		switch(pattern) {
+		case BP_GR: pattern = BP_RG; break;
+		case BP_RG: pattern = BP_GR; break;
+		case BP_BG: pattern = BP_GB; break;
+		case BP_GB: pattern = BP_BG; break;
+		}
+	}
+
+	if(odd_row) {
+		switch(pattern) {
+		case BP_GR: pattern = BP_BG; break;
+		case BP_RG: pattern = BP_GB; break;
+		case BP_BG: pattern = BP_GR; break;
+		case BP_GB: pattern = BP_RG; break;
+		}
+	}
+
+	return pattern;
+}
+
+
+/**
+ * Calculate the RGB value of the center pixel depending on 9 pixel values from the bayer pattern.
+ *
+ * @param p11 upper left pixel
+ * @param p12 upper center pixel
+ * @param p13 upper right pixel
+ * @param p21 center left pixel
+ * @param p22 center center pixel
+ * @param p23 center right pixel
+ * @param p31 lower left pixel
+ * @param p32 lower center pixel
+ * @param p33 lower right pixel
+ * @param pattern current bayer pattern type, this value is adjusted to next expected type after return
+ * @param red On return: red pixel value
+ * @param green On return: green pixel value
+ * @param blue On return: blue pixel value
+ */
+static inline void calc_bilinear(int p11, int p12, int p13,
+				int p21, int p22, int p23,
+				int p31, int p32, int p33,
+				enum enumBayerPattern_t *pattern,
+				int *red, int *green, int *blue) {
+
+	/*
+	 * Interpolate the two missing color channels of the center pixel p22
+	 * from its 3x3 neighborhood and advance *pattern to the type of the
+	 * next pixel on the same line.
+	 */
+	switch(*pattern) {
+	case BP_GR:
+		// green pixel on a red row: red left/right, blue above/below
+		*pattern = BP_RG;
+		*blue = (p12 + p32) >> 1;
+		*red = (p21 + p23) >> 1;
+		*green = (p11 + p13 + p22*4 + p31 + p33) >> 3;
+		break;
+
+	case BP_RG:
+		// red pixel: green on the edges, blue on the corners
+		*pattern = BP_GR;
+		*blue = (p11 + p13 + p31 + p33) >> 2;
+		*green = (p12 + p21 + p23 + p32) >> 2;
+		*red = p22;
+		break;
+
+	case BP_BG:
+		// blue pixel: green on the edges, red on the corners
+		*pattern = BP_GB;
+		*red = (p11 + p13 + p31 + p33) >> 2;
+		*green = (p12 + p21 + p23 + p32) >> 2;
+		*blue = p22;
+		break;
+
+	case BP_GB:
+		// green pixel on a blue row: blue left/right, red above/below
+		*pattern = BP_BG;
+		*blue = (p21 + p23) >> 1;
+		*red = (p12 + p32) >> 1;
+		*green = (p11 + p13 + p22*4 + p31 + p33) >> 3;
+		break;
+	}
+}
+
+
+/**
+ * Bilinear demosaicing algorithm for RGB image (16 bit per color channel).
+ *
+ * @param img_rgb On return: RGB image in R-G-B order
+ * @param img_bayer bayer image
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param bayer_pattern first pixel starts with this pixel
+ * @return 0 on success otherwise -1
+ */
+static void bilinear16(uint16_t *img_rgb, const uint16_t *img_bayer, const int height, const int width, const enum enumBayerPattern_t bayer_pattern)
+#include "alg_debayer_bilinear.h"
+
+
+/**
+ * Bilinear demosaicing algorithm for RGB image (8 bit per color channel).
+ *
+ * @param img_rgb On return: RGB image in R-G-B order
+ * @param img_bayer bayer image
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param bayer_pattern first pixel starts with this pixel
+ * @return 0 on success otherwise -1
+ */
+static void bilinear8(uint8_t *img_rgb, const uint8_t *img_bayer, const int height, const int width, const enum enumBayerPattern_t bayer_pattern)
+#include "alg_debayer_bilinear.h"
+
+
+/**
+ * Bilinear demosaicing wrapper function used to select the pixel-bit-depth dependent algorithm.
+ *
+ * @param img_rgb On return: RGB image in R-G-B order
+ * @param img_bayer bayer image
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param data_format image data format
+ * @param bayer_pattern first pixel starts with this pixel
+ * @return 0 on success otherwise -1
+ */
+static int bilinear_wrapper(void *img_rgb, const void *img_bayer, const int height, const int width,
+				const enum enumDataFormat_t data_format, const enum enumBayerPattern_t bayer_pattern) {
+
+	// dispatch on the per-channel storage size of the raw data
+	switch(data_format) {
+	case DF_RAW_BAYER_8:
+		bilinear8(img_rgb, img_bayer, height, width, bayer_pattern);
+		return 0;
+
+	case DF_RAW_BAYER_12:
+	case DF_HDR_BAYER_20_COMP:
+		bilinear16(img_rgb, img_bayer, height, width, bayer_pattern);
+		return 0;
+
+	default:
+		printf("%s: image data format %d unknown\n", __func__, data_format);
+		return -1;
+	}
+}
+
+
+/**
+ * Bayer to RGB image conversion algorithm (demosaicing)
+ *
+ * @param debayer_data required data for debayering
+ * @return 0 on success otherwise -1
+ */
+int debayer(struct debayer_data_t *debayer_data) {
+
+	const enum enumDataFormat_t fmt = debayer_data->format;
+	int ret;
+
+	// monochrome data carries no color filter array, nothing to reconstruct
+	if(fmt == DF_RAW_MONO_8 || fmt == DF_RAW_MONO_12 || fmt == DF_HDR_MONO_20_COMP) {
+		printf("%s: No debayering is done on monochrome image (image data format %d)\n", __func__, fmt);
+		return -1;
+	}
+
+	switch(debayer_data->alg) {
+	case BAYER_ALG_BILINEAR:
+		ret = bilinear_wrapper(debayer_data->img_rgb, debayer_data->img_raw,
+					debayer_data->height, debayer_data->width,
+					fmt, debayer_data->start_pattern);
+		break;
+
+	default:
+		printf("%s: Debayer algorithm %d not implemented yet\n", __func__, debayer_data->alg);
+		ret = -1;
+	}
+	return ret;
+}
diff --git a/filter.c b/filter.c
new file mode 100644
index 0000000..6c8d51a
--- /dev/null
+++ b/filter.c
@@ -0,0 +1,532 @@
+/**
+* @file filter.c
+* @brief various filtering algorithm
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-08-28
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <math.h>
+
+#define MAX_GAUSS_KERNEL_SIZE 25 ///< maximum gaussian kernel size (must be an odd number)
+
+
+/**
+ * Filter pixel with given 3x3 kernel. Fixed-point is used.
+ *
+ * @param a11 kernel weight at position 1/1
+ * @param a12 kernel weight at position 1/2
+ * @param a13 kernel weight at position 1/3
+ * @param a21 kernel weight at position 2/1
+ * @param a22 kernel weight at position 2/2
+ * @param a23 kernel weight at position 2/3
+ * @param a31 kernel weight at position 3/1
+ * @param a32 kernel weight at position 3/2
+ * @param a33 kernel weight at position 3/3
+ * @param p11 pixel value at position 1/1
+ * @param p12 pixel value at position 1/2
+ * @param p13 pixel value at position 1/3
+ * @param p21 pixel value at position 2/1
+ * @param p22 pixel value at position 2/2
+ * @param p23 pixel value at position 2/3
+ * @param p31 pixel value at position 3/1
+ * @param p32 pixel value at position 3/2
+ * @param p33 pixel value at position 3/3
+ * @param shift_fact The shifting factor defines how many number of bits the kernel and pixel were shifted to left.
+ * @return filtered pixel value
+ */
+static inline int16_t calc_filter3x3(const int16_t a11, const int16_t a12, const int16_t a13,
+				const int16_t a21, const int16_t a22, const int16_t a23,
+				const int16_t a31, const int16_t a32, const int16_t a33,
+				const int16_t p11, const int16_t p12, const int16_t p13,
+				const int16_t p21, const int16_t p22, const int16_t p23,
+				const int16_t p31, const int16_t p32, const int16_t p33,
+				const int shift_fact) {
+
+	int16_t out;
+
+	// The 9 products are summed in int-promoted arithmetic, scaled back by the
+	// fixed-point shift and then truncated to 16 bit on assignment.
+	out = (a11*p11 + a12*p12 + a13*p13 + a21*p21 + a22*p22 + a23*p23 + a31*p31 + a32*p32 + a33*p33) >> shift_fact;
+	return out;
+}
+
+
+/**
+ * Apply sobel kernel to given 3x3 pixels.
+ *
+ * @param p11 pixel value at position 1/1
+ * @param p12 pixel value at position 1/2
+ * @param p13 pixel value at position 1/3
+ * @param p21 pixel value at position 2/1
+ * @param p22 pixel value at position 2/2
+ * @param p23 pixel value at position 2/3
+ * @param p31 pixel value at position 3/1
+ * @param p32 pixel value at position 3/2
+ * @param p33 pixel value at position 3/3
+ * @return filtered pixel value
+ *
+ * NOTE
+ * For performance reasons the return value is ret = abs(sobel_x) + abs(sobel_y) instead of ret = sqrt(sobel_x^2 + sobel_y^2)
+ */
+static inline int16_t calc_filter_sobel(const int16_t p11, const int16_t p12, const int16_t p13,
+				const int16_t p21, const int16_t p22, const int16_t p23,
+				const int16_t p31, const int16_t p32, const int16_t p33) {
+
+	int16_t sobel_y, sobel_x;
+
+	// horizontal gradient: sobel x-kernel [-1 0 1; -2 0 2; -1 0 1]
+	sobel_x = (-1)*p11 + p13 - 2*p21 + 2*p23 - p31 + p33;
+	if(sobel_x < 0) {
+		sobel_x = sobel_x * -1;	// manual abs keeps the value in 16 bit
+	}
+
+	// vertical gradient: sobel y-kernel [-1 -2 -1; 0 0 0; 1 2 1]
+	sobel_y = (-1)*p11 - 2*p12 - p13 + p31 + 2*p32 + p33;
+	if(sobel_y < 0) {
+		sobel_y = sobel_y * -1;
+	}
+
+	// L1 norm of the gradient vector (cheaper than the euclidean magnitude)
+	return (sobel_x+sobel_y);
+}
+
+
+/**
+ * Filter pixel at given image by applying NxN kernel. Each pixel at the image is a 32 bit signed value. The step size
+ * defines the offset to the next pixel. E. g. a step size of 3 means the image uses 3 32-bit channels.
+ * The image and the kernel use fixed-point values. Therefore a shifting factor is used.
+ *
+ * NOTE
+ * The kernel height and width must be odd values (e. g. 3x5 is accepted). This function doesn't make any sanity checks
+ * due to performance reason.
+ *
+ * @param img start image address
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param step_size step size
+ * @param coord_y y pixel coordinate to apply kernel
+ * @param coord_x x pixel coordinate to apply kernel
+ * @param a pointer to filter kernel (the kernel values must be shifted correctly)
+ * @param kernel_height kernel height in number of pixels
+ * @param kernel_width kernel width in number of pixels
+ * @param shift_fact shifting factor
+ * @return filtered pixel value
+ */
+static int16_t calc_filterNxN(const int16_t *img, const int height, const int width, const int step_size,
+		const int coord_y, const int coord_x, const int16_t *a, const int kernel_height, const int kernel_width,
+		const int shift_fact) {
+
+	int y, x, y_start, y_end, x_start, x_end;
+	int index_img, index_kernel;
+	int64_t out;
+
+	// Kernel window centered on (coord_y/coord_x). The caller must keep the
+	// window inside the image: no bounds clamping is done here (height is
+	// unused for the same reason, it is kept for interface symmetry).
+	y_start = coord_y-kernel_height/2;
+	y_end = y_start+kernel_height;
+	x_start = coord_x-kernel_width/2;
+	x_end = x_start+kernel_width;
+
+	index_kernel = 0;
+	out = 0;	// 64 bit accumulator: no overflow even for the largest kernels
+	for(y = y_start; y < y_end; y++) {
+		index_img = (y*width + x_start)*step_size;	// first pixel of this kernel row
+		for(x = x_start; x < x_end; x++) {
+			out += a[index_kernel]*img[index_img];
+			index_img += step_size;	// step to the same channel of the next pixel
+			index_kernel++;
+		}
+	}
+	return (out>>shift_fact);
+}
+
+
+/**
+ * Apply sobel filter (edge detection) on given input image of type: 3 channels, 16 bit signed fixed-point per channel
+ *
+ * @param img_sobel On return: sobel filtered image
+ * @param img_in input image to be filtered
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param skip_ch1 set to 0 if channel 1 should be filtered
+ * @param skip_ch2 set to 0 if channel 2 should be filtered
+ * @param skip_ch3 set to 0 if channel 3 should be filtered
+ */
+void filter_sobel_3s16(int16_t *img_sobel, const int16_t *img_in, const int height, const int width,
+		const int skip_ch1, const int skip_ch2, const int skip_ch3) {
+
+	int y, x, index_upper, index_center, index_lower, index_left, index_right;
+
+	// Filter the interior of the image. The three row indices point to
+	// channel 1 of pixel (y/x-1); the filter window therefore spans the
+	// pixels x-1, x and x+1 (offsets 0, +3, +6) of three consecutive rows,
+	// and the result is stored at the window center (index_center+3).
+	for(y = 1; y < (height-1); y++) {
+
+		index_upper = (y-1)*width*3;
+		index_center = y*width*3;
+		index_lower = (y+1)*width*3;
+
+
+		for(x = 1; x < (width-1); x++) {
+
+			// channel 1
+			if(!skip_ch1) {
+				img_sobel[index_center+3] = calc_filter_sobel(	img_in[index_upper], img_in[index_upper+3], img_in[index_upper+6],
+										img_in[index_center], img_in[index_center+3], img_in[index_center+6],
+										img_in[index_lower], img_in[index_lower+3], img_in[index_lower+6]);
+			}
+			else {
+				img_sobel[index_center+3] = 0;
+			}
+
+			// advance all three row indices to channel 2
+			index_upper++;
+			index_center++;
+			index_lower++;
+
+			// channel 2
+			if(!skip_ch2) {
+				img_sobel[index_center+3] = calc_filter_sobel(	img_in[index_upper], img_in[index_upper+3], img_in[index_upper+6],
+										img_in[index_center], img_in[index_center+3], img_in[index_center+6],
+										img_in[index_lower], img_in[index_lower+3], img_in[index_lower+6]);
+			}
+			else {
+				img_sobel[index_center+3] = 0;
+			}
+
+			// advance all three row indices to channel 3
+			index_upper++;
+			index_center++;
+			index_lower++;
+
+			// channel 3
+			if(!skip_ch3) {
+				img_sobel[index_center+3] = calc_filter_sobel(	img_in[index_upper], img_in[index_upper+3], img_in[index_upper+6],
+										img_in[index_center], img_in[index_center+3], img_in[index_center+6],
+										img_in[index_lower], img_in[index_lower+3], img_in[index_lower+6]);
+			}
+			else {
+				img_sobel[index_center+3] = 0;
+			}
+
+			// after the third increment the indices point to channel 1 of the next pixel
+			index_upper++;
+			index_center++;
+			index_lower++;
+		}
+	}
+
+
+	/*
+	 * Image border are set to 0 (the 3x3 window is undefined there)
+	 */
+	index_upper = 0;
+	index_lower = (height-1)*width*3;
+	for(x = 0; x < width; x++) {
+
+		// horizontal upper border
+		img_sobel[index_upper] = 0;
+		img_sobel[index_upper+1] = 0;
+		img_sobel[index_upper+2] = 0;
+		index_upper += 3;
+
+		// horizontal lower border
+		img_sobel[index_lower] = 0;
+		img_sobel[index_lower+1] = 0;
+		img_sobel[index_lower+2] = 0;
+		index_lower += 3;
+	}
+
+	index_left = 0;
+	index_right = width*3-3;
+	for(y = 0; y < height; y++) {
+
+		// vertical left border
+		img_sobel[index_left] = 0;;
+		img_sobel[index_left+1] = 0;
+		img_sobel[index_left+2] = 0;
+		index_left += width*3;
+
+		// vertical right border
+		img_sobel[index_right] = 0;;
+		img_sobel[index_right+1] = 0;
+		img_sobel[index_right+2] = 0;
+		index_right += width*3;
+	}
+}
+
+
+/**
+ * Apply gauss low-pass filter on given input image of type: 3 channels, 16 bit signed fixed-point per channel.
+ * An odd kernel size is expected.
+ *
+ * @param img_gauss On return: gauss low-pass filtered image
+ * @param img_in input image to be filtered
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param kernel_size filter kernel size (minimum of 3, maximum see @ref MAX_GAUSS_KERNEL_SIZE)
+ * @param spread gaussian curve spreading value; the larger the value, the wider the spread (spread = sigma)
+ * @param skip_ch1 set to 0 if channel 1 should be filtered
+ * @param skip_ch2 set to 0 if channel 2 should be filtered
+ * @param skip_ch3 set to 0 if channel 3 should be filtered
+ */
+void filter_gauss_3s16(int16_t *img_gauss, const int16_t *img_in, const int height, const int width, int kernel_size, const float spread,
+		const int skip_ch1, const int skip_ch2, const int skip_ch3) {
+
+	int y, x, index, index_upper, index_center, index_lower;
+	int kernel_start, kernel_end;
+	float a[MAX_GAUSS_KERNEL_SIZE*MAX_GAUSS_KERNEL_SIZE];
+	int16_t a_s16[MAX_GAUSS_KERNEL_SIZE*MAX_GAUSS_KERNEL_SIZE];
+	float sum;
+	int x_start, x_end, y_start, y_end;
+	const int shift_fact = 10; // 2^10 = 1024 --> about 3 digits of fixed-point precision
+
+
+	// force an odd kernel size so the kernel has a well defined center pixel
+	if((kernel_size%2) == 0) {
+		kernel_size++;
+	}
+
+	// clamp kernel size to the supported range
+	if(kernel_size < 3) {
+		kernel_size = 3;
+	}
+	else if(kernel_size > MAX_GAUSS_KERNEL_SIZE) {
+		kernel_size = MAX_GAUSS_KERNEL_SIZE;
+	}
+
+
+	/*
+	 * Generate 2D-gaussian kernel depending on given size.
+	 * Normalize the kernel so that the sum over all kernel elements is 1.0,
+	 * then convert each weight to 16 bit fixed-point.
+	 */
+	kernel_start = -1*kernel_size/2;
+	kernel_end = -1*kernel_start;
+	index = 0;
+	sum = 0;
+	for(y = kernel_start; y <= kernel_end; y++) {
+		for(x = kernel_start; x <= kernel_end; x++) {
+			a[index] = (1.0/(2*M_PI*spread*spread))*expf((-1)*((x*x+y*y)/(2*spread*spread)));
+			sum += a[index];
+			index++;
+		}
+	}
+	index = 0;
+	for(y = 0; y < kernel_size; y++) {
+		for(x = 0; x < kernel_size; x++) {
+			a[index] /= sum;
+			a_s16[index] = (int16_t)roundf(a[index] * (1<<shift_fact));
+			index++;
+		}
+	}
+
+
+	/*
+	 * This loop filters the image without the border region.
+	 *
+	 * index points to channel 1 of the destination pixel (y/x). For the fast
+	 * kernel_size == 3 path the index_upper/index_center/index_lower triplet
+	 * points to channel 1 of pixel (y/x-1), so the 3x3 window spans the
+	 * pixels x-1, x and x+1 (offsets 0, +3, +6) of three consecutive rows.
+	 *
+	 * Bugfix: the 3x3 path used to store its result at img_gauss[index_center],
+	 * i.e. at pixel x-1 instead of the window center pixel x. That shifted the
+	 * filtered image one pixel to the left and left the last interior column
+	 * unwritten. The result is now stored at img_gauss[index], consistent with
+	 * the NxN path below and with filter_sobel_3s16().
+	 */
+	y_start = kernel_size/2;
+	y_end = height-y_start;
+	x_start = kernel_size/2;
+	x_end = width-x_start;
+	for(y = y_start; y < y_end; y++) {
+
+		index = (y*width+x_start)*3;
+		index_upper = (y-1)*width*3;
+		index_center = y*width*3;
+		index_lower = (y+1)*width*3;
+
+		for(x = x_start; x < x_end; x++) {
+
+			if(!skip_ch1) {
+				if(kernel_size == 3) {
+					img_gauss[index] = calc_filter3x3(	a_s16[0], a_s16[1], a_s16[2], a_s16[3], a_s16[4], a_s16[5], a_s16[6], a_s16[7], a_s16[8],
+										img_in[index_upper], img_in[index_upper+3], img_in[index_upper+6],
+										img_in[index_center], img_in[index_center+3], img_in[index_center+6],
+										img_in[index_lower], img_in[index_lower+3], img_in[index_lower+6],
+										shift_fact);
+				}
+				else {
+					img_gauss[index] = calc_filterNxN(img_in, height, width, 3, y, x, a_s16, kernel_size, kernel_size, shift_fact);
+				}
+			}
+			else {
+				img_gauss[index] = 0;
+			}
+
+			index++;
+			index_upper++;
+			index_center++;
+			index_lower++;
+
+			if(!skip_ch2) {
+				if(kernel_size == 3) {
+					img_gauss[index] = calc_filter3x3(	a_s16[0], a_s16[1], a_s16[2], a_s16[3], a_s16[4], a_s16[5], a_s16[6], a_s16[7], a_s16[8],
+										img_in[index_upper], img_in[index_upper+3], img_in[index_upper+6],
+										img_in[index_center], img_in[index_center+3], img_in[index_center+6],
+										img_in[index_lower], img_in[index_lower+3], img_in[index_lower+6],
+										shift_fact);
+				}
+				else {
+					img_gauss[index] = calc_filterNxN(img_in+1, height, width, 3, y, x, a_s16, kernel_size, kernel_size, shift_fact);
+				}
+			}
+			else {
+				img_gauss[index] = 0;
+			}
+
+			index++;
+			index_upper++;
+			index_center++;
+			index_lower++;
+
+			if(!skip_ch3) {
+				if(kernel_size == 3) {
+					img_gauss[index] = calc_filter3x3(	a_s16[0], a_s16[1], a_s16[2], a_s16[3], a_s16[4], a_s16[5], a_s16[6], a_s16[7], a_s16[8],
+										img_in[index_upper], img_in[index_upper+3], img_in[index_upper+6],
+										img_in[index_center], img_in[index_center+3], img_in[index_center+6],
+										img_in[index_lower], img_in[index_lower+3], img_in[index_lower+6],
+										shift_fact);
+				}
+				else {
+					img_gauss[index] = calc_filterNxN(img_in+2, height, width, 3, y, x, a_s16, kernel_size, kernel_size, shift_fact);
+				}
+			}
+			else {
+				img_gauss[index] = 0;
+			}
+
+			index++;
+			index_upper++;
+			index_center++;
+			index_lower++;
+		}
+	}
+
+
+	/*
+	 * The image border is not filtered: the input is copied through unchanged.
+	 */
+	// handle upper horizontal border area
+	index = 0;
+	for(y = 0; y < y_start; y++) {
+		for(x = 0; x < width; x++) {
+			img_gauss[index] = img_in[index];
+			img_gauss[index+1] = img_in[index+1];
+			img_gauss[index+2] = img_in[index+2];
+			index += 3;
+		}
+	}
+
+	// handle lower horizontal border area
+	index = y_end*width*3;
+	for(y = y_end; y < height; y++) {
+		for(x = 0; x < width; x++) {
+			img_gauss[index] = img_in[index];
+			img_gauss[index+1] = img_in[index+1];
+			img_gauss[index+2] = img_in[index+2];
+			index += 3;
+		}
+	}
+
+	// handle left vertical border area
+	for(y = 0; y < height; y++) {
+		index = y*width*3;
+		for(x = 0; x < x_start; x++) {
+			img_gauss[index] = img_in[index];
+			img_gauss[index+1] = img_in[index+1];
+			img_gauss[index+2] = img_in[index+2];
+			index += 3;
+		}
+	}
+
+	// handle right vertical border area
+	for(y = 0; y < height; y++) {
+		index = (y*width+x_end)*3;
+		for(x = x_end; x < width; x++) {
+			img_gauss[index] = img_in[index];
+			img_gauss[index+1] = img_in[index+1];
+			img_gauss[index+2] = img_in[index+2];
+			index += 3;
+		}
+	}
+}
+
+
+/**
+ * Generate binary image from given image of type: 3 channels, 16 bit signed fixed-point per channel.
+ * All pixel values above the given threshold are set to the defined value. Otherwise the values are set to 0.
+ *
+ * @param img_bin On return: binary image
+ * @param img_in input image to be thresholded
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param threshold binary threshold
+ * @param bin_value Pixel values above given threshold are set to this value. Values below the threshold are set to 0.
+ * @param skip_ch1 set to 0 if channel 1 should be filtered
+ * @param skip_ch2 set to 0 if channel 2 should be filtered
+ * @param skip_ch3 set to 0 if channel 3 should be filtered
+ */
+void filter_binary_3s16(int8_t *img_bin, const int16_t *img_in, const int height, const int width, const int16_t threshold, const int8_t bin_value,
+		const int skip_ch1, const int skip_ch2, const int skip_ch3) {
+
+	int y, x, index;
+
+	index = 0;
+
+	// Walk the interleaved 3-channel image pixel by pixel; the per-channel
+	// work is unrolled and index steps one channel at a time. Skipped
+	// channels are forced to 0 in the output.
+	for(y = 0; y < height; y++) {
+		for(x = 0; x < width; x++) {
+			// channel 1
+			if(!skip_ch1) {
+				if(img_in[index] >= threshold) {
+					img_bin[index] = bin_value;
+				}
+				else {
+					img_bin[index] = 0;
+				}
+			}
+			else {
+				img_bin[index] = 0;
+			}
+
+			index++;
+
+			// channel 2
+			if(!skip_ch2) {
+				if(img_in[index] >= threshold) {
+					img_bin[index] = bin_value;
+				}
+				else {
+					img_bin[index] = 0;
+				}
+			}
+			else {
+				img_bin[index] = 0;
+			}
+
+			index++;
+
+			// channel 3
+			if(!skip_ch3) {
+				if(img_in[index] >= threshold) {
+					img_bin[index] = bin_value;
+				}
+				else {
+					img_bin[index] = 0;
+				}
+			}
+			else {
+				img_bin[index] = 0;
+			}
+
+			index++;
+		}
+	}
+
+}
+
diff --git a/filter.h b/filter.h
new file mode 100644
index 0000000..5b70294
--- /dev/null
+++ b/filter.h
@@ -0,0 +1,52 @@
+/**
+* @file filter.h
+* @brief various filtering algorithm definitions
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-08-28
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+
+#ifndef _FILTER_H
+#define _FILTER_H
+
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+void filter_sobel_3s16(int16_t *img_sobel, const int16_t *img_in, const int height, const int width,
+ const int skip_ch1, const int skip_ch2, const int skip_ch3) ;
+
+void filter_gauss_3s16(int16_t *img_gauss, const int16_t *img_in, const int height, const int width, int kernel_size, const float spread,
+ const int skip_ch1, const int skip_ch2, const int skip_ch3);
+
+void filter_binary_3s16(int8_t *img_bin, const int16_t *img_in, const int height, const int width, const int16_t threshold, const int8_t bin_value,
+ const int skip_ch1, const int skip_ch2, const int skip_ch3);
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
+
+
+#endif // _FILTER_H \ No newline at end of file
diff --git a/gamma_corr.c b/gamma_corr.c
new file mode 100644
index 0000000..c0e5cd4
--- /dev/null
+++ b/gamma_corr.c
@@ -0,0 +1,113 @@
+/**
+* @file gamma_corr.c
+* @brief gamma correction algorithm
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-09-08
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+
+#include "color_pipe_private.h"
+
+
+/**
+ * Apply gamma correction to given image type: RGB or monochrome with 8 bit per color channel
+ *
+ * @param img_rgb On return: gamma corrected image
+ * @param img_in image to be gamma corrected
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param gamma_table lookup table mapping each input value to its gamma-corrected output value
+ * @param is_color 1 if it's a color image, 0 if it's a monochrome image
+ * @return 0 on success otherwise -1
+ */
+static void gamma_corr8(uint8_t *img_rgb, const uint8_t *img_in, const int height, const int width, const int *gamma_table, const int is_color)
+#include "alg_gamma.h"
+
+
+/**
+ * Apply gamma correction to given image type: RGB or monochrome with 16 bit per color channel
+ *
+ * @param img_rgb On return: gamma corrected image
+ * @param img_in image to be gamma corrected
+ * @param height image height in number of pixels
+ * @param width image width in number of pixels
+ * @param gamma_table lookup table mapping each input value to its gamma-corrected output value
+ * @param is_color 1 if it's a color image, 0 if it's a monochrome image
+ * @return 0 on success otherwise -1
+ */
+static void gamma_corr16(uint16_t *img_rgb, const uint16_t *img_in, const int height, const int width, const int *gamma_table, const int is_color)
+#include "alg_gamma.h"
+
+
+/**
+ * Apply gamma correction to given image type.
+ *
+ * @param gamma_data required data for gamma correction
+ * @return 0 on success otherwise -1
+ */
+int gamma_corr(struct gamma_data_t *gamma_data) {
+
+	void *img_gamma, *img_in;
+	int *gamma_table;
+	int i, is_color, bit_channel, width, height, max_pix, gamma_table_bitdepth;
+	float gamma, gamma_table_init;
+
+
+	// put variables on stack
+	is_color = gamma_data->is_color;
+	img_gamma = gamma_data->img_gamma;
+	img_in = gamma_data->img_in;
+	bit_channel = gamma_data->bit_channel;
+	width = gamma_data->width;
+	height = gamma_data->height;
+	gamma = gamma_data->gamma;
+	max_pix = (1<<bit_channel)-1;
+	gamma_table_bitdepth = gamma_data->gamma_table_bitdepth;
+	gamma_table_init = gamma_data->gamma_table_init;
+	gamma_table = gamma_data->gamma_table;
+
+	/*
+	 * Reject unsupported bit depths before touching the lookup table: the
+	 * table holds (1<<bit_channel) entries, so rebuilding it for anything
+	 * above 16 bit would overflow it. Previously such depths silently
+	 * returned success without doing anything.
+	 */
+	if(bit_channel < 1 || bit_channel > 16) {
+		printf("%s: unsupported bit resolution %d\n", __func__, bit_channel);
+		return -1;
+	}
+
+	/*
+	 * Check whether the lookup table is initialized with the correct gamma
+	 * value. It is rebuilt lazily whenever the bit depth or the gamma
+	 * coefficient changed since the last call.
+	 */
+	if(gamma_table_bitdepth != bit_channel || gamma != gamma_table_init) {
+		for(i = 0; i <= max_pix; i++) {
+			gamma_table[i] = roundf(max_pix*pow(i/((float)max_pix), 1.0/gamma));
+		}
+		gamma_data->gamma_table_bitdepth = bit_channel;
+		gamma_data->gamma_table_init = gamma;
+	}
+
+	// select the lookup routine matching the per-channel storage size
+	if(bit_channel <= 8) {
+		gamma_corr8(img_gamma, img_in, height, width, gamma_table, is_color);
+	}
+	else {
+		gamma_corr16(img_gamma, img_in, height, width, gamma_table, is_color);
+	}
+	return 0;
+}
diff --git a/sharpening.c b/sharpening.c
new file mode 100644
index 0000000..9e8ba86
--- /dev/null
+++ b/sharpening.c
@@ -0,0 +1,482 @@
+/**
* @file sharpening.c
+* @brief sharpening algorithm
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-08-27
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+#include <stdio.h>
+#include <string.h>
+
+#if (WITH_SIMD == 1)
+#include <immintrin.h> // see /usr/lib64/gcc/x86_64-suse-linux/4.7/include/immintrin.h
+#endif // WITH_SIMD
+
+#include "color_pipe_private.h"
+#include "color.h"
+#include "filter.h"
+
+
/**
 * Transform a monochrome image to a YUV image.
 *
 * Every pixel of the monochrome image is copied to the Y channel; the U and V
 * channels are set to 0. All pixels -- including the image border -- are
 * converted, so the YUV buffer is laid out as a full height x width image with
 * pixel (y, x) at index (y*width + x)*3, which is the layout the sharpening
 * code (make_sharper) expects. The previous version skipped the border in the
 * loop bounds but still advanced both indices contiguously, which misaligned
 * the whole image and left the border Y values uninitialized.
 *
 * @param img_yuv On return: image in YUV color space (3 values per pixel)
 * @param img_mono input monochrome image (1 value per pixel)
 * @param height image height in number of pixels
 * @param width image width in number of pixels
 * @param bit_channel monochrome bit resolution (<= 8: 8-bit buffer, otherwise 16-bit buffer)
 */
static void mono_to_yuv(int16_t *img_yuv, const void *img_mono, const int height, const int width, const int bit_channel) {

    int index_mono, index_yuv;
    const int num_pix = height*width;
    const uint8_t *in8 = (const uint8_t*)img_mono;
    const uint16_t *in16 = (const uint16_t*)img_mono;


    index_yuv = 0;
    for(index_mono = 0; index_mono < num_pix; index_mono++) {
        if(bit_channel <= 8) {
            img_yuv[index_yuv] = in8[index_mono];
        }
        else if(bit_channel <= 16) {
            img_yuv[index_yuv] = in16[index_mono];
        }
        // monochrome image carries no chroma
        img_yuv[index_yuv+1] = 0;
        img_yuv[index_yuv+2] = 0;
        index_yuv += 3;
    }
}
+
+
/**
 * Transform a YUV image back to a monochrome image.
 *
 * The Y channel is clamped to the valid pixel range [0, 2^bit_channel - 1]
 * and stored in the monochrome output buffer; U and V are dropped. All
 * pixels -- including the image border -- are converted (the previous version
 * skipped the border in the loop bounds but advanced both indices
 * contiguously, misaligning the image; it also used a signed int8_t output
 * pointer, so 8-bit values above 127 wrapped negative after clamping).
 *
 * @param img_mono On return: monochrome image (1 value per pixel)
 * @param img_yuv input image in YUV color space (3 values per pixel)
 * @param height image height in number of pixels
 * @param width image width in number of pixels
 * @param bit_channel monochrome bit resolution (<= 8: 8-bit buffer, otherwise 16-bit buffer)
 */
static void yuv_to_mono(void *img_mono, const int16_t *img_yuv, const int height, const int width, const int bit_channel) {

    int index_mono, index_yuv, value;
    // pixel buffers are unsigned: values lie in [0, 2^bit_channel - 1]
    uint8_t *out8 = (uint8_t*)img_mono;
    uint16_t *out16 = (uint16_t*)img_mono;
    const int num_pix = height*width;
    const int pix_max = (1<<bit_channel)-1;


    index_yuv = 0;
    for(index_mono = 0; index_mono < num_pix; index_mono++) {
        value = img_yuv[index_yuv];

        // clamp Y channel to the valid pixel range
        if(value < 0) {
            value = 0;
        }
        else if(value > pix_max) {
            value = pix_max;
        }

        if(bit_channel <= 8) {
            out8[index_mono] = value;
        }
        else if(bit_channel <= 16) {
            out16[index_mono] = value;
        }
        index_yuv += 3;
    }
}
+
+
/**
 * Sharpen one pixel by applying a 3x3 filter kernel (fixed-point arithmetic).
 * All eight kernel weights around the center are equal, so the weight is
 * factored out of the neighbour sum.
 *
 * @param a_other kernel weight of the eight neighbours around the center
 * @param a_center kernel weight at the center position
 * @param p11 pixel value at position 1/1
 * @param p12 pixel value at position 1/2
 * @param p13 pixel value at position 1/3
 * @param p21 pixel value at position 2/1
 * @param p22 pixel value at position 2/2 (center)
 * @param p23 pixel value at position 2/3
 * @param p31 pixel value at position 3/1
 * @param p32 pixel value at position 3/2
 * @param p33 pixel value at position 3/3
 * @param shift_fact number of bits the kernel weights were shifted to the left
 * @return filtered pixel value
 */
static inline int16_t do_sharp(const int16_t a_other, const int16_t a_center,
                const int16_t p11, const int16_t p12, const int16_t p13,
                const int16_t p21, const int16_t p22, const int16_t p23,
                const int16_t p31, const int16_t p32, const int16_t p33,
                const int shift_fact) {

    // all neighbours share the same weight: sum them once, multiply once
    const int ring_sum = p11 + p12 + p13 + p21 + p23 + p31 + p32 + p33;
    const int acc = a_other*ring_sum + a_center*p22;

    return (int16_t)(acc >> shift_fact);
}
+
#if 0
/*
 * Sharpening algorithm using SSE intrinsics (SSSE3 shuffle + pmaddwd).
 * Disabled: measured slower than the scalar do_sharp() above, kept for
 * reference only. The shuffle mask gathers the three Y components of a
 * pixel line into one vector before the multiply-accumulate.
 */
static int16_t do_sharp_sse(__m128i coeff_line0, __m128i coeff_line1, __m128i coeff_line2,
			__m128i px_line0, __m128i px_line1, __m128i px_line2, __m128i mask,
			int shift_fact) {

	__m128i y_line0, y_line1, y_line2, madd_line0, madd_line1, madd_line2;
	int32_t res[2];


	// extract the Y components of each line (U/V are masked out)
	y_line0 = _mm_shuffle_epi8(px_line0, mask);
	y_line1 = _mm_shuffle_epi8(px_line1, mask);
	y_line2 = _mm_shuffle_epi8(px_line2, mask);

	// per-line multiply-accumulate of pixels with kernel coefficients
	madd_line0 = _mm_madd_epi16(y_line0, coeff_line0);
	madd_line1 = _mm_madd_epi16(y_line1, coeff_line1);
	madd_line2 = _mm_madd_epi16(y_line2, coeff_line2);

	// horizontal reduction of the three partial sums into one scalar
	madd_line0 = _mm_hadd_epi32(madd_line0, madd_line1);
	madd_line0 = _mm_hadd_epi32(madd_line0, madd_line2);

	madd_line0 = _mm_hadd_epi32(madd_line0, madd_line0);
	madd_line0 = _mm_hadd_epi32(madd_line0, madd_line0);

	_mm_storel_epi64((__m128i*)&res, madd_line0);
	return (res[0] >> shift_fact);
}
#endif
+
/**
 * Make the given YUV image sharper. The filter strength tunes the sharpening strength.
 * If local sharpening is selected, only those pixels flagged in the sharpening mask are sharpened.
 *
 * This sharpening algorithm high-pass filters the input image and adds the result to itself.
 * Therefore all edges become sharper. The sharpening is done on the Y-channel only: the
 * brightness is of interest, and the U and V channels won't be touched to avoid color
 * shiftings.
 *
 * Border rows/columns (without the corners) are filtered with the missing neighbour line
 * mirrored; the four corner pixels are copied unmodified.
 *
 * @param img_out On return: high-pass filtered YUV image
 * @param img_in YUV image to filter with given kernel
 * @param height image height in number of pixels
 * @param width image width in number of pixels
 * @param sharp_strength sharpening strength factor
 * @param max_y maximum Y-channel value (depends on bit per pixel).
 *              NOTE(review): currently unused -- the sharpened Y value is not clamped in this
 *              function; presumably the downstream YUV-to-RGB/mono conversion clamps. Confirm.
 * @param local_flag not 0 if local sharpening must be done based on sharpening mask
 * @param sharp_mask sharpening mask (binary image, 3 values per pixel like img_in)
 */
static void make_sharper(int16_t *img_out, const int16_t * img_in, const int height, const int width, const float sharp_strength, const int max_y,
		const int local_flag, const int8_t *sharp_mask) {

	int y, x, index_upper, index_center, index_lower;
	int16_t filter_output;
//	__m128i coeff_line0, coeff_line1, coeff_line2, mask;


	/*
	 * don't touch it or check high-pass filter coefficient a_center for overflow!!
	 */
	const int shift_fact = 10;

	/*
	 * High-pass filter coefficients
	 *
	 * e. g. shift_fact = 10 and sharp_strength = 4
	 * --> a_other = -4/8.0 * 2^10 = 2^9 --> no overflow
	 * --> a_center = 4*2^10 = 2^12 --> no overflow
	 *
	 * e. g. shift_fact = 10 and sharp_strength = 32
	 * --> a_other = -32/8.0 * 2^10 = 2^12 --> no overflow
	 * --> a_center = 32*2^10 = 2^15 --> overflow because this value is not possible with int16_t datatype
	 */
	const int16_t a_other = -1.0*sharp_strength/8.0*(1<<shift_fact);
	const int16_t a_center = sharp_strength*(1<<shift_fact);


	/*
	coeff_line0 = _mm_set_epi16(0, 0, 0, 0, 0, a_other, a_other, a_other);
	coeff_line1 = _mm_set_epi16(0, 0, 0, 0, 0, a_other, a_center, a_other);
	coeff_line2 = _mm_set_epi16(0, 0, 0, 0, 0, a_other, a_other, a_other);
	mask = _mm_set_epi8(-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 13, 12, 7, 6, 1, 0);
	*/

	// inner pixels: the full 3x3 neighbourhood is available
	for(y = 1; y < (height-1); y++) {

		index_upper = (y-1)*width*3;
		index_center = y*width*3;
		index_lower = (y+1)*width*3;


		for(x = 1; x < (width-1); x++) {

			// index_center+3 is the Y component of the current pixel (x starts at 1)
			if(local_flag != 0 && sharp_mask[index_center+3] == 0) {
				// don't sharp this pixel
				filter_output = 0;
			}
			else {
				filter_output = do_sharp(a_other, a_center,
							img_in[index_upper], img_in[index_upper+3], img_in[index_upper+6],
							img_in[index_center], img_in[index_center+3], img_in[index_center+6],
							img_in[index_lower], img_in[index_lower+3], img_in[index_lower+6],
							shift_fact);
				/*
				filter_output = do_sharp_sse(	coeff_line0, coeff_line1, coeff_line2,
								_mm_lddqu_si128((__m128i*)(&img_in[index_upper])),
								_mm_lddqu_si128((__m128i*)(&img_in[index_center])),
								_mm_lddqu_si128((__m128i*)(&img_in[index_lower])),
								mask,
								shift_fact);
				*/

			}

			// add the high-pass response to the original Y value
			// NOTE(review): the int16_t sum is not clamped to max_y and may wrap -- confirm
			filter_output += img_in[index_center+3];

			img_out[index_center+3] = filter_output;
			img_out[index_center+4] = img_in[index_center+4];	// U unchanged
			img_out[index_center+5] = img_in[index_center+5];	// V unchanged
			index_upper += 3;
			index_center += 3;
			index_lower += 3;
		}
	}

	// handle horizontal upper border line (without corners): the missing upper line is mirrored
	index_center = 0;
	index_upper = width*3;
	index_lower = index_upper;
	for(x = 1; x < (width-1); x++) {
		if(local_flag != 0 && sharp_mask[index_center+3] == 0) {
			// don't sharp this pixel
			filter_output = 0;
		}
		else {
			filter_output = do_sharp(a_other, a_center,
						img_in[index_upper], img_in[index_upper+3], img_in[index_upper+6],
						img_in[index_center], img_in[index_center+3], img_in[index_center+6],
						img_in[index_lower], img_in[index_lower+3], img_in[index_lower+6],
						shift_fact);
		}

		filter_output += img_in[index_center+3];

		img_out[index_center+3] = filter_output;
		img_out[index_center+4] = img_in[index_center+4];
		img_out[index_center+5] = img_in[index_center+5];
		index_upper += 3;
		index_center += 3;
		index_lower += 3;
	}

	// handle horizontal lower border line (without corners): the missing lower line is mirrored
	index_center = (height-1)*width*3;
	index_upper = (height-2)*width*3;
	index_lower = index_upper;
	for(x = 1; x < (width-1); x++) {
		if(local_flag != 0 && sharp_mask[index_center+3] == 0) {
			// don't sharp this pixel
			filter_output = 0;
		}
		else {
			filter_output = do_sharp(a_other, a_center,
						img_in[index_upper], img_in[index_upper+3], img_in[index_upper+6],
						img_in[index_center], img_in[index_center+3], img_in[index_center+6],
						img_in[index_lower], img_in[index_lower+3], img_in[index_lower+6],
						shift_fact);
		}

		filter_output += img_in[index_center+3];

		img_out[index_center+3] = filter_output;
		img_out[index_center+4] = img_in[index_center+4];
		img_out[index_center+5] = img_in[index_center+5];
		index_upper += 3;
		index_center += 3;
		index_lower += 3;
	}

	// handle vertical left border line (without corners): the missing left column is mirrored
	for(y = 1; y < (height-1); y++) {
		index_upper = (y-1)*width*3;
		index_center = y*width*3;
		index_lower = (y+1)*width*3;

		// NOTE(review): this tests the mask entry of the neighbour pixel at x == 1
		// (index_center+3), not of the border pixel at index_center itself -- looks
		// like an off-by-one; confirm intent
		if(local_flag != 0 && sharp_mask[index_center+3] == 0) {
			// don't sharp this pixel
			filter_output = 0;
		}
		else {
			filter_output = do_sharp(a_other, a_center,
						img_in[index_upper+3], img_in[index_upper], img_in[index_upper+3],
						img_in[index_center+3], img_in[index_center], img_in[index_center+3],
						img_in[index_lower+3], img_in[index_lower], img_in[index_lower+3],
						shift_fact);
		}

		filter_output += img_in[index_center];
		img_out[index_center] = filter_output;
		img_out[index_center+1] = img_in[index_center+1];
		img_out[index_center+2] = img_in[index_center+2];
	}

	// handle vertical right border line (without corners): the missing right column is mirrored
	for(y = 1; y < (height-1); y++) {
		// index_center is the last pixel of row y: (y*width + width-1)*3 == (y+1)*width*3 - 3
		index_upper = y*width*3-3;
		index_center = (y+1)*width*3-3;
		index_lower = (y+2)*width*3-3;

		// NOTE(review): index_center+3 is the first pixel of the NEXT row, not this
		// border pixel -- looks like an off-by-one in the mask lookup; confirm intent
		if(local_flag != 0 && sharp_mask[index_center+3] == 0) {
			// don't sharp this pixel
			filter_output = 0;
		}
		else {
			filter_output = do_sharp(a_other, a_center,
						img_in[index_upper-3], img_in[index_upper], img_in[index_upper-3],
						img_in[index_center-3], img_in[index_center], img_in[index_center-3],
						img_in[index_lower-3], img_in[index_lower], img_in[index_lower-3],
						shift_fact);
		}

		filter_output += img_in[index_center];
		img_out[index_center] = filter_output;
		img_out[index_center+1] = img_in[index_center+1];
		img_out[index_center+2] = img_in[index_center+2];
	}


	/*
	 * Image corners are not sharpened!!
	 */

	// handle upper left corner
	img_out[0] = img_in[0];
	img_out[1] = img_in[1];
	img_out[2] = img_in[2];

	// handle upper right corner
	index_center = width*3-3;
	img_out[index_center] = img_in[index_center];
	img_out[index_center+1] = img_in[index_center+1];
	img_out[index_center+2] = img_in[index_center+2];

	// handle lower left corner
	index_center = (height-1)*width*3;
	img_out[index_center] = img_in[index_center];
	img_out[index_center+1] = img_in[index_center+1];
	img_out[index_center+2] = img_in[index_center+2];

	// handle lower right corner
	index_center = height*width*3-3;
	img_out[index_center] = img_in[index_center];
	img_out[index_center+1] = img_in[index_center+1];
	img_out[index_center+2] = img_in[index_center+2];
}
+
+
+/**
+ * Sharpening algorithm.
+ *
+ * @param sharp_data required sharpening data
+ * @return 0 on success otherwise -1
+ */
+int sharpening(struct sharp_data_t *sharp_data) {
+
+ void *img_sharp, *img_unsharp;
+ int is_color, bit_channel, width, height;
+ int16_t *img_yuv, *img_yuv_sharp, *img_sobel, *img_gauss;
+ float sharp_factor;
+ enum sharp_alg_t sharp_alg;
+ int8_t *sharp_mask;
+ int local_sens;
+
+
+ // put variables on stack
+ is_color = sharp_data->is_color;
+ img_sharp = sharp_data->img_sharp;
+ img_unsharp = sharp_data->img_in;
+ bit_channel = sharp_data->bit_channel;
+ width = sharp_data->width;
+ height = sharp_data->height;
+ img_yuv = sharp_data->img_yuv;
+ sharp_factor = sharp_data->sharp_factor;
+ sharp_alg = sharp_data->sharp_alg;
+ local_sens = sharp_data->local_sens;
+ img_yuv_sharp = sharp_data->img_yuv_sharp;
+ img_sobel = sharp_data->img_sobel;
+ img_gauss = sharp_data->img_gauss;
+ sharp_mask = sharp_data->sharp_mask;
+
+
+ /*
+ * Sharpening is done on Y-channel.
+ * In case of color image, the RGB is transformed to YUV. In case of monochrom image,
+ * the Y-channel is used only.
+ */
+ if(is_color) {
+ // RGB to YUV transformation
+ color_rgb_to_yuv(img_yuv, img_unsharp, height, width, bit_channel);
+ }
+ else {
+ mono_to_yuv(img_yuv, img_unsharp, height, width, bit_channel);
+ }
+
+ /*
+ * In case of local sharpening, do calculate sharpening mask.
+ */
+ if(sharp_alg == SHARP_ALG_LOCAL) {
+ filter_sobel_3s16(img_sobel, img_yuv, height, width, 0, 1, 1);
+ filter_gauss_3s16(img_gauss, img_sobel, height, width, 3, 1.0, 0, 1, 1); // incresing the kernel size need more computing performance
+ local_sens = (int)((1.0-local_sens/100.0)*(1<<bit_channel));
+ filter_binary_3s16(sharp_mask, img_gauss, height, width, local_sens, (1<<bit_channel)-1, 0, 1, 1);
+ }
+
+ /*
+ * Y-channel is sharpened only to avoid color shifting
+ */
+ make_sharper(img_yuv_sharp, img_yuv, height, width, sharp_factor, (1<<bit_channel)-1, sharp_alg == SHARP_ALG_LOCAL ? 1:0, sharp_mask);
+
+ // YUV to RGB transformation
+ if(is_color) {
+ color_yuv_to_rgb(img_sharp, img_yuv_sharp, height, width, bit_channel);
+ }
+ else {
+ yuv_to_mono(img_sharp, img_yuv_sharp, height, width, bit_channel);
+ }
+ return 0;
+}
diff --git a/white_balance.c b/white_balance.c
new file mode 100644
index 0000000..0040fb9
--- /dev/null
+++ b/white_balance.c
@@ -0,0 +1,285 @@
+/**
+* @file white_balance.c
+* @brief white balance algorithm
+* @author Patrick Roth - roth@stettbacher.ch
+* @version 1.0
+* @date 2015-08-20
+* @copyright 2012-2016 Stettbacher Signal Processing AG
+*
+* @remarks
+*
+* <PRE>
+* This library is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* This library is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with this library; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+* </PRE>
+*
+*/
+
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+
+#include "color_pipe_private.h"
+#include "color.h"
+
+/**
+ * Minimum color gain
+ */
+#define GAIN_MIN 0.1
+
+/**
+ * Maximum color gain
+ */
+#define GAIN_MAX 10.0
+
+
+
/**
 * Compute the average U and V components over all gray pixels of a YUV image.
 *
 * A pixel counts as gray when its colorfulness (|U| + |V|) / Y lies below the
 * given threshold; the measure is evaluated in fixed-point with 10 fractional
 * bits. Pixels with Y == 0 are skipped (the measure would divide by zero).
 *
 * @param img_yuv YUV image buffer
 * @param height image height in number of pixels
 * @param width image width in number of pixels
 * @param threshold gray value threshold
 * @param u_avg On return: average value of the U component over all gray pixels
 * @param v_avg On return: average value of the V component over all gray pixels
 * @return -1 if no gray pixel is available otherwise 0
 */
static int get_u_v_avg(const int16_t *img_yuv, const int height, const int width, const float threshold, float *u_avg, float *v_avg) {

    const int shift_fact = 10;
    const int thresh = threshold * ((float)(1 << shift_fact));
    const int num_pix = height*width;
    int pix, base;
    int luma, chroma_u, chroma_v, grayness;
    int gray_cnt = 0;
    int acc_u = 0;
    int acc_v = 0;


    /*
     * Accumulate U/V of all pixels whose colorfulness is below the threshold.
     */
    for(pix = 0; pix < num_pix; pix++) {
        base = 3*pix;

        luma = img_yuv[base];
        if(luma == 0) {
            // colorfulness measure undefined for zero luma --> skip
            continue;
        }

        chroma_u = img_yuv[base+1];
        chroma_v = img_yuv[base+2];

        // fixed-point colorfulness: (|U| + |V|) / Y
        grayness = ((abs(chroma_u) + abs(chroma_v)) << shift_fact)/luma;

        if(grayness < thresh) {
            // it's a gray pixel
            gray_cnt++;
            acc_u += chroma_u;
            acc_v += chroma_v;
        }
    }

    if(gray_cnt == 0) {
        // no gray pixel --> nothing to do
        return -1;
    }

    // average of the U and V components over all gray pixels
    *u_avg = ((float)acc_u)/gray_cnt;
    *v_avg = ((float)acc_v)/gray_cnt;
    return 0;
}
+
+
/**
 * Apply red and blue channel gains to an RGB image.
 *
 * The gains are converted to fixed-point with 8 fractional bits. The green
 * channel is copied unchanged; gained red/blue values are clamped to the
 * valid pixel range [0, 2^bit_channel - 1].
 *
 * @param img_out On return: image with gains applied
 * @param img_in input RGB image
 * @param height image height in number of pixels
 * @param width image width in number of pixels
 * @param bit_channel bits per color channel
 * @param gain_blue blue gain to apply
 * @param gain_red red gain to apply
 */
static void apply_pixel_gain_rgb(void *img_out, const void *img_in, const int height, const int width,
                const int bit_channel, const float gain_blue, const float gain_red) {

    const int shift_fact = 8;
    const int gain_b_fix = gain_blue * ((float)(1 << shift_fact));
    const int gain_r_fix = gain_red * ((float)(1 << shift_fact));
    const int max_pix = (1 << bit_channel) - 1;
    const uint8_t *src8 = (const uint8_t*)img_in;
    const uint16_t *src16 = (const uint16_t*)img_in;
    uint8_t *dst8 = (uint8_t*)img_out;
    uint16_t *dst16 = (uint16_t*)img_out;
    const int num_pix = height * width;
    int pix, idx, red_val, blue_val;


    for(pix = 0; pix < num_pix; pix++) {
        idx = 3*pix;

        if(bit_channel <= 8) {
            // red channel: apply gain and clamp
            red_val = (src8[idx]*gain_r_fix) >> shift_fact;
            if(red_val > max_pix) {
                red_val = max_pix;
            }
            else if(red_val < 0) {
                red_val = 0;
            }
            dst8[idx] = red_val;

            // green channel: copied unchanged
            dst8[idx+1] = src8[idx+1];

            // blue channel: apply gain and clamp
            blue_val = (src8[idx+2]*gain_b_fix) >> shift_fact;
            if(blue_val > max_pix) {
                blue_val = max_pix;
            }
            else if(blue_val < 0) {
                blue_val = 0;
            }
            dst8[idx+2] = blue_val;
        }
        else if(bit_channel <= 16) {
            // red channel: apply gain and clamp
            red_val = (src16[idx]*gain_r_fix) >> shift_fact;
            if(red_val > max_pix) {
                red_val = max_pix;
            }
            else if(red_val < 0) {
                red_val = 0;
            }
            dst16[idx] = red_val;

            // green channel: copied unchanged
            dst16[idx+1] = src16[idx+1];

            // blue channel: apply gain and clamp
            blue_val = (src16[idx+2]*gain_b_fix) >> shift_fact;
            if(blue_val > max_pix) {
                blue_val = max_pix;
            }
            else if(blue_val < 0) {
                blue_val = 0;
            }
            dst16[idx+2] = blue_val;
        }
    }
}
+
+
+/**
+ * Local white balance algorithm.
+ *
+ * Reference:
+ * J.-y Huo et al, "Robust Automatic White Balance Algorithm using for Gray Color Points in Images", IEEE Xplore, 2006
+ *
+ * @param awb_data white balancing algorithm data
+ * @return 0 on success otherwise -1
+ */
+int white_balance(struct awb_data_t *awb_data) {
+
+ float u_avg, v_avg;
+ float ctrl_k, gray_threshold;
+ int bit_channel, height, width;
+ void *img_balanced, *img_in;
+ int16_t *img_yuv;
+ int uv_div;
+
+
+ // following parameter may be scaled
+ ctrl_k = awb_data->ctrl_k;
+ gray_threshold = awb_data->gray_threshold;
+ bit_channel = awb_data->bit_channel;
+ img_balanced = awb_data->img_rgb_balanced;
+ img_in = awb_data->img_in;
+ img_yuv = awb_data->img_yuv;
+ height = awb_data->height;
+ width = awb_data->width;
+
+
+ // Apply red and blue gain on input image.
+ apply_pixel_gain_rgb(img_balanced, img_in, height, width, bit_channel, awb_data->gain_blue, awb_data->gain_red);
+
+
+ // RGB to YUV color space conversion
+ if(color_rgb_to_yuv(img_yuv, img_balanced, height, width, bit_channel)) {
+ // conversion failed --> do abort
+ return -1;
+ }
+
+ if(get_u_v_avg(img_yuv, height, width, gray_threshold, &u_avg, &v_avg)) {
+ // no gray pixel --> nothing to do
+ return 0;
+ }
+
+
+ /*
+ * Norm U/V channel average value to 8 bit per color channel image.
+ *
+ */
+ uv_div = 1 << (bit_channel - 8);
+ u_avg /= uv_div;
+ v_avg /= uv_div;
+
+ // adjust blue gain if image is bluish
+ if(fabs(u_avg) > 0.4) {
+ awb_data->gain_blue -= ctrl_k*u_avg;
+ }
+
+ // adjust red gain if image is reddish
+ if(fabs(v_avg) > 0.4) {
+ awb_data->gain_red -= ctrl_k*v_avg;
+ }
+
+
+ // range check of blue red
+ if(awb_data->gain_blue < GAIN_MIN) {
+ awb_data->gain_blue = GAIN_MIN;
+ }
+ else if(awb_data->gain_blue > GAIN_MAX) {
+ awb_data->gain_blue = 3.0;
+ }
+
+ // range check of red gain
+ if(awb_data->gain_red < GAIN_MIN) {
+ awb_data->gain_red = GAIN_MIN;
+ }
+ else if(awb_data->gain_red > GAIN_MAX) {
+ awb_data->gain_red = 3.0;
+ }
+
+// printf("XXX u_avg = %.4f, v_avg = %.4f, red = %.4f, blue = %.4f, bit_channel = %d\n", u_avg, v_avg, awb_data->gain_red, awb_data->gain_blue, bit_channel);
+ return 0;
+} \ No newline at end of file