/**
* @file main.cpp
* @brief Color Image Processing Pipeline with O-3000 USB camera
* @author Patrick Roth - roth@stettbacher.ch
* @copyright Stettbacher Signal Processing AG
*
* @remarks
*
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*
*/
/**
* Uncomment to analyze image processing time
*/
// #define DEBUG_PROC_TIME
#ifdef DEBUG_PROC_TIME
#include <sys/time.h>
#endif // DEBUG_PROC_TIME
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "color_pipe.h"
#include "color_pipe_private.h"
#include "color.h"
/**
* Logging macro for string error logging (libc). This macro inserts strerror(errno) in a suitable way.
*/
#define PRINTF_ERRNO(x) \
	do { printf(x" in %s() line %d failed: %s\n", __func__, __LINE__-1, strerror(errno)); } while (0)
// NOTE: wrapped in do-while(0) so the macro behaves like a single statement in
// if/else chains; the trailing semicolon was dropped (call sites supply it).
// __LINE__-1 deliberately points at the failing call on the previous line.
/**
* Alignment size in bytes.
* Image buffers are aligned to this given boundary.
*/
#define ALIGNMENT_SIZE 32
/**
* Coefficient definition used to undistort lenses.
*/
struct o3000_lens_coeffs_t {
struct dist_coeff_t dist_coeff; ///< distortion coefficients (radial k1, k2, k3 and tangential p1, p2 — see table below)
struct camera_matrix_t camera_matrix; ///< 3x3 camera matrix, elements a11..a33 (a33 fixed to 1.0 in all presets)
};
/**
* Lens undistortion coefficients of various lens types supplied by
* Stettbacher Signal Processing.
*/
const static struct o3000_lens_coeffs_t o3000_lens_coeffs[] = {
// NOTE: entry order must match enum o3000_lenses_t — entries are indexed
// directly with the enum value (see set_default_value() and the calibration stage).
// S-mount, focal length 2.8mm, aperture 2.0 (O3000_LS_F2_8)
{
.dist_coeff = {
.k1 = -1.7989363928888906e+01,
.k2 = 4.2371667641386335e+02,
.p1 = -5.5177683005299717e-03,
.p2 = -1.8027296799469215e-02,
.k3 = -5.1212552122750130e+03,
},
.camera_matrix = {
.a11 = 5.5641130307342128e+03,
.a12 = 0,
.a13 = 6.4044160626552366e+02,
.a21 = 0,
.a22 = 5.5583733034586849e+03,
.a23 = 5.3305307740745866e+02,
.a31 = 0,
.a32 = 0,
.a33 = 1.0,
},
},
// S-mount, focal length 4.2mm, aperture 1.8 (O3000_LS_F4_2)
{
.dist_coeff = {
.k1 = -5.6100382549536558e+00,
.k2 = 3.7504235968196980e+01,
.p1 = -1.1849075953406191e-02,
.p2 = -2.0833317381133629e-02,
.k3 = -1.4657907716904774e+02,
},
.camera_matrix = {
.a11 = 3.8385004722247168e+03,
.a12 = 0,
.a13 = 6.5463814905337483e+02,
.a21 = 0,
.a22 = 3.8289385545784967e+03,
.a23 = 5.4227950629136478e+02,
.a31 = 0,
.a32 = 0,
.a33 = 1.0,
},
},
// S-mount, focal length 6.0mm, aperture 1.8 (O3000_LS_F6_0)
{
.dist_coeff = {
.k1 = -3.2037738664730195e+00,
.k2 = 1.1127115993662951e+01,
.p1 = -1.6455451408872675e-02,
.p2 = -2.4114999934222298e-02,
.k3 = -1.2882650294739891e+01,
},
.camera_matrix = {
.a11 = 3.7083736372135381e+03,
.a12 = 0,
.a13 = 6.6465346812371035e+02,
.a21 = 0,
.a22 = 3.6972315248821769e+03,
.a23 = 5.5003224793025629e+02,
.a31 = 0,
.a32 = 0,
.a33 = 1.0,
},
},
// S-mount, focal length 8.0mm, aperture 1.8 (O3000_LS_F8_0)
{
.dist_coeff = {
.k1 = -2.4661259044966712e+00,
.k2 = 1.1778658083457410e+00,
.p1 = -8.5928173466905556e-03,
.p2 = -1.4375183749585565e-02,
.k3 = 1.4290871342330237e+02,
},
.camera_matrix = {
.a11 = 4.3637409203781626e+03,
.a12 = 0,
.a13 = 6.6812858595376599e+02,
.a21 = 0,
.a22 = 4.3451519470626554e+03,
.a23 = 5.5034252965175574e+02,
.a31 = 0,
.a32 = 0,
.a33 = 1.0,
},
},
// S-mount, focal length 12.0mm, aperture 1.8 (O3000_LS_F12_0)
{
.dist_coeff = {
.k1 = -5.3454594843785479e+00,
.k2 = 6.4871676948306629e+01,
.p1 = 1.0455391312916947e-01,
.p2 = 4.7057889548236420e-02,
.k3 = 1.2045606388669163e+00,
},
.camera_matrix = {
.a11 = 1.0122924739235064e+04,
.a12 = 0,
.a13 = 5.4063808328357356e+02,
.a21 = 0,
.a22 = 1.0091265861649332e+04,
.a23 = 3.2225828876237193e+02,
.a31 = 0,
.a32 = 0,
.a33 = 1.0,
},
},
// CS-mount, focal length 2.8mm, aperture 1.6 (O3000_LCS_F2_8)
{
.dist_coeff = {
.k1 = -4.2767583407486480e+00,
.k2 = 2.6248731301034013e+01,
.p1 = 7.8609123258541538e-03,
.p2 = 3.5374054685996053e-03,
.k3 = -8.9935343886238059e+01,
},
.camera_matrix = {
.a11 = 2.6998000732890600e+03,
.a12 = 0,
.a13 = 6.3616455649992679e+02,
.a21 = 0,
.a22 = 2.6987125203839237e+03,
.a23 = 4.4895958452543323e+02,
.a31 = 0,
.a32 = 0,
.a33 = 1.0,
},
},
// CS-mount, focal length 4.2mm, aperture 1.4 (O3000_LCS_F4_2)
{
.dist_coeff = {
.k1 = -3.7570498088693711e+01,
.k2 = 1.5728357422468230e+03,
.p1 = 1.1791307984552163e-02,
.p2 = -1.3742991959700961e-02,
.k3 = 1.0475497983752284e+01,
},
.camera_matrix = {
.a11 = 9.9917306224860204e+03,
.a12 = 0,
.a13 = 6.5441343169200013e+02,
.a21 = 0,
.a22 = 9.9479425952720158e+03,
.a23 = 4.6795575668109700e+02,
.a31 = 0,
.a32 = 0,
.a33 = 1.0,
},
},
// CS-mount, focal length 6.0mm, aperture 1.4 (O3000_LCS_F6_0)
{
.dist_coeff = {
.k1 = -2.3964178081799389e+01,
.k2 = 4.4902969904416392e+02,
.p1 = 2.2481087999585000e-01,
.p2 = 1.1427760423539150e-01,
.k3 = 1.3202448608914709e+01,
},
.camera_matrix = {
.a11 = 1.0267898783331597e+04,
.a12 = 0,
.a13 = 5.9040975894428607e+02,
.a21 = 0,
.a22 = 1.0167762137245367e+04,
.a23 = 3.7217036432075685e+02,
.a31 = 0,
.a32 = 0,
.a33 = 1.0,
},
},
// CS-mount, focal length 8.0mm, aperture 1.4 (O3000_LCS_F8_0)
{
.dist_coeff = {
.k1 = -3.1323351826805144e+01,
.k2 = -8.8565542864692248e-01,
.p1 = 1.3154594427821961e-01,
.p2 = 1.3393386186128195e-01,
.k3 = -1.7372379469761756e-03,
},
.camera_matrix = {
.a11 = 1.6071195111825766e+04,
.a12 = 0,
.a13 = 5.9208178498651694e+02,
.a21 = 0,
.a22 = 1.6265935400534616e+04,
.a23 = 4.0867129284489448e+02,
.a31 = 0,
.a32 = 0,
.a33 = 1.0,
},
},
// CS-mount, focal length 12.0mm, aperture 1.4 (O3000_LCS_F12_0)
{
.dist_coeff = {
.k1 = -8.7854099735158311e+00,
.k2 = 3.0664687310188293e+02,
.p1 = -1.5840425493675159e-01,
.p2 = -2.4142181141228097e-02,
.k3 = 1.4519448386845686e+00,
},
.camera_matrix = {
.a11 = 1.2466587046030105e+04,
.a12 = 0,
.a13 = 6.9244116287526458e+02,
.a21 = 0,
.a22 = 1.2309699089674952e+04,
.a23 = 6.9766565927729926e+02,
.a31 = 0,
.a32 = 0,
.a33 = 1.0,
},
},
};
/**
* Color Correction Matrix for various ambient lights.
*
* How to get the color correction matrix (CCM):
*
* 1. Place a 24 patch Macbeth chart in a properly illuminated location. It's recommended to use a
* light booth with a normed color temperature (e.g. D65). Otherwise, you can do the
* calibration process during a cloudy day because the illuminant is about D65 (6500 K). Put
* the chart in front of a window and switch off the room light.
* 2. Enable auto white balance and the camera calibration (lens correction) algorithm. All other algorithms
* must be disabled.
* 3. Adjust image brightness and make sure that the lower left white patch has a value about 220.
* Use the XML-command brightness to reach the defined value.
* 4. Save the image and use the software SensorTune from Aptina to get the correction matrix.
*/
static const float ccm_presets[][3][3] = {
// NOTE: entry order must match enum ccm_preset_t — entries are indexed
// directly with the enum value (see set_default_value() and the color correction stage).
// Each entry is a 3x3 color correction matrix; presumably rows map input RGB to
// output R/G/B respectively — confirm against color_calib() implementation.
// CCM_PRESET_O3020
{
{1.7392, -0.7660, 0.1968},
{-0.2509, 1.5322, -0.1113},
{0.0840, -0.4782, 1.5641},
},
};
#ifdef DEBUG_PROC_TIME
/**
* Return timestamp in milliseconds.
* The actual time the Epoch in milliseconds is returned.
*/
uint64_t get_ts(void) {
struct timeval tv;
// gettimeofday() may fail; in that case log and return 0 as a sentinel
if(gettimeofday(&tv, NULL)) {
printf("%s: %s\n", __func__, strerror(errno));
return 0;
}
// FIX: the original mixed the double literal 1e3 into 64-bit integer math,
// forcing the result through a double round-trip; use pure integer arithmetic.
return (uint64_t)tv.tv_sec * 1000ULL + (uint64_t)tv.tv_usec / 1000ULL;
}
#endif // DEBUG_PROC_TIME
/**
* Initialize pipeline with reasonable default value.
*
* @param pipe Pointer to pipeline data.
*/
static void set_default_value(struct color_pipe_t *pipe) {
// Convention throughout: user-facing parameters are double buffered; the
// "*_new" field mirrors the current value and is latched by the pipeline
// at the start of the next frame.
pipe->debayer_data.alg = BAYER_ALG_BILINEAR;
pipe->debayer_data.alg_new = pipe->debayer_data.alg;
pipe->awb_data.enable = 0;
pipe->awb_data.gray_threshold = 0.3f;
pipe->awb_data.gray_threshold_new = pipe->awb_data.gray_threshold;
pipe->awb_data.ctrl_k = 0.01f;
pipe->awb_data.ctrl_k_new = pipe->awb_data.ctrl_k;
pipe->awb_data.gain_red = 1.0f;
pipe->awb_data.gain_blue = 1.0f;
pipe->cam_calib_data.enable = 0;
pipe->cam_calib_data.lense = O3000_LS_F2_8;
pipe->cam_calib_data.lense_new = pipe->cam_calib_data.lense;
memcpy(&(pipe->cam_calib_data.dist_coeff), &o3000_lens_coeffs[pipe->cam_calib_data.lense].dist_coeff, sizeof(struct dist_coeff_t));
memcpy(&(pipe->cam_calib_data.camera_matrix), &o3000_lens_coeffs[pipe->cam_calib_data.lense].camera_matrix, sizeof(struct camera_matrix_t));
pipe->cam_calib_data.undistort_map_init = 0;
pipe->color_calib_data.enable = 0;
pipe->color_calib_data.ccm = CCM_PRESET_O3020;
pipe->color_calib_data.ccm_new = pipe->color_calib_data.ccm;
memcpy(pipe->color_calib_data.a, ccm_presets[pipe->color_calib_data.ccm], sizeof(pipe->color_calib_data.a));
pipe->sharp_data.enable = 0;
pipe->sharp_data.sharp_factor = 5.0f;
pipe->sharp_data.sharp_factor_new = pipe->sharp_data.sharp_factor;
pipe->sharp_data.sharp_alg = SHARP_ALG_LOCAL;
pipe->sharp_data.sharp_alg_new = pipe->sharp_data.sharp_alg;
pipe->sharp_data.local_sens = 94.0f;
// BUGFIX: was a self-assignment (local_sens_new = local_sens_new), which left
// local_sens_new at 0 from calloc() instead of the 94.0f default.
pipe->sharp_data.local_sens_new = pipe->sharp_data.local_sens;
pipe->gamma_data.enable = 0;
pipe->gamma_data.gamma = 1.2f;
pipe->gamma_data.gamma_new = pipe->gamma_data.gamma;
pipe->trapcorr_data.enable = 0;
pipe->trapcorr_data.map_init = 0;
pipe->trapcorr_data.wv = 0.0f;
pipe->trapcorr_data.wh = 0.0f;
pipe->trapcorr_data.wv_new = pipe->trapcorr_data.wv;
pipe->trapcorr_data.wh_new = pipe->trapcorr_data.wh;
pipe->proj_data.enable = 0;
pipe->proj_data.map_init = 0;
pipe->proj_data.c_inv[0][0] = 1.0f; // use identity matrix
pipe->proj_data.c_inv[0][1] = 0.0f;
pipe->proj_data.c_inv[0][2] = 0.0f;
pipe->proj_data.c_inv[1][0] = 0.0f;
pipe->proj_data.c_inv[1][1] = 1.0f;
pipe->proj_data.c_inv[1][2] = 0.0f;
pipe->proj_data.c_inv[2][0] = 0.0f;
pipe->proj_data.c_inv[2][1] = 0.0f;
pipe->proj_data.c_inv[2][2] = 1.0f;
memcpy(pipe->proj_data.c_inv_new, pipe->proj_data.c_inv, sizeof(pipe->proj_data.c_inv));
pipe->proj_data.c_upd = 0;
}
/**
* Free aligned memory.
*
* @param buf Pointer to aligned memory to be freed
*/
static void do_aligned_free(void *buf) {
// a NULL buffer is a no-op (allocation may have failed earlier)
if(buf == NULL) {
return;
}
ALIGNED_FREE(buf);
}
/**
* Allocate aligned memory.
*
* @param alignment aligment size in bytes like 8, 16, 32
* @param size size in bytes to allocate
* @param func for debugging purposes do specify the calling function name
* @param line for debugging purposes do specify the line number from calling this function
* @return Pointer to aligned allocated memory or NULL on error
*/
static void *do_aligned_alloc(size_t alignment, size_t size, const char *func, int line) {
void *mem;
size_t remainder;
// The allocation size must be a multiple of the alignment size:
// round the request up to the next alignment boundary.
remainder = size % alignment;
if(remainder != 0) {
size += alignment - remainder;
}
mem = ALIGNED_ALLOC(alignment, size);
if(mem == NULL) {
printf("%s: aligned_alloc() line %d failed: %s\n", func, line, strerror(errno));
}
return mem;
}
/**
* Process raw image at color pipeline.
*
*
* @param color_pipe Pointer to pipeline data.
* @param img_buf raw input image
* @param img_header image header @see o3000.h
*/
void __stdcall color_pipe_process(struct color_pipe_t *__restrict__ color_pipe,
void *__restrict__ img_buf,
struct img_header_t *__restrict__ img_header) {
int height, width, bit_channel, is_color;
int header_version;
enum enumBayerPattern_t bayer_pattern;   // FIX: renamed from misspelled 'bayer_patter'
enum enumDataFormat_t raw_format;
void *img_out;
enum o3000_lenses_t lens_type;
enum ccm_preset_t ccm_type;
#ifdef DEBUG_PROC_TIME
uint64_t ts_start = get_ts();
uint64_t ts_debayer, ts_awb, ts_calib, ts_ccm, ts_sharp, ts_gamma, ts_trapcorr, ts_projection;
#endif // DEBUG_PROC_TIME
/*
 * Extract image header information.
 */
raw_format = (enum enumDataFormat_t) (img_header->format);
width = img_header->width;
height = img_header->height;
bayer_pattern = (enum enumBayerPattern_t) (img_header->bayer_pattern);
header_version = img_header->version;
// set bit per pixel (8-bit raw formats, everything else is treated as 12-bit)
if(raw_format == DF_RAW_MONO_8 || raw_format == DF_RAW_BAYER_8) {
bit_channel = 8;
}
else {
bit_channel = 12;
}
// set flag to indicate mono or color image
if(raw_format == DF_RAW_MONO_8 || raw_format == DF_RAW_MONO_12 || raw_format == DF_HDR_MONO_20_COMP) {
is_color = 0;
}
else {
is_color = 1;
}
// set output image to raw image; each enabled stage below advances img_out
// to its own output buffer, so disabled stages are skipped transparently
img_out = img_buf;
/*
 * Pipeline stage: Demosaicing
 */
if(is_color) {
color_pipe->debayer_data.img_raw = img_buf;
color_pipe->debayer_data.height = height;
color_pipe->debayer_data.width = width;
color_pipe->debayer_data.format = raw_format;
color_pipe->debayer_data.start_pattern = bayer_pattern;
debayer(&(color_pipe->debayer_data));
img_out = color_pipe->debayer_data.img_rgb;
}
#ifdef DEBUG_PROC_TIME
ts_debayer = get_ts();
#endif // DEBUG_PROC_TIME
/*
 * Pipeline stage: White-Balancing
 */
if(color_pipe->awb_data.enable && is_color) {
// reset color gains if gray threshold or proportional factor have changed
if( color_pipe->awb_data.ctrl_k != color_pipe->awb_data.ctrl_k_new ||
color_pipe->awb_data.gray_threshold != color_pipe->awb_data.gray_threshold_new) {
color_pipe->awb_data.gain_red = 1;
color_pipe->awb_data.gain_blue = 1;
}
// apply user parameter (double buffered)
color_pipe->awb_data.ctrl_k = color_pipe->awb_data.ctrl_k_new;
color_pipe->awb_data.gray_threshold = color_pipe->awb_data.gray_threshold_new;
color_pipe->awb_data.img_in = img_out;
color_pipe->awb_data.bit_channel = bit_channel;
color_pipe->awb_data.height = height;
color_pipe->awb_data.width = width;
white_balance(&(color_pipe->awb_data));
img_out = color_pipe->awb_data.img_rgb_balanced;
}
else {
// always reset color gain if stage is disabled
color_pipe->awb_data.gain_red = 1;
color_pipe->awb_data.gain_blue = 1;
}
#ifdef DEBUG_PROC_TIME
ts_awb = get_ts();
#endif // DEBUG_PROC_TIME
/*
 * Pipeline stage: Camera calibration
 */
if(color_pipe->cam_calib_data.enable) {
// apply user parameter (double buffered)
lens_type = color_pipe->cam_calib_data.lense_new;
if(color_pipe->cam_calib_data.lense != lens_type) {
color_pipe->cam_calib_data.lense = lens_type;
memcpy(&(color_pipe->cam_calib_data.dist_coeff), &o3000_lens_coeffs[lens_type].dist_coeff, sizeof(struct dist_coeff_t));
memcpy(&(color_pipe->cam_calib_data.camera_matrix), &o3000_lens_coeffs[lens_type].camera_matrix, sizeof(struct camera_matrix_t));
}
color_pipe->cam_calib_data.img_in = img_out;
color_pipe->cam_calib_data.is_color = is_color;
// reinit undistortion map if image format or resolution have changed
if( color_pipe->cam_calib_data.bit_channel != bit_channel ||
color_pipe->cam_calib_data.tot_width != width ||
color_pipe->cam_calib_data.tot_height != height) {
color_pipe->cam_calib_data.undistort_map_init = 0;
}
color_pipe->cam_calib_data.bit_channel = bit_channel;
color_pipe->cam_calib_data.tot_width = width;
color_pipe->cam_calib_data.tot_height = height;
// field-of-view available since O-3000 image header version 4
if(header_version >= 4) {
// reinit undistortion map if field-of-view has changed
if( color_pipe->cam_calib_data.fov_x_start != img_header->fov_x_start ||
color_pipe->cam_calib_data.fov_x_end != img_header->fov_x_end ||
color_pipe->cam_calib_data.fov_y_start != img_header->fov_y_start ||
color_pipe->cam_calib_data.fov_y_end != img_header->fov_y_end) {
color_pipe->cam_calib_data.undistort_map_init = 0;
}
color_pipe->cam_calib_data.fov_x_start = img_header->fov_x_start;
color_pipe->cam_calib_data.fov_x_end = img_header->fov_x_end;
color_pipe->cam_calib_data.fov_y_start = img_header->fov_y_start;
color_pipe->cam_calib_data.fov_y_end = img_header->fov_y_end;
}
else {
// assume that image is displayed without ROI (region-of-interest)
color_pipe->cam_calib_data.fov_x_start = 0;
color_pipe->cam_calib_data.fov_x_end = width-1;
color_pipe->cam_calib_data.fov_y_start = 0;
color_pipe->cam_calib_data.fov_y_end = height-1;
}
camera_calib(&(color_pipe->cam_calib_data));
img_out = color_pipe->cam_calib_data.img_calib;
}
#ifdef DEBUG_PROC_TIME
ts_calib = get_ts();
#endif // DEBUG_PROC_TIME
/*
 * Pipeline stage: Color Correction
 */
if(color_pipe->color_calib_data.enable && is_color) {
// apply user parameter (double buffered)
ccm_type = color_pipe->color_calib_data.ccm_new;
if(color_pipe->color_calib_data.ccm != ccm_type) {
color_pipe->color_calib_data.ccm = ccm_type;
memcpy(color_pipe->color_calib_data.a, ccm_presets[ccm_type], sizeof(color_pipe->color_calib_data.a));
}
color_pipe->color_calib_data.img_in = img_out;
color_pipe->color_calib_data.bit_channel = bit_channel;
color_pipe->color_calib_data.width = width;
color_pipe->color_calib_data.height = height;
color_calib(&(color_pipe->color_calib_data));
img_out = color_pipe->color_calib_data.img_calib;
}
#ifdef DEBUG_PROC_TIME
ts_ccm = get_ts();
#endif // DEBUG_PROC_TIME
/*
 * Pipeline stage: Image sharpening
 */
if(color_pipe->sharp_data.enable) {
// apply user parameter (double buffered)
color_pipe->sharp_data.sharp_factor = color_pipe->sharp_data.sharp_factor_new;
color_pipe->sharp_data.sharp_alg = color_pipe->sharp_data.sharp_alg_new;
color_pipe->sharp_data.local_sens = color_pipe->sharp_data.local_sens_new;
color_pipe->sharp_data.img_in = img_out;
color_pipe->sharp_data.is_color = is_color;
color_pipe->sharp_data.bit_channel = bit_channel;
color_pipe->sharp_data.width = width;
color_pipe->sharp_data.height = height;
sharpening(&(color_pipe->sharp_data));
img_out = color_pipe->sharp_data.img_sharp;
}
#ifdef DEBUG_PROC_TIME
ts_sharp = get_ts();
#endif // DEBUG_PROC_TIME
/*
 * Pipeline stage: Gamma correction
 */
if(color_pipe->gamma_data.enable) {
// apply user parameter (double buffered)
color_pipe->gamma_data.gamma = color_pipe->gamma_data.gamma_new;
color_pipe->gamma_data.img_in = img_out;
color_pipe->gamma_data.is_color = is_color;
color_pipe->gamma_data.bit_channel = bit_channel;
color_pipe->gamma_data.width = width;
color_pipe->gamma_data.height = height;
gamma_corr(&(color_pipe->gamma_data));
img_out = color_pipe->gamma_data.img_gamma;
}
#ifdef DEBUG_PROC_TIME
ts_gamma = get_ts();
#endif // DEBUG_PROC_TIME
/*
 * Pipeline stage: Isosceles trapezoid correction
 */
if(color_pipe->trapcorr_data.enable) {
// auto-reinit perspective correction map if image format, resolution or weights have changed
if( color_pipe->trapcorr_data.bit_channel != bit_channel ||
color_pipe->trapcorr_data.width != width ||
color_pipe->trapcorr_data.height != height ||
color_pipe->trapcorr_data.wv != color_pipe->trapcorr_data.wv_new ||
color_pipe->trapcorr_data.wh != color_pipe->trapcorr_data.wh_new) {
color_pipe->trapcorr_data.map_init = 0;
}
// apply user parameter (double buffered)
color_pipe->trapcorr_data.wv = color_pipe->trapcorr_data.wv_new;
color_pipe->trapcorr_data.wh = color_pipe->trapcorr_data.wh_new;
color_pipe->trapcorr_data.img_in = img_out;
color_pipe->trapcorr_data.is_color = is_color;
color_pipe->trapcorr_data.bit_channel = bit_channel;
color_pipe->trapcorr_data.width = width;
color_pipe->trapcorr_data.height = height;
trapcorr(&(color_pipe->trapcorr_data));
img_out = color_pipe->trapcorr_data.img_out;
}
#ifdef DEBUG_PROC_TIME
ts_trapcorr = get_ts();
#endif // DEBUG_PROC_TIME
/*
 * Pipeline stage: Projective transformation
 */
if(color_pipe->proj_data.enable) {
// auto-reinit perspective correction map if image format or resolution have changed
if( color_pipe->proj_data.bit_channel != bit_channel ||
color_pipe->proj_data.width != width ||
color_pipe->proj_data.height != height) {
color_pipe->proj_data.map_init = 0;
}
// apply user parameter (double buffered)
if(color_pipe->proj_data.c_upd) {
memcpy(color_pipe->proj_data.c_inv, color_pipe->proj_data.c_inv_new, sizeof(color_pipe->proj_data.c_inv));
color_pipe->proj_data.c_upd = 0;
color_pipe->proj_data.map_init = 0;
}
color_pipe->proj_data.img_in = img_out;
color_pipe->proj_data.is_color = is_color;
color_pipe->proj_data.bit_channel = bit_channel;
color_pipe->proj_data.width = width;
color_pipe->proj_data.height = height;
projection(&(color_pipe->proj_data));
img_out = color_pipe->proj_data.img_out;
}
#ifdef DEBUG_PROC_TIME
ts_projection = get_ts();
#endif // DEBUG_PROC_TIME
/*
 * Return processed image depending on active pipeline stages.
 */
color_pipe->img_out = img_out;
color_pipe->is_color = is_color;
color_pipe->bit_channel = bit_channel;
color_pipe->width = width;
color_pipe->height = height;
#ifdef DEBUG_PROC_TIME
// FIX: uint64_t must not be printed with %lld (signed, undefined behavior);
// cast explicitly and use %llu
printf(" debayer: %llu msec\n", (unsigned long long)(ts_debayer - ts_start));
printf(" awb: %llu msec\n", (unsigned long long)(ts_awb - ts_debayer));
printf(" camera calib: %llu msec\n", (unsigned long long)(ts_calib - ts_awb));
printf(" color correction: %llu msec\n", (unsigned long long)(ts_ccm - ts_calib));
printf(" sharpening: %llu msec\n", (unsigned long long)(ts_sharp - ts_ccm));
printf(" gamma: %llu msec\n", (unsigned long long)(ts_gamma - ts_sharp));
printf(" trapeze correction: %llu msec\n", (unsigned long long)(ts_trapcorr - ts_gamma));
printf(" projective transformation: %llu msec\n", (unsigned long long)(ts_projection - ts_trapcorr));
#endif // DEBUG_PROC_TIME
}
/**
* Pipeline stage configuration: Demosaicing (debayering)
*
* @param color_pipe Pointer to pipeline context
* @param alg demosaicing algorithm type
*/
void __stdcall color_pipe_stageconf_debayer(struct color_pipe_t *color_pipe, enum bayer_alg_t alg) {
// guard against a missing pipeline context
if(!color_pipe) {
printf("%s: Pipeline pointer is NULL!\n", __func__);
return;
}
// double buffered parameter: latched by the pipeline on the next frame
color_pipe->debayer_data.alg_new = alg;
}
/**
* Pipeline stage configuration: Auto-White-Balancing
*
* @param color_pipe Pointer to pipeline context
* @param enable not 0: enable, 0: disable
* @param gray_threshold gray threshold (default 0.3)
* @param ctrl_gain proportional control gain of the white-balance loop (default 0.01)
*/
void __stdcall color_pipe_stageconf_awb(struct color_pipe_t *color_pipe, int enable, float gray_threshold, float ctrl_gain) {
// guard against a missing pipeline context
if(!color_pipe) {
printf("%s: Pipeline pointer is NULL!\n", __func__);
return;
}
color_pipe->awb_data.enable = enable;
// double buffered parameters: latched by the pipeline on the next frame
color_pipe->awb_data.gray_threshold_new = gray_threshold;
color_pipe->awb_data.ctrl_k_new = ctrl_gain;
}
/**
* Pipeline stage configuration: Camera Calibration
*
* @param color_pipe Pointer to pipeline context
* @param enable not 0: enable, 0: disable
* @param lense initialize pipeline stage with given lense type
*/
void __stdcall color_pipe_stageconf_cam_calib(struct color_pipe_t *color_pipe, int enable, enum o3000_lenses_t lense) {
const size_t num_lenses = sizeof(o3000_lens_coeffs)/sizeof(o3000_lens_coeffs[0]);
// guard against a missing pipeline context
if(!color_pipe) {
printf("%s: Pipeline pointer is NULL!\n", __func__);
return;
}
// reject lens types outside the coefficient table
if(lense < 0 || (size_t)lense >= num_lenses) {
printf("%s: Invalid lense type %d\n", __func__, lense);
return;
}
color_pipe->cam_calib_data.enable = enable;
// double buffered parameter: latched by the pipeline on the next frame
color_pipe->cam_calib_data.lense_new = lense;
// force the undistortion map to be rebuilt
color_pipe->cam_calib_data.undistort_map_init = 0;
}
/**
* Pipeline stage configuration: Color Calibration
*
* @param color_pipe Pointer to pipeline context
* @param enable not 0: enable, 0: disable
* @param ccm_preset initialize pipeline stage with given color correction preset data
*/
void __stdcall color_pipe_stageconf_color_calib(struct color_pipe_t *color_pipe, int enable,
enum ccm_preset_t ccm_preset) {
const size_t num_presets = sizeof(ccm_presets)/sizeof(ccm_presets[0]);
// guard against a missing pipeline context
if(!color_pipe) {
printf("%s: Pipeline pointer is NULL!\n", __func__);
return;
}
// reject presets outside the CCM table
if(ccm_preset < 0 || (size_t)ccm_preset >= num_presets) {
printf("%s: Invalid color type %d\n", __func__, ccm_preset);
return;
}
color_pipe->color_calib_data.enable = enable;
// double buffered parameter: latched by the pipeline on the next frame
color_pipe->color_calib_data.ccm_new = ccm_preset;
}
/**
* Pipeline stage configuration: Sharpening
*
* @param color_pipe Pointer to pipeline context
* @param enable not 0: enable, 0: disable
* @param factor sharpening factor (default 5.0)
* @param alg algorithm type
* @param sens sensitivity (default 94.0)
*/
void __stdcall color_pipe_stageconf_sharp(struct color_pipe_t *color_pipe, int enable,
float factor, enum sharp_alg_t alg, float sens) {
// guard against a missing pipeline context
if(!color_pipe) {
printf("%s: Pipeline pointer is NULL!\n", __func__);
return;
}
color_pipe->sharp_data.enable = enable;
// double buffered parameters: latched by the pipeline on the next frame
color_pipe->sharp_data.sharp_factor_new = factor;
color_pipe->sharp_data.sharp_alg_new = alg;
color_pipe->sharp_data.local_sens_new = sens;
}
/**
* Pipeline stage configuration: Gamma Correction
*
* @param color_pipe Pointer to pipeline context
* @param enable not 0: enable, 0: disable
* @param gamma gamma factor (1.0 means no gamma correction, default 1.2)
*/
void __stdcall color_pipe_stageconf_gamma(struct color_pipe_t *color_pipe, int enable, float gamma) {
// guard against a missing pipeline context
if(!color_pipe) {
printf("%s: Pipeline pointer is NULL!\n", __func__);
return;
}
color_pipe->gamma_data.enable = enable;
// double buffered parameter: latched by the pipeline on the next frame
color_pipe->gamma_data.gamma_new = gamma;
}
/**
* Pipeline stage configuration: Isosceles Trapeze Correction
*
* The vertical and horizontal correction weight are per cent values ranging
* from -100.0 % to +100.0 %. A positive weight means that the upper horizontal trapeze
* is fixed and won't shrink while a negative value means the opposite lower line won't change.
* A weight of zero means no correction.
*
* @param color_pipe Pointer to pipeline context
* @param enable not 0: enable, 0: disable
* @param wv vertical weight (range: -100.0 to +100.0)
* @param wh horizontal weight (range: -100.0 to +100.0)
*/
void __stdcall color_pipe_stageconf_trapcorr(struct color_pipe_t *color_pipe, int enable, float wv, float wh) {
// paranoia
if(color_pipe == NULL) {
printf("%s: Pipeline pointer is NULL!\n", __func__);
return;
}
// clamp both weights to the documented per cent range
// (FIX: upper bounds were compared against the double literals 100.0;
// use float literals consistently with the lower bounds)
if(wv < -100.0f) {
wv = -100.0f;
}
else if(wv > 100.0f) {
wv = 100.0f;
}
if(wh < -100.0f) {
wh = -100.0f;
}
else if(wh > 100.0f) {
wh = 100.0f;
}
color_pipe->trapcorr_data.enable = enable;
// double buffered parameters: latched by the pipeline on the next frame
color_pipe->trapcorr_data.wv_new = wv;
color_pipe->trapcorr_data.wh_new = wh;
// force the correction map to be rebuilt
color_pipe->trapcorr_data.map_init = 0;
}
/**
* Pipeline stage configuration: projective transformation
*
* Project point p to p' using the projection matrix C
* with homogeneous coordinates.
*
* t * p' = C * p
*
* where:
* / u \ / x \
* p' = | v | p = | y | t = scaling factor
* \ 1 / \ 1 /
*
*
* / c00 c01 c02 \
* C = | c10 c11 c12 |
* \ c20 c21 c22 /
*
* p': projected points
* p: point to be projected
* C: projection matrix
*
* The color-pipe requires the inverse of matrix C because
* an inverse mapping is implemented.
*
* @param color_pipe Pointer to pipeline context
* @param enable not 0: enable, 0: disable
* @param c_inv inverse of 3x3 projection matrix C
*/
void __stdcall color_pipe_stageconf_projection(struct color_pipe_t *color_pipe, int enable, float c_inv[3][3]) {
// paranoia (FIX: this was the only stage configuration function without the
// NULL guard; it dereferenced color_pipe unconditionally)
if(color_pipe == NULL) {
printf("%s: Pipeline pointer is NULL!\n", __func__);
return;
}
if(c_inv == NULL) {
printf("%s: Matrix pointer is NULL!\n", __func__);
return;
}
memcpy(color_pipe->proj_data.c_inv_new, c_inv, sizeof(color_pipe->proj_data.c_inv_new));
// signal the pipeline to latch the new matrix and rebuild its mapping
color_pipe->proj_data.c_upd = 1;
color_pipe->proj_data.enable = enable;
}
/**
* Open color image processing pipeline.
* This function allocates memory for the various pipe algorithms. The pipeline is set up for a maximum possible image size defined
* by the height, width and bitdepth per color channel.
*
* NOTE
* This function uses dynamic memory allocation. If the pipeline isn't used anymore, close it by calling @ref color_pipe_close.
*
* @param color_pipe On return: Pointer to pipeline data. Dynamic memory is allocated.
* @param max_img_height maximum possible image height in number of pixels
* @param max_img_width maximum possible image width in number of pixels
* @param bits_per_channel maximum possible number of bits per color channel
* @return 0 on success, -1 on error
*/
int __stdcall color_pipe_open(struct color_pipe_t **color_pipe, const int max_img_height, const int max_img_width,
const int bits_per_channel) {
int byte_per_pixel;
// FIX: size calculations use size_t instead of int to avoid signed integer
// overflow for large image dimensions
size_t max_img_size, max_img_size_yuv, max_img_size_binary;
struct color_pipe_t *data;
if(color_pipe == NULL) {
printf("%s: Pipeline data pointer is NULL!\n", __func__);
return -1;
}
data = calloc(1, sizeof(struct color_pipe_t));
if(data == NULL) {
PRINTF_ERRNO("calloc");
return -1;
}
/*
 * Calculate the number of bytes per pixel are used for a color image.
 * Always, a color image has 3 channels with the given bit-depth max_img_bpp.
 *
 * e. g. 8 bits-per-channel results to 3 byte per pixel
 * 12 bits-per-channel results to 6 byte per pixel
 */
if((bits_per_channel%8) == 0) {
byte_per_pixel = bits_per_channel/8;
}
else {
byte_per_pixel = bits_per_channel/8 + 1;
}
byte_per_pixel *= 3;
/*
 * Do calculate the maximum possible image size.
 */
max_img_size = (size_t)max_img_height*(size_t)max_img_width*(size_t)byte_per_pixel;
/*
 * The YUV image uses 16 bit-per-channel always.
 */
max_img_size_yuv = (size_t)max_img_height*(size_t)max_img_width*3*2;
/*
 * The binary image uses 8 bit-per-channel always.
 */
max_img_size_binary = (size_t)max_img_height*(size_t)max_img_width*3;
/*
 * Important note for dynamic memory allocation:
 * Various pipeline algorithms are using SIMD instructions like SSE2 (128 bit register) and AVX (256 bit registers). Therefore any
 * image buffer is allocated to a 32-byte boundary. Using SIMD instructions on an unaligned buffer may generate a general-protection exception.
 */
// allocate memory for demosaicing algorithm
data->debayer_data.img_rgb = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
if(data->debayer_data.img_rgb == NULL) {
goto _pipe_open_abort;
}
// allocate memory for auto white balancing algorithm
data->awb_data.img_rgb_balanced = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
if(data->awb_data.img_rgb_balanced == NULL) {
goto _pipe_open_abort;
}
data->awb_data.img_yuv = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size_yuv, __func__, __LINE__-1);
if(data->awb_data.img_yuv == NULL) {
goto _pipe_open_abort;
}
// allocate memory for camera calibration algorithm
data->cam_calib_data.img_calib = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
if(data->cam_calib_data.img_calib == NULL) {
goto _pipe_open_abort;
}
data->cam_calib_data.calib_map = do_aligned_alloc(ALIGNMENT_SIZE,
sizeof(struct coord_t)*max_img_height*max_img_width,
__func__, __LINE__-1);
if(data->cam_calib_data.calib_map == NULL) {
goto _pipe_open_abort;
}
// allocate memory for color calibration algorithm
data->color_calib_data.img_calib = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
if(data->color_calib_data.img_calib == NULL) {
goto _pipe_open_abort;
}
// allocate memory for sharpening algorithm
data->sharp_data.img_sharp = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
if(data->sharp_data.img_sharp == NULL) {
goto _pipe_open_abort;
}
data->sharp_data.img_yuv = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size_yuv, __func__, __LINE__-1);
if(data->sharp_data.img_yuv == NULL) {
goto _pipe_open_abort;
}
data->sharp_data.img_yuv_sharp = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size_yuv, __func__, __LINE__-1);
if(data->sharp_data.img_yuv_sharp == NULL) {
goto _pipe_open_abort;
}
data->sharp_data.img_sobel = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size_yuv, __func__, __LINE__-1);
if(data->sharp_data.img_sobel == NULL) {
goto _pipe_open_abort;
}
data->sharp_data.img_gauss = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size_yuv, __func__, __LINE__-1);
if(data->sharp_data.img_gauss == NULL) {
goto _pipe_open_abort;
}
data->sharp_data.sharp_mask = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size_binary, __func__, __LINE__-1);
if(data->sharp_data.sharp_mask == NULL) {
goto _pipe_open_abort;
}
// allocate memory for gamma correction algorithm
data->gamma_data.img_gamma = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
if(data->gamma_data.img_gamma == NULL) {
goto _pipe_open_abort;
}
// Lookup table size depends on bits per color channel: one entry per possible
// channel value, i.e. 2^bits_per_channel entries.
// NOTE(review): this line was corrupted in the sources and has been reconstructed;
// the element size is assumed to be 16 bit (uint16_t) since channels go up to
// 12 bit — confirm the gamma_table element type in color_pipe_private.h.
data->gamma_data.gamma_table = do_aligned_alloc(ALIGNMENT_SIZE, ((size_t)1 << bits_per_channel)*sizeof(uint16_t), __func__, __LINE__-1);
if(data->gamma_data.gamma_table == NULL) {
goto _pipe_open_abort;
}
// allocate memory for isosceles trapeze correction algorithm
data->trapcorr_data.img_out = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
if(data->trapcorr_data.img_out == NULL) {
goto _pipe_open_abort;
}
data->trapcorr_data.map = do_aligned_alloc(ALIGNMENT_SIZE,
sizeof(struct coord_t)*max_img_height*max_img_width,
__func__, __LINE__-1);
if(data->trapcorr_data.map == NULL) {
goto _pipe_open_abort;
}
// allocate memory for projective transformation
data->proj_data.img_out = do_aligned_alloc(ALIGNMENT_SIZE, max_img_size, __func__, __LINE__-1);
if(data->proj_data.img_out == NULL) {
goto _pipe_open_abort;
}
data->proj_data.map = do_aligned_alloc(ALIGNMENT_SIZE,
sizeof(struct coord_t)*max_img_height*max_img_width,
__func__, __LINE__-1);
if(data->proj_data.map == NULL) {
goto _pipe_open_abort;
}
// set suitable and valid defaults
set_default_value(data);
*color_pipe = data;
// detect CPU features
#if (WITH_SIMD == 1)
if(cpu_feature_init()) {
printf("%s: Detecting CPU features failed\n", __func__);
}
#endif // WITH_SIMD
return 0;
_pipe_open_abort:
// color_pipe_close() tolerates the partially initialized context: buffers not
// yet allocated are NULL (calloc) and do_aligned_free() skips NULL pointers
color_pipe_close(data);
return -1;
}
/**
* Close color image processing pipeline.
* This function cleans up the pipeline and frees the used memory.
*
* @param data Pointer to pipeline data
* @return 0 on success, -1 on error (NULL pointer given)
*/
int __stdcall color_pipe_close(struct color_pipe_t *data) {
if(data == NULL) {
printf("%s: Pipeline data pointer is NULL!\n", __func__);
return -1;
}
// free various image buffers
// (do_aligned_free() skips NULL pointers, so a partially initialized
// context from an aborted color_pipe_open() is handled safely)
do_aligned_free(data->debayer_data.img_rgb);
do_aligned_free(data->awb_data.img_rgb_balanced);
do_aligned_free(data->awb_data.img_yuv);
do_aligned_free(data->cam_calib_data.img_calib);
do_aligned_free(data->cam_calib_data.calib_map);
do_aligned_free(data->color_calib_data.img_calib);
do_aligned_free(data->sharp_data.img_sharp);
do_aligned_free(data->sharp_data.img_yuv);
do_aligned_free(data->sharp_data.img_yuv_sharp);
do_aligned_free(data->sharp_data.img_sobel);
do_aligned_free(data->sharp_data.img_gauss);
do_aligned_free(data->sharp_data.sharp_mask);
do_aligned_free(data->gamma_data.img_gamma);
do_aligned_free(data->gamma_data.gamma_table);
do_aligned_free(data->trapcorr_data.img_out);
do_aligned_free(data->trapcorr_data.map);
do_aligned_free(data->proj_data.img_out);
do_aligned_free(data->proj_data.map);
// free the pipeline context itself
free(data);
return 0;
}
/**
* Return library version.
*
* @param major On return: major number
* @param minor On return: minor number
* @param release On return: release number
*/
void __stdcall color_pipe_get_version(int *major, int *minor, int *release) {
// all three output pointers must be valid
if(!major || !minor || !release) {
printf("%s: at least one version variable is NULL!\n", __func__);
return;
}
*major = PIPE_VERSION_MAJOR;
*minor = PIPE_VERSION_MINOR;
*release = PIPE_VERSION_RELEASE;
}