/**
 * @file camera_calib.c
 * @brief camera calibration algorithm
 * @author Patrick Roth - roth@stettbacher.ch
 * @copyright Stettbacher Signal Processing AG
 *
 * @remarks
 *
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
* 
 *
 */

#include <stdint.h>
#include <math.h>

#if (WITH_SIMD == 1)
#include <immintrin.h>  // see /usr/lib64/gcc/x86_64-suse-linux/4.7/include/immintrin.h
#endif // WITH_SIMD

#include "color_pipe_private.h"


#if 0 // not used anymore!!
#ifdef __AVX__

static inline void interpolate_avx(const int bit_channel, void *img_out, const int x, const int y,
                                   const int height, const int width, const void *img_in,
                                   const int32_t x_corr_start, const int32_t y_corr_start,
                                   const int32_t x_weight, const int32_t y_weight, const int shift_fact)
{
    int x_coord_end, y_coord_end, index_start, index_end;
    int a11, a12, a21, a22;
    int red, green, blue;
    int index;
    uint8_t *out8 = img_out;
    uint16_t *out16 = img_out;
    const uint8_t *in8 = img_in;
    const uint16_t *in16 = img_in;

    // init pixel value to black
    red = 0;
    green = 0;
    blue = 0;

    // calculate pixel index of destination image (calibrated image)
    index = ((y*width)+x)*3;

    if(x_corr_start >= width || y_corr_start >= height || x_corr_start < 0 || y_corr_start < 0) {
        // out of range --> return black value
        goto _end_interpolate;
    }

    x_coord_end = x_corr_start + 1;
    y_coord_end = y_corr_start + 1;
    index_start = (y_corr_start*width + x_corr_start)*3;
    index_end = (y_coord_end*width + x_coord_end)*3;

    // calculate weights used for interpolation
    a11 = ((1<<shift_fact) - x_weight) * ((1<<shift_fact) - y_weight);
    a12 = x_weight * ((1<<shift_fact) - y_weight);
    a21 = ((1<<shift_fact) - x_weight) * y_weight;
    a22 = x_weight * y_weight;

    // weight the 4 neighboring pixels per color channel
    if(bit_channel <= 8) {
        red   = a11*in8[index_start]   + a12*in8[index_start+3] + a21*in8[index_end-3] + a22*in8[index_end];
        green = a11*in8[index_start+1] + a12*in8[index_start+4] + a21*in8[index_end-2] + a22*in8[index_end+1];
        blue  = a11*in8[index_start+2] + a12*in8[index_start+5] + a21*in8[index_end-1] + a22*in8[index_end+2];
    }
    else if(bit_channel <= 16) {
        red   = a11*in16[index_start]   + a12*in16[index_start+3] + a21*in16[index_end-3] + a22*in16[index_end];
        green = a11*in16[index_start+1] + a12*in16[index_start+4] + a21*in16[index_end-2] + a22*in16[index_end+1];
        blue  = a11*in16[index_start+2] + a12*in16[index_start+5] + a21*in16[index_end-1] + a22*in16[index_end+2];
    }

_end_interpolate:
    if(bit_channel <= 8) {
        out8[index]   = red >> (2*shift_fact);
        out8[index+1] = green >> (2*shift_fact);
        out8[index+2] = blue >> (2*shift_fact);
    }
    else if(bit_channel <= 16) {
        out16[index]   = red >> (2*shift_fact);
        out16[index+1] = green >> (2*shift_fact);
        out16[index+2] = blue >> (2*shift_fact);
    }
}


/**
 * Calculate camera calibration lookup-table (by using AVX vector instructions).
 *
 * @param data required camera calibration data
 */
static void calc_calib_avx(struct cam_calib_data_t *data)
{
    int v, u, i, height, width, fov_x_start, fov_x_end, fov_y_start, fov_y_end;
    float k1, k2, p1, p2, k3;
    float fx, fy, cx, cy;
    float _y;
    void *img_calib, *img_uncalib;
    int bit_channel;
    const int shift_fact = 9;

    // align variables to 32-byte boundary
    float _x[8] __attribute__((aligned(32)));
    int32_t x_corr_start[8] __attribute__((aligned(32)));
    int32_t y_corr_start[8] __attribute__((aligned(32)));
    int32_t x_weight[8] __attribute__((aligned(32)));
    int32_t y_weight[8] __attribute__((aligned(32)));

    __m256 _y_vect, _y_sq_vect;
    __m256 _x_vect, _x_sq_vect;
    __m256 r_sq_vect, r_sqx2_vect, r_sqx3_vect;
    __m256 dist_radial_vect, k1_vect, k2_vect, k3_vect, dist_radial_offset;
    __m256 dist_tang_x_vect, dist_tang_y_vect, p1_vect, p2_vect, a_p1, a_p2;
    __m256 dist_x_vect, dist_y_vect;
    __m256 const_2_vect;
    __m256 x_corr_vect, y_corr_vect;
    __m256 fx_vect, cx_vect, fov_x_start_vect;
    __m256 fy_vect, cy_vect, fov_y_start_vect;
    __m256 _x_fact_y_vect;
    __m256i x_corr_start_vect, y_corr_start_vect;
    __m256 shift_fact_vect;
    __m256i x_weight_vect, y_weight_vect;

    img_calib = data->img_calib;
    img_uncalib = data->img_in;
    bit_channel = data->bit_channel;
    height = data->tot_height;
    width = data->tot_width;
    fov_x_start = data->fov_x_start;
    fov_x_end = data->fov_x_end;
    fov_y_start = data->fov_y_start;
    fov_y_end = data->fov_y_end;
    fov_x_start_vect = _mm256_set1_ps(fov_x_start);
    fov_y_start_vect = _mm256_set1_ps(fov_y_start);

    k1 = data->dist_coeff.k1;
    k2 = data->dist_coeff.k2;
    k3 = data->dist_coeff.k3;
    k1_vect = _mm256_set1_ps(k1);
    k2_vect = _mm256_set1_ps(k2);
    k3_vect = _mm256_set1_ps(k3);

    p1 = data->dist_coeff.p1;
    p2 = data->dist_coeff.p2;
    p1_vect = _mm256_set1_ps(p1);
    p2_vect = _mm256_set1_ps(p2);

    fx = data->camera_matrix.a11;
    fy = data->camera_matrix.a22;
    cx = data->camera_matrix.a13;
    cy = data->camera_matrix.a23;
    fx_vect = _mm256_set1_ps(fx);
    fy_vect = _mm256_set1_ps(fy);
    cx_vect = _mm256_set1_ps(cx);
    cy_vect = _mm256_set1_ps(cy);

    dist_radial_offset = _mm256_set1_ps(1.0);
    const_2_vect = _mm256_set1_ps(2.0);
    a_p1 = _mm256_mul_ps(_mm256_set1_ps(2.0), p1_vect);
    a_p2 = _mm256_mul_ps(_mm256_set1_ps(2.0), p2_vect);
    shift_fact_vect = _mm256_set1_ps((float)(1<<shift_fact));

    img_calib = data->img_calib;
    img_uncalib = data->img_in;
    bit_channel = data->bit_channel;
    height = data->tot_height;
    width = data->tot_width;
    fov_x_start = data->fov_x_start;
    fov_x_end = data->fov_x_end;
    fov_y_start = data->fov_y_start;
    fov_y_end = data->fov_y_end;

    k1 = data->dist_coeff.k1;
    k2 = data->dist_coeff.k2;
    p1 = data->dist_coeff.p1;
    p2 = data->dist_coeff.p2;
    k3 = data->dist_coeff.k3;
    fx = data->camera_matrix.a11;
    fy = data->camera_matrix.a22;
    cx = data->camera_matrix.a13;
    cy = data->camera_matrix.a23;

    for(v = fov_y_start; v <= fov_y_end; v++) {
        _y = (v-cy)/fy;
        _y_sq = _y*_y;

        for(u = fov_x_start; u <= fov_x_end; u++) {
            _x = (u-cx)/fx;
            _x_sq = _x*_x;
            r_sq = _y_sq + _x_sq;

            dist_radial = (1 + k1*r_sq + k2*r_sq*r_sq + k3*r_sq*r_sq*r_sq);

            dist_tang_x = 2*p1*_x*_y + p2*(r_sq + 2*_x_sq);
            dist_x = _x*dist_radial + dist_tang_x;

            dist_tang_y = p1*(r_sq + 2*_y_sq) + 2*p2*_x*_y;
            dist_y = _y*dist_radial + dist_tang_y;

            x_corr = fx*dist_x + cx - fov_x_start;
            y_corr = fy*dist_y + cy - fov_y_start;

            if(bit_channel <= 8) {
                interpolate8(img_calib, u - fov_x_start, v - fov_y_start, height, width, img_uncalib, x_corr, y_corr);
            }
            else if(bit_channel <= 16) {
                interpolate16(img_calib, u - fov_x_start, v - fov_y_start, height, width, img_uncalib, x_corr, y_corr);
            }
        }
    }
}
#endif /* __AVX__ */
#endif


/**
 * Calculate undistortion map used for camera calibration.
 *
 * The undistortion map corrects the radial and tangential distortion.
 * Five distortion coefficients are used: k1, k2, p1, p2, k3
 *
 * Radial distortion causes a barrel or pincushion effect:
 *
 *   x_dist = x*(1 + k1*r² + k2*r⁴ + k3*r⁶)
 *   y_dist = y*(1 + k1*r² + k2*r⁴ + k3*r⁶)
 *
 * Tangential distortion (lens is not perfectly aligned):
 *
 *   x_dist = x + (2*p1*x*y + p2*(r² + 2*x²))
 *   y_dist = y + (p1*(r² + 2*y²) + 2*p2*x*y)
 *
 * In addition, the intrinsic parameters of the camera are used, containing
 * information about the focal length (fx, fy) and the optical center (cx, cy):
 *
 *       | fx  0  cx |
 *   A = |  0 fy  cy |
 *       |  0  0   1 |
 *
 * @param data required camera calibration data
 */
static void init_undistort_map_scalar(struct cam_calib_data_t *data)
{
    int v, u, fov_x_start, fov_x_end, fov_y_start, fov_y_end;
    float k1, k2, p1, p2, k3;
    float fx, fy, cx, cy;
    float dist_radial, dist_tang_x, dist_tang_y, dist_x, dist_y;
    float _x, _y, _y_sq, _x_sq, r_sq;
    float x_corr, y_corr;
    struct coord_t *map;
    const int scale_fact = (1 << (data->calib_map_scale_fact));
    float x_resolution_scaling, y_resolution_scaling;   // camera_matrix needs to be scaled depending on the camera resolution

    fov_x_start = data->fov_x_start;
    fov_x_end = data->fov_x_end;
    fov_y_start = data->fov_y_start;
    fov_y_end = data->fov_y_end;

    k1 = data->dist_coeff.k1;
    k2 = data->dist_coeff.k2;
    p1 = data->dist_coeff.p1;
    p2 = data->dist_coeff.p2;
    k3 = data->dist_coeff.k3;

    /*
     * While the distortion coefficients are the same regardless of the camera resolution
     * used, the camera matrix coefficients should be scaled along with the current
     * resolution from the calibrated resolution (see the OpenCV documentation).
     */
    x_resolution_scaling = (float)(fov_x_end - fov_x_start + 1) / data->tot_width;
    y_resolution_scaling = (float)(fov_y_end - fov_y_start + 1) / data->tot_height;

    fx = data->camera_matrix.a11/x_resolution_scaling;
    fy = data->camera_matrix.a22/y_resolution_scaling;
    cx = data->camera_matrix.a13/x_resolution_scaling;
    cy = data->camera_matrix.a23/y_resolution_scaling;

    map = data->calib_map;

    for(v = fov_y_start; v <= fov_y_end; v++) {
        _y = (v-cy)/fy;
        _y_sq = _y*_y;

        for(u = fov_x_start; u <= fov_x_end; u++) {
            _x = (u-cx)/fx;
            _x_sq = _x*_x;
            r_sq = _y_sq + _x_sq;

            dist_radial = (1 + k1*r_sq + k2*r_sq*r_sq + k3*r_sq*r_sq*r_sq);

            dist_tang_x = 2*p1*_x*_y + p2*(r_sq + 2*_x_sq);
            dist_x = _x*dist_radial + dist_tang_x;

            dist_tang_y = p1*(r_sq + 2*_y_sq) + 2*p2*_x*_y;
            dist_y = _y*dist_radial + dist_tang_y;

            x_corr = fx*dist_x + cx - fov_x_start;
            y_corr = fy*dist_y + cy - fov_y_start;

            // store the corrected source coordinate as a fixed-point value
            map->x = (int)roundf(x_corr*scale_fact);
            map->y = (int)roundf(y_corr*scale_fact);
            map++;
        }
    }
}
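
/*
 * Illustrative sketch (not compiled): how a single destination pixel runs through the
 * distortion model documented above. The intrinsics and distortion coefficients below
 * are made-up demo values; real values come from struct cam_calib_data_t, and the FOV
 * offset handled by init_undistort_map_scalar() is ignored here for brevity.
 */
#if 0
static void example_undistort_one_pixel(void)
{
    // hypothetical intrinsics and distortion coefficients (demo values only)
    const float fx = 1200.0f, fy = 1200.0f, cx = 640.0f, cy = 480.0f;
    const float k1 = -0.12f, k2 = 0.03f, k3 = 0.0f, p1 = 0.001f, p2 = -0.0005f;
    const int u = 1000, v = 200;    // destination (undistorted) pixel

    // normalize to the camera coordinate system
    float x = (u - cx)/fx;
    float y = (v - cy)/fy;
    float r_sq = x*x + y*y;

    // radial and tangential parts, same formulas as in init_undistort_map_scalar()
    float radial = 1 + k1*r_sq + k2*r_sq*r_sq + k3*r_sq*r_sq*r_sq;
    float x_dist = x*radial + 2*p1*x*y + p2*(r_sq + 2*x*x);
    float y_dist = y*radial + p1*(r_sq + 2*y*y) + 2*p2*x*y;

    // project back to pixel coordinates: the pixel value is fetched from here
    float x_src = fx*x_dist + cx;
    float y_src = fy*y_dist + cy;

    // with calib_map_scale_fact = 9 the map stores roundf(x_src*512) and roundf(y_src*512)
    (void)x_src;
    (void)y_src;
}
#endif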

/**
 * Pixel value interpolation of RGB image (8 bit per color channel).
 * If a pixel coordinate with a fractional part is requested, the value is interpolated
 * from the neighboring pixels.
 *
 * E.g. the pixel coordinate x/y = 1.8/2.3 gives the following weights:
 *    +------+------+
 *    |      |      |
 *    | 14%  | 56%  |   14% = 20%*70%, 56% = 80%*70%
 *    |      |      |
 *    +------+------+
 *    |      |      |
 *    |  6%  | 24%  |    6% = 20%*30%, 24% = 80%*30%
 *    |      |      |
 *    +------+------+
 *
 * The weights are applied to the neighbors and the resulting pixel value is saved at the given location.
 *
 * NOTE
 * The input and output image must have the same pixel size.
 *
 * @param img_out On return: image with interpolated values
 * @param x saved interpolated pixel value at this x-coordinate
 * @param y saved interpolated pixel value at this y-coordinate
 * @param height image height of input and output image in number of pixels
 * @param width image width of input and output image in number of pixels
 * @param img_in input image to interpolate pixel values
 * @param coord_x x-coordinate to interpolate
 * @param coord_y y-coordinate to interpolate
 * @param scale_fact coordinates are scaled by this factor
 */
static void interpolate_rgb8_scalar(uint8_t *img_out, const int x, const int y, const int height, const int width,
                                    const uint8_t *img_in, const int coord_x, const int coord_y, const int scale_fact)
#include "alg_interpolate_rgb_scalar.h"
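
/*
 * Illustrative sketch (not compiled): reproduces the percentages from the example above
 * in plain float arithmetic. The function name is hypothetical and the real interpolation
 * runs in fixed point (see alg_interpolate_rgb_scalar.h); only the weighting scheme is shown.
 */
#if 0
static void example_bilinear_weights(void)
{
    const float x = 1.8f, y = 2.3f;     // requested source coordinate

    float wx = x - 1.0f;    // fractional part: 0.8 --> 80% toward the right column
    float wy = y - 2.0f;    // fractional part: 0.3 --> 30% toward the lower row

    float w_upper_left  = (1.0f - wx)*(1.0f - wy);  // 0.2*0.7 = 14%
    float w_upper_right = wx*(1.0f - wy);           // 0.8*0.7 = 56%
    float w_lower_left  = (1.0f - wx)*wy;           // 0.2*0.3 =  6%
    float w_lower_right = wx*wy;                    // 0.8*0.3 = 24%

    // interpolated value = sum of the four neighbor values, each multiplied by its weight
    (void)w_upper_left; (void)w_upper_right; (void)w_lower_left; (void)w_lower_right;
}
#endif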

/**
 * Pixel value interpolation of RGB image (16 bit per color channel).
 * If a pixel coordinate with a fractional part is requested, the value is interpolated
 * from the neighboring pixels.
 *
 * E.g. the pixel coordinate x/y = 1.8/2.3 gives the following weights:
 *    +------+------+
 *    |      |      |
 *    | 14%  | 56%  |   14% = 20%*70%, 56% = 80%*70%
 *    |      |      |
 *    +------+------+
 *    |      |      |
 *    |  6%  | 24%  |    6% = 20%*30%, 24% = 80%*30%
 *    |      |      |
 *    +------+------+
 *
 * The weights are applied to the neighbors and the resulting pixel value is saved at the given location.
 *
 * NOTE
 * The input and output image must have the same pixel size.
 *
 * @param img_out On return: image with interpolated values
 * @param x saved interpolated pixel value at this x-coordinate
 * @param y saved interpolated pixel value at this y-coordinate
 * @param height image height of input and output image in number of pixels
 * @param width image width of input and output image in number of pixels
 * @param img_in input image to interpolate pixel values
 * @param coord_x x-coordinate to interpolate
 * @param coord_y y-coordinate to interpolate
 * @param scale_fact coordinates are scaled by this factor
 */
static void interpolate_rgb16_scalar(uint16_t *img_out, const int x, const int y, const int height, const int width,
                                     const uint16_t *img_in, const int coord_x, const int coord_y, const int scale_fact)
#include "alg_interpolate_rgb_scalar.h"


/**
 * Pixel value interpolation of monochrome image (8 bit per pixel).
 * If a pixel coordinate with a fractional part is requested, the value is interpolated
 * from the neighboring pixels.
 *
 * E.g. the pixel coordinate x/y = 1.8/2.3 gives the following weights:
 *    +------+------+
 *    |      |      |
 *    | 14%  | 56%  |   14% = 20%*70%, 56% = 80%*70%
 *    |      |      |
 *    +------+------+
 *    |      |      |
 *    |  6%  | 24%  |    6% = 20%*30%, 24% = 80%*30%
 *    |      |      |
 *    +------+------+
 *
 * The weights are applied to the neighbors and the resulting pixel value is saved at the given location.
 *
 * NOTE
 * The input and output image must have the same pixel size.
 *
 * @param img_out On return: image with interpolated values
 * @param x saved interpolated pixel value at this x-coordinate
 * @param y saved interpolated pixel value at this y-coordinate
 * @param height image height of input and output image in number of pixels
 * @param width image width of input and output image in number of pixels
 * @param img_in input image to interpolate pixel values
 * @param coord_x x-coordinate to interpolate
 * @param coord_y y-coordinate to interpolate
 * @param scale_fact coordinates are scaled by this factor
 */
static void interpolate_mono8_scalar(uint8_t *img_out, const int x, const int y, const int height, const int width,
                                     const uint8_t *img_in, const int coord_x, const int coord_y, const int scale_fact)
#include "alg_interpolate_mono_scalar.h"


/**
 * Pixel value interpolation of monochrome image (16 bit per pixel).
 * If a pixel coordinate with a fractional part is requested, the value is interpolated
 * from the neighboring pixels.
 *
 * E.g. the pixel coordinate x/y = 1.8/2.3 gives the following weights:
 *    +------+------+
 *    |      |      |
 *    | 14%  | 56%  |   14% = 20%*70%, 56% = 80%*70%
 *    |      |      |
 *    +------+------+
 *    |      |      |
 *    |  6%  | 24%  |    6% = 20%*30%, 24% = 80%*30%
 *    |      |      |
 *    +------+------+
 *
 * The weights are applied to the neighbors and the resulting pixel value is saved at the given location.
 *
 * NOTE
 * The input and output image must have the same pixel size.
 *
 * @param img_out On return: image with interpolated values
 * @param x saved interpolated pixel value at this x-coordinate
 * @param y saved interpolated pixel value at this y-coordinate
 * @param height image height of input and output image in number of pixels
 * @param width image width of input and output image in number of pixels
 * @param img_in input image to interpolate pixel values
 * @param coord_x x-coordinate to interpolate
 * @param coord_y y-coordinate to interpolate
 * @param scale_fact coordinates are scaled by this factor
 */
static void interpolate_mono16_scalar(uint16_t *img_out, const int x, const int y, const int height, const int width,
                                      const uint16_t *img_in, const int coord_x, const int coord_y, const int scale_fact)
#include "alg_interpolate_mono_scalar.h"
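
/*
 * Illustrative sketch (not compiled): the coordinates handed to the interpolation helpers
 * above are the fixed-point values produced by init_undistort_map_scalar(). With
 * calib_map_scale_fact = 9 (set in camera_calib() below), a source coordinate of x = 1.8
 * is encoded and split as follows. The variable names and the shift-based decode are for
 * demonstration only; the actual helpers live in the included alg_interpolate_*.h files.
 */
#if 0
static void example_fixed_point_coordinate(void)
{
    const int scale_fact = 9;                           // 2^9 = 512
    int coord_x = (int)roundf(1.8f * (1 << scale_fact));  // 922, as stored in the map

    int x_int  = coord_x >> scale_fact;                 // 1   --> left neighbor column
    int x_frac = coord_x - (x_int << scale_fact);       // 410 --> 410/512 ~ 0.8 toward the right column

    (void)x_frac;
}
#endif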

/**
 * Lens calibration using bilinear interpolation.
 *
 * @param data required camera calibration data
 */
static void calib_scalar(struct cam_calib_data_t *data)
{
    int v, u, height, width, fov_x_start, fov_x_end, fov_y_start, fov_y_end;
    int x_corr, y_corr;
    void *img_calib, *img_uncalib;
    int bit_channel;
    struct coord_t *map;
    const int scale_fact = data->calib_map_scale_fact;
    const int is_color = data->is_color;

    img_calib = data->img_calib;
    img_uncalib = data->img_in;
    height = data->tot_height;
    width = data->tot_width;
    fov_x_start = data->fov_x_start;
    fov_x_end = data->fov_x_end;
    fov_y_start = data->fov_y_start;
    fov_y_end = data->fov_y_end;
    bit_channel = data->bit_channel;
    map = data->calib_map;

    for(v = fov_y_start; v <= fov_y_end; v++) {
        for(u = fov_x_start; u <= fov_x_end; u++) {
            x_corr = map->x;
            y_corr = map->y;
            map++;

            if(bit_channel <= 8) {
                if(is_color) {
                    interpolate_rgb8_scalar(img_calib, u - fov_x_start, v - fov_y_start, height, width, img_uncalib, x_corr, y_corr, scale_fact);
                }
                else {
                    interpolate_mono8_scalar(img_calib, u - fov_x_start, v - fov_y_start, height, width, img_uncalib, x_corr, y_corr, scale_fact);
                }
            }
            else if(bit_channel <= 16) {
                if(is_color) {
                    interpolate_rgb16_scalar(img_calib, u - fov_x_start, v - fov_y_start, height, width, img_uncalib, x_corr, y_corr, scale_fact);
                }
                else {
                    interpolate_mono16_scalar(img_calib, u - fov_x_start, v - fov_y_start, height, width, img_uncalib, x_corr, y_corr, scale_fact);
                }
            }
        }
    }
}


/**
 * Perform camera calibration by applying the following algorithms:
 *   - radial and tangential lens distortion correction
 *   - bilinear interpolation to calibrate the image
 *
 * The undistorted (calibrated) and distorted (input) image must be the same size.
 *
 * @param data required camera calibration data
 * @return 0 on success, otherwise -1
 */
int camera_calib(struct cam_calib_data_t *data)
{
    /*
     * Check whether undistortion map (lookup-table) is initialized.
     */
    if(data->undistort_map_init == 0) {
        data->calib_map_scale_fact = 9;     // scale factor 9 means 2^9 = 512
        init_undistort_map_scalar(data);
        data->undistort_map_init = 1;
    }

    calib_scalar(data);
    return 0;
}
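
/*
 * Illustrative usage sketch (not compiled): minimal sequence a caller of camera_calib()
 * might go through, setting only the fields this file reads. The buffer sizes, coefficient
 * values and the wrapper function are made up for demonstration; the structure itself comes
 * from the color pipeline headers included above and is normally filled by the pipeline.
 */
#if 0
int example_camera_calib_call(void)
{
    static uint8_t img_in[1280*960*3];      // distorted RGB input, 8 bit per channel
    static uint8_t img_out[1280*960*3];     // undistorted (calibrated) output
    static struct coord_t map[1280*960];    // one map entry per output pixel

    struct cam_calib_data_t data = {0};

    data.img_in = img_in;
    data.img_calib = img_out;
    data.calib_map = map;
    data.undistort_map_init = 0;    // forces camera_calib() to build the map on first use

    data.bit_channel = 8;
    data.is_color = 1;
    data.tot_width = 1280;
    data.tot_height = 960;
    data.fov_x_start = 0;
    data.fov_x_end = 1279;
    data.fov_y_start = 0;
    data.fov_y_end = 959;

    // distortion coefficients and camera matrix as obtained from a calibration tool (demo values)
    data.dist_coeff.k1 = -0.12f;
    data.dist_coeff.k2 = 0.03f;
    data.dist_coeff.k3 = 0.0f;
    data.dist_coeff.p1 = 0.001f;
    data.dist_coeff.p2 = -0.0005f;
    data.camera_matrix.a11 = 1200.0f;   // fx
    data.camera_matrix.a22 = 1200.0f;   // fy
    data.camera_matrix.a13 = 640.0f;    // cx
    data.camera_matrix.a23 = 480.0f;    // cy

    return camera_calib(&data);
}
#endif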