001//
002// This file is auto-generated. Please don't modify it!
003//
004package org.opencv.calib3d;
005
006import java.util.ArrayList;
007import java.util.List;
008import org.opencv.calib3d.UsacParams;
009import org.opencv.core.Mat;
010import org.opencv.core.MatOfDouble;
011import org.opencv.core.MatOfPoint2f;
012import org.opencv.core.MatOfPoint3f;
013import org.opencv.core.Point;
014import org.opencv.core.Rect;
015import org.opencv.core.Scalar;
016import org.opencv.core.Size;
017import org.opencv.core.TermCriteria;
018import org.opencv.utils.Converters;
019
020// C++: class Calib3d
021
022public class Calib3d {
023
024    // C++: enum <unnamed>
025    public static final int
026            CV_ITERATIVE = 0,
027            CV_EPNP = 1,
028            CV_P3P = 2,
029            CV_DLS = 3,
030            CvLevMarq_DONE = 0,
031            CvLevMarq_STARTED = 1,
032            CvLevMarq_CALC_J = 2,
033            CvLevMarq_CHECK_ERR = 3,
034            LMEDS = 4,
035            RANSAC = 8,
036            RHO = 16,
037            USAC_DEFAULT = 32,
038            USAC_PARALLEL = 33,
039            USAC_FM_8PTS = 34,
040            USAC_FAST = 35,
041            USAC_ACCURATE = 36,
042            USAC_PROSAC = 37,
043            USAC_MAGSAC = 38,
044            CALIB_CB_ADAPTIVE_THRESH = 1,
045            CALIB_CB_NORMALIZE_IMAGE = 2,
046            CALIB_CB_FILTER_QUADS = 4,
047            CALIB_CB_FAST_CHECK = 8,
048            CALIB_CB_EXHAUSTIVE = 16,
049            CALIB_CB_ACCURACY = 32,
050            CALIB_CB_LARGER = 64,
051            CALIB_CB_MARKER = 128,
052            CALIB_CB_SYMMETRIC_GRID = 1,
053            CALIB_CB_ASYMMETRIC_GRID = 2,
054            CALIB_CB_CLUSTERING = 4,
055            CALIB_NINTRINSIC = 18,
056            CALIB_USE_INTRINSIC_GUESS = 0x00001,
057            CALIB_FIX_ASPECT_RATIO = 0x00002,
058            CALIB_FIX_PRINCIPAL_POINT = 0x00004,
059            CALIB_ZERO_TANGENT_DIST = 0x00008,
060            CALIB_FIX_FOCAL_LENGTH = 0x00010,
061            CALIB_FIX_K1 = 0x00020,
062            CALIB_FIX_K2 = 0x00040,
063            CALIB_FIX_K3 = 0x00080,
064            CALIB_FIX_K4 = 0x00800,
065            CALIB_FIX_K5 = 0x01000,
066            CALIB_FIX_K6 = 0x02000,
067            CALIB_RATIONAL_MODEL = 0x04000,
068            CALIB_THIN_PRISM_MODEL = 0x08000,
069            CALIB_FIX_S1_S2_S3_S4 = 0x10000,
070            CALIB_TILTED_MODEL = 0x40000,
071            CALIB_FIX_TAUX_TAUY = 0x80000,
072            CALIB_USE_QR = 0x100000,
073            CALIB_FIX_TANGENT_DIST = 0x200000,
074            CALIB_FIX_INTRINSIC = 0x00100,
075            CALIB_SAME_FOCAL_LENGTH = 0x00200,
076            CALIB_ZERO_DISPARITY = 0x00400,
077            CALIB_USE_LU = (1 << 17),
078            CALIB_USE_EXTRINSIC_GUESS = (1 << 22),
079            FM_7POINT = 1,
080            FM_8POINT = 2,
081            FM_LMEDS = 4,
082            FM_RANSAC = 8,
083            fisheye_CALIB_USE_INTRINSIC_GUESS = 1 << 0,
084            fisheye_CALIB_RECOMPUTE_EXTRINSIC = 1 << 1,
085            fisheye_CALIB_CHECK_COND = 1 << 2,
086            fisheye_CALIB_FIX_SKEW = 1 << 3,
087            fisheye_CALIB_FIX_K1 = 1 << 4,
088            fisheye_CALIB_FIX_K2 = 1 << 5,
089            fisheye_CALIB_FIX_K3 = 1 << 6,
090            fisheye_CALIB_FIX_K4 = 1 << 7,
091            fisheye_CALIB_FIX_INTRINSIC = 1 << 8,
092            fisheye_CALIB_FIX_PRINCIPAL_POINT = 1 << 9,
093            fisheye_CALIB_ZERO_DISPARITY = 1 << 10,
094            fisheye_CALIB_FIX_FOCAL_LENGTH = 1 << 11;
095
096
097    // C++: enum GridType (cv.CirclesGridFinderParameters.GridType)
098    public static final int
099            CirclesGridFinderParameters_SYMMETRIC_GRID = 0,
100            CirclesGridFinderParameters_ASYMMETRIC_GRID = 1;
101
102
103    // C++: enum HandEyeCalibrationMethod (cv.HandEyeCalibrationMethod)
104    public static final int
105            CALIB_HAND_EYE_TSAI = 0,
106            CALIB_HAND_EYE_PARK = 1,
107            CALIB_HAND_EYE_HORAUD = 2,
108            CALIB_HAND_EYE_ANDREFF = 3,
109            CALIB_HAND_EYE_DANIILIDIS = 4;
110
111
112    // C++: enum LocalOptimMethod (cv.LocalOptimMethod)
113    public static final int
114            LOCAL_OPTIM_NULL = 0,
115            LOCAL_OPTIM_INNER_LO = 1,
116            LOCAL_OPTIM_INNER_AND_ITER_LO = 2,
117            LOCAL_OPTIM_GC = 3,
118            LOCAL_OPTIM_SIGMA = 4;
119
120
121    // C++: enum NeighborSearchMethod (cv.NeighborSearchMethod)
122    public static final int
123            NEIGH_FLANN_KNN = 0,
124            NEIGH_GRID = 1,
125            NEIGH_FLANN_RADIUS = 2;
126
127
128    // C++: enum RobotWorldHandEyeCalibrationMethod (cv.RobotWorldHandEyeCalibrationMethod)
129    public static final int
130            CALIB_ROBOT_WORLD_HAND_EYE_SHAH = 0,
131            CALIB_ROBOT_WORLD_HAND_EYE_LI = 1;
132
133
134    // C++: enum SamplingMethod (cv.SamplingMethod)
135    public static final int
136            SAMPLING_UNIFORM = 0,
137            SAMPLING_PROGRESSIVE_NAPSAC = 1,
138            SAMPLING_NAPSAC = 2,
139            SAMPLING_PROSAC = 3;
140
141
142    // C++: enum ScoreMethod (cv.ScoreMethod)
143    public static final int
144            SCORE_METHOD_RANSAC = 0,
145            SCORE_METHOD_MSAC = 1,
146            SCORE_METHOD_MAGSAC = 2,
147            SCORE_METHOD_LMEDS = 3;
148
149
150    // C++: enum SolvePnPMethod (cv.SolvePnPMethod)
151    public static final int
152            SOLVEPNP_ITERATIVE = 0,
153            SOLVEPNP_EPNP = 1,
154            SOLVEPNP_P3P = 2,
155            SOLVEPNP_DLS = 3,
156            SOLVEPNP_UPNP = 4,
157            SOLVEPNP_AP3P = 5,
158            SOLVEPNP_IPPE = 6,
159            SOLVEPNP_IPPE_SQUARE = 7,
160            SOLVEPNP_SQPNP = 8,
161            SOLVEPNP_MAX_COUNT = 8+1;
162
163
164    // C++: enum UndistortTypes (cv.UndistortTypes)
165    public static final int
166            PROJ_SPHERICAL_ORTHO = 0,
167            PROJ_SPHERICAL_EQRECT = 1;
168
169
170    //
171    // C++:  void cv::Rodrigues(Mat src, Mat& dst, Mat& jacobian = Mat())
172    //
173
174    /**
175     * Converts a rotation matrix to a rotation vector or vice versa.
176     *
177     * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
178     * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
179     * @param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial
180     * derivatives of the output array components with respect to the input array components.
181     *
182     * \(\begin{array}{l} \theta \leftarrow norm(r) \\ r  \leftarrow r/ \theta \\ R =  \cos(\theta) I + (1- \cos{\theta} ) r r^T +  \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\)
183     *
     * The inverse transformation can also be done easily, since
185     *
186     * \(\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\)
187     *
     * A rotation vector is the most compact and convenient representation of a rotation matrix (since any
189     * rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
190     * optimization procedures like REF: calibrateCamera, REF: stereoCalibrate, or REF: solvePnP .
191     *
192     * <b>Note:</b> More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate
193     * can be found in:
194     * <ul>
195     *   <li>
196     *      A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi CITE: Gallego2014ACF
197     *   </li>
198     * </ul>
199     *
200     * <b>Note:</b> Useful information on SE(3) and Lie Groups can be found in:
201     * <ul>
202     *   <li>
203     *      A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco CITE: blanco2010tutorial
204     *   </li>
205     *   <li>
206     *      Lie Groups for 2D and 3D Transformation, Ethan Eade CITE: Eade17
207     *   </li>
208     *   <li>
209     *      A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan CITE: Sol2018AML
210     *   </li>
211     * </ul>
212     */
213    public static void Rodrigues(Mat src, Mat dst, Mat jacobian) {
214        Rodrigues_0(src.nativeObj, dst.nativeObj, jacobian.nativeObj);
215    }
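
    // Illustrative usage sketch (not part of the generated bindings): converting a rotation
    // vector to a 3x3 rotation matrix and back. Variable names are hypothetical; this assumes the
    // OpenCV native library has been loaded (e.g. System.loadLibrary(Core.NATIVE_LIBRARY_NAME))
    // and that org.opencv.core.CvType is available.
    //
    //     Mat rvec = new Mat(3, 1, CvType.CV_64F);
    //     rvec.put(0, 0, 0.0, 0.0, Math.PI / 2.0); // 90-degree rotation about the z-axis
    //     Mat R = new Mat();
    //     Calib3d.Rodrigues(rvec, R);              // R is now the corresponding 3x3 rotation matrix
    //     Calib3d.Rodrigues(R, rvec);              // converts back to the compact vector form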
216
217    /**
218     * Converts a rotation matrix to a rotation vector or vice versa.
219     *
220     * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
221     * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
223     *
224     * \(\begin{array}{l} \theta \leftarrow norm(r) \\ r  \leftarrow r/ \theta \\ R =  \cos(\theta) I + (1- \cos{\theta} ) r r^T +  \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\)
225     *
     * The inverse transformation can also be done easily, since
227     *
228     * \(\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\)
229     *
     * A rotation vector is the most compact and convenient representation of a rotation matrix (since any
231     * rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
232     * optimization procedures like REF: calibrateCamera, REF: stereoCalibrate, or REF: solvePnP .
233     *
234     * <b>Note:</b> More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate
235     * can be found in:
236     * <ul>
237     *   <li>
238     *      A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi CITE: Gallego2014ACF
239     *   </li>
240     * </ul>
241     *
242     * <b>Note:</b> Useful information on SE(3) and Lie Groups can be found in:
243     * <ul>
244     *   <li>
245     *      A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco CITE: blanco2010tutorial
246     *   </li>
247     *   <li>
248     *      Lie Groups for 2D and 3D Transformation, Ethan Eade CITE: Eade17
249     *   </li>
250     *   <li>
251     *      A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan CITE: Sol2018AML
252     *   </li>
253     * </ul>
254     */
255    public static void Rodrigues(Mat src, Mat dst) {
256        Rodrigues_1(src.nativeObj, dst.nativeObj);
257    }
258
259
260    //
261    // C++:  Mat cv::findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, int method = 0, double ransacReprojThreshold = 3, Mat& mask = Mat(), int maxIters = 2000, double confidence = 0.995)
262    //
263
264    /**
265     * Finds a perspective transformation between two planes.
266     *
267     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
268     * or vector&lt;Point2f&gt; .
269     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
270     * a vector&lt;Point2f&gt; .
271     * @param method Method used to compute a homography matrix. The following methods are possible:
272     * <ul>
273     *   <li>
274     *    <b>0</b> - a regular method using all the points, i.e., the least squares method
275     *   </li>
276     *   <li>
277     *    REF: RANSAC - RANSAC-based robust method
278     *   </li>
279     *   <li>
280     *    REF: LMEDS - Least-Median robust method
281     *   </li>
282     *   <li>
     *    REF: RHO - PROSAC-based robust method
     *   </li>
     * </ul>
     * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
     * (used in the RANSAC and RHO methods only). That is, if
     * \(\| \texttt{dstPoints} _i -  \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2  &gt;  \texttt{ransacReprojThreshold}\)
     * then the point \(i\) is considered an outlier. If srcPoints and dstPoints are measured in pixels,
     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
     * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
     * mask values are ignored.
     * @param maxIters The maximum number of RANSAC iterations.
     * @param confidence Confidence level, between 0 and 1.
295     *
296     * The function finds and returns the perspective transformation \(H\) between the source and the
297     * destination planes:
298     *
299     * \(s_i  \vecthree{x'_i}{y'_i}{1} \sim H  \vecthree{x_i}{y_i}{1}\)
300     *
301     * so that the back-projection error
302     *
303     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
304     *
305     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
306     * pairs to compute an initial homography estimate with a simple least-squares scheme.
307     *
308     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
309     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
310     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
311     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
312     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
313     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
314     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
315     * the mask of inliers/outliers.
316     *
317     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
318     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
319     * re-projection error even more.
320     *
321     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
322     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
323     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
324     * noise is rather small, use the default method (method=0).
325     *
326     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
327     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
328     * cannot be estimated, an empty one will be returned.
329     *
330     * SEE:
331     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
332     * perspectiveTransform
     * @return the estimated 3x3 homography matrix, or an empty matrix if the homography cannot be estimated.
334     */
335    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask, int maxIters, double confidence) {
336        Mat srcPoints_mat = srcPoints;
337        Mat dstPoints_mat = dstPoints;
338        return new Mat(findHomography_0(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold, mask.nativeObj, maxIters, confidence));
339    }
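
    // Illustrative usage sketch (hypothetical data): estimating a homography robustly with RANSAC
    // from four matched point pairs. In practice the correspondences usually come from feature
    // matching; the OpenCV native library must already be loaded.
    //
    //     MatOfPoint2f srcPts = new MatOfPoint2f(
    //             new Point(0, 0), new Point(100, 0), new Point(100, 100), new Point(0, 100));
    //     MatOfPoint2f dstPts = new MatOfPoint2f(
    //             new Point(12, 5), new Point(110, 8), new Point(108, 112), new Point(6, 108));
    //     Mat inlierMask = new Mat();
    //     Mat H = Calib3d.findHomography(srcPts, dstPts, Calib3d.RANSAC, 3.0, inlierMask, 2000, 0.995);
    //     // H is 3x3 (empty if estimation failed); inlierMask flags the RANSAC inliers.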
340
341    /**
342     * Finds a perspective transformation between two planes.
343     *
344     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
345     * or vector&lt;Point2f&gt; .
346     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
347     * a vector&lt;Point2f&gt; .
348     * @param method Method used to compute a homography matrix. The following methods are possible:
349     * <ul>
350     *   <li>
351     *    <b>0</b> - a regular method using all the points, i.e., the least squares method
352     *   </li>
353     *   <li>
354     *    REF: RANSAC - RANSAC-based robust method
355     *   </li>
356     *   <li>
357     *    REF: LMEDS - Least-Median robust method
358     *   </li>
359     *   <li>
     *    REF: RHO - PROSAC-based robust method
     *   </li>
     * </ul>
     * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
     * (used in the RANSAC and RHO methods only). That is, if
     * \(\| \texttt{dstPoints} _i -  \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2  &gt;  \texttt{ransacReprojThreshold}\)
     * then the point \(i\) is considered an outlier. If srcPoints and dstPoints are measured in pixels,
     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
     * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
     * mask values are ignored.
     * @param maxIters The maximum number of RANSAC iterations.
371     *
372     * The function finds and returns the perspective transformation \(H\) between the source and the
373     * destination planes:
374     *
375     * \(s_i  \vecthree{x'_i}{y'_i}{1} \sim H  \vecthree{x_i}{y_i}{1}\)
376     *
377     * so that the back-projection error
378     *
379     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
380     *
381     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
382     * pairs to compute an initial homography estimate with a simple least-squares scheme.
383     *
384     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
385     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
386     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
387     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
388     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
389     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
390     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
391     * the mask of inliers/outliers.
392     *
393     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
394     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
395     * re-projection error even more.
396     *
397     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
398     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
399     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
400     * noise is rather small, use the default method (method=0).
401     *
402     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
403     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
404     * cannot be estimated, an empty one will be returned.
405     *
406     * SEE:
407     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
408     * perspectiveTransform
     * @return the estimated 3x3 homography matrix, or an empty matrix if the homography cannot be estimated.
410     */
411    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask, int maxIters) {
412        Mat srcPoints_mat = srcPoints;
413        Mat dstPoints_mat = dstPoints;
414        return new Mat(findHomography_1(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold, mask.nativeObj, maxIters));
415    }
416
417    /**
418     * Finds a perspective transformation between two planes.
419     *
420     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
421     * or vector&lt;Point2f&gt; .
422     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
423     * a vector&lt;Point2f&gt; .
424     * @param method Method used to compute a homography matrix. The following methods are possible:
425     * <ul>
426     *   <li>
427     *    <b>0</b> - a regular method using all the points, i.e., the least squares method
428     *   </li>
429     *   <li>
430     *    REF: RANSAC - RANSAC-based robust method
431     *   </li>
432     *   <li>
433     *    REF: LMEDS - Least-Median robust method
434     *   </li>
435     *   <li>
     *    REF: RHO - PROSAC-based robust method
     *   </li>
     * </ul>
     * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
     * (used in the RANSAC and RHO methods only). That is, if
     * \(\| \texttt{dstPoints} _i -  \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2  &gt;  \texttt{ransacReprojThreshold}\)
     * then the point \(i\) is considered an outlier. If srcPoints and dstPoints are measured in pixels,
     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
     * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
     * mask values are ignored.
446     *
447     * The function finds and returns the perspective transformation \(H\) between the source and the
448     * destination planes:
449     *
450     * \(s_i  \vecthree{x'_i}{y'_i}{1} \sim H  \vecthree{x_i}{y_i}{1}\)
451     *
452     * so that the back-projection error
453     *
454     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
455     *
456     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
457     * pairs to compute an initial homography estimate with a simple least-squares scheme.
458     *
459     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
460     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
461     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
462     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
463     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
464     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
465     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
466     * the mask of inliers/outliers.
467     *
468     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
469     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
470     * re-projection error even more.
471     *
472     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
473     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
474     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
475     * noise is rather small, use the default method (method=0).
476     *
477     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
478     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
479     * cannot be estimated, an empty one will be returned.
480     *
481     * SEE:
482     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
483     * perspectiveTransform
     * @return the estimated 3x3 homography matrix, or an empty matrix if the homography cannot be estimated.
485     */
486    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask) {
487        Mat srcPoints_mat = srcPoints;
488        Mat dstPoints_mat = dstPoints;
489        return new Mat(findHomography_2(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold, mask.nativeObj));
490    }
491
492    /**
493     * Finds a perspective transformation between two planes.
494     *
495     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
496     * or vector&lt;Point2f&gt; .
497     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
498     * a vector&lt;Point2f&gt; .
499     * @param method Method used to compute a homography matrix. The following methods are possible:
500     * <ul>
501     *   <li>
502     *    <b>0</b> - a regular method using all the points, i.e., the least squares method
503     *   </li>
504     *   <li>
505     *    REF: RANSAC - RANSAC-based robust method
506     *   </li>
507     *   <li>
508     *    REF: LMEDS - Least-Median robust method
509     *   </li>
510     *   <li>
     *    REF: RHO - PROSAC-based robust method
     *   </li>
     * </ul>
     * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
     * (used in the RANSAC and RHO methods only). That is, if
     * \(\| \texttt{dstPoints} _i -  \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2  &gt;  \texttt{ransacReprojThreshold}\)
     * then the point \(i\) is considered an outlier. If srcPoints and dstPoints are measured in pixels,
     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
520     *
521     * The function finds and returns the perspective transformation \(H\) between the source and the
522     * destination planes:
523     *
524     * \(s_i  \vecthree{x'_i}{y'_i}{1} \sim H  \vecthree{x_i}{y_i}{1}\)
525     *
526     * so that the back-projection error
527     *
528     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
529     *
530     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
531     * pairs to compute an initial homography estimate with a simple least-squares scheme.
532     *
533     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
534     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
535     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
536     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
537     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
538     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
539     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
540     * the mask of inliers/outliers.
541     *
542     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
543     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
544     * re-projection error even more.
545     *
546     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
547     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
548     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
549     * noise is rather small, use the default method (method=0).
550     *
551     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
552     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
553     * cannot be estimated, an empty one will be returned.
554     *
555     * SEE:
556     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
557     * perspectiveTransform
     * @return the estimated 3x3 homography matrix, or an empty matrix if the homography cannot be estimated.
559     */
560    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold) {
561        Mat srcPoints_mat = srcPoints;
562        Mat dstPoints_mat = dstPoints;
563        return new Mat(findHomography_3(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold));
564    }
565
566    /**
567     * Finds a perspective transformation between two planes.
568     *
569     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
570     * or vector&lt;Point2f&gt; .
571     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
572     * a vector&lt;Point2f&gt; .
573     * @param method Method used to compute a homography matrix. The following methods are possible:
574     * <ul>
575     *   <li>
576     *    <b>0</b> - a regular method using all the points, i.e., the least squares method
577     *   </li>
578     *   <li>
579     *    REF: RANSAC - RANSAC-based robust method
580     *   </li>
581     *   <li>
582     *    REF: LMEDS - Least-Median robust method
583     *   </li>
584     *   <li>
     *    REF: RHO - PROSAC-based robust method
     *   </li>
     * </ul>
593     *
594     * The function finds and returns the perspective transformation \(H\) between the source and the
595     * destination planes:
596     *
597     * \(s_i  \vecthree{x'_i}{y'_i}{1} \sim H  \vecthree{x_i}{y_i}{1}\)
598     *
599     * so that the back-projection error
600     *
601     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
602     *
603     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
604     * pairs to compute an initial homography estimate with a simple least-squares scheme.
605     *
606     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
607     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
608     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
609     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
610     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
611     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
612     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
613     * the mask of inliers/outliers.
614     *
615     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
616     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
617     * re-projection error even more.
618     *
619     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
620     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
621     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
622     * noise is rather small, use the default method (method=0).
623     *
624     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
625     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
626     * cannot be estimated, an empty one will be returned.
627     *
628     * SEE:
629     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
630     * perspectiveTransform
     * @return the estimated 3x3 homography matrix, or an empty matrix if the homography cannot be estimated.
632     */
633    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method) {
634        Mat srcPoints_mat = srcPoints;
635        Mat dstPoints_mat = dstPoints;
636        return new Mat(findHomography_4(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method));
637    }
638
639    /**
640     * Finds a perspective transformation between two planes.
641     *
642     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
643     * or vector&lt;Point2f&gt; .
644     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
645     * a vector&lt;Point2f&gt; .
     * With this variant the homography is computed with the default method <b>0</b>, i.e. a plain
     * least-squares estimate over all the point pairs; see the other overloads for the robust
     * RANSAC, LMeDS and RHO options.
665     *
666     * The function finds and returns the perspective transformation \(H\) between the source and the
667     * destination planes:
668     *
669     * \(s_i  \vecthree{x'_i}{y'_i}{1} \sim H  \vecthree{x_i}{y_i}{1}\)
670     *
671     * so that the back-projection error
672     *
673     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
674     *
675     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
676     * pairs to compute an initial homography estimate with a simple least-squares scheme.
677     *
678     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
679     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
680     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
681     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
682     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
683     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
684     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
685     * the mask of inliers/outliers.
686     *
687     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
688     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
689     * re-projection error even more.
690     *
691     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
692     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
693     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
694     * noise is rather small, use the default method (method=0).
695     *
696     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
697     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
698     * cannot be estimated, an empty one will be returned.
699     *
700     * SEE:
701     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
702     * perspectiveTransform
     * @return the estimated 3x3 homography matrix, or an empty matrix if the homography cannot be estimated.
704     */
705    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints) {
706        Mat srcPoints_mat = srcPoints;
707        Mat dstPoints_mat = dstPoints;
708        return new Mat(findHomography_5(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj));
709    }
710
711
712    //
713    // C++:  Mat cv::findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, Mat& mask, UsacParams params)
714    //
715
716    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, Mat mask, UsacParams params) {
717        Mat srcPoints_mat = srcPoints;
718        Mat dstPoints_mat = dstPoints;
719        return new Mat(findHomography_6(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, mask.nativeObj, params.nativeObj));
720    }
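
    // Illustrative usage sketch: the USAC-parameterized overload, shown here with a
    // default-constructed UsacParams. The concrete setters available on UsacParams for tuning
    // (threshold, confidence, sampler, and so on) depend on the OpenCV version, so they are not
    // shown; srcPts/dstPts are the matched point sets from the sketch above.
    //
    //     Mat inlierMask = new Mat();
    //     UsacParams usacParams = new UsacParams();
    //     Mat H = Calib3d.findHomography(srcPts, dstPts, inlierMask, usacParams);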
721
722
723    //
724    // C++:  Vec3d cv::RQDecomp3x3(Mat src, Mat& mtxR, Mat& mtxQ, Mat& Qx = Mat(), Mat& Qy = Mat(), Mat& Qz = Mat())
725    //
726
727    /**
728     * Computes an RQ decomposition of 3x3 matrices.
729     *
730     * @param src 3x3 input matrix.
731     * @param mtxR Output 3x3 upper-triangular matrix.
732     * @param mtxQ Output 3x3 orthogonal matrix.
733     * @param Qx Optional output 3x3 rotation matrix around x-axis.
734     * @param Qy Optional output 3x3 rotation matrix around y-axis.
735     * @param Qz Optional output 3x3 rotation matrix around z-axis.
736     *
     * The function computes an RQ decomposition using the given rotations. This function is used in
738     * decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
739     * and a rotation matrix.
740     *
741     * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
742     * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
743     * sequence of rotations about the three principal axes that results in the same orientation of an
     * object, e.g. see CITE: Slabaugh. The returned three rotation matrices and the corresponding three Euler angles
745     * are only one of the possible solutions.
     * @return the three Euler angles of rotation, in degrees.
747     */
748    public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx, Mat Qy, Mat Qz) {
749        return RQDecomp3x3_0(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj, Qx.nativeObj, Qy.nativeObj, Qz.nativeObj);
750    }
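
    // Illustrative usage sketch (hypothetical input): RQ-decomposing a 3x3 matrix and reading the
    // Euler angles returned by the function. Assumes org.opencv.core.CvType is available.
    //
    //     Mat M = Mat.eye(3, 3, CvType.CV_64F);   // stand-in for a real 3x3 matrix
    //     Mat mtxR = new Mat(), mtxQ = new Mat();
    //     double[] eulerAnglesDeg = Calib3d.RQDecomp3x3(M, mtxR, mtxQ);
    //     // eulerAnglesDeg holds the three Euler angles in degrees; M = mtxR * mtxQ.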
751
752    /**
753     * Computes an RQ decomposition of 3x3 matrices.
754     *
755     * @param src 3x3 input matrix.
756     * @param mtxR Output 3x3 upper-triangular matrix.
757     * @param mtxQ Output 3x3 orthogonal matrix.
758     * @param Qx Optional output 3x3 rotation matrix around x-axis.
759     * @param Qy Optional output 3x3 rotation matrix around y-axis.
760     *
     * The function computes an RQ decomposition using the given rotations. This function is used in
762     * decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
763     * and a rotation matrix.
764     *
765     * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
766     * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
767     * sequence of rotations about the three principal axes that results in the same orientation of an
     * object, e.g. see CITE: Slabaugh. The returned three rotation matrices and the corresponding three Euler angles
769     * are only one of the possible solutions.
     * @return the three Euler angles of rotation, in degrees.
771     */
772    public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx, Mat Qy) {
773        return RQDecomp3x3_1(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj, Qx.nativeObj, Qy.nativeObj);
774    }
775
776    /**
777     * Computes an RQ decomposition of 3x3 matrices.
778     *
779     * @param src 3x3 input matrix.
780     * @param mtxR Output 3x3 upper-triangular matrix.
781     * @param mtxQ Output 3x3 orthogonal matrix.
782     * @param Qx Optional output 3x3 rotation matrix around x-axis.
783     *
     * The function computes an RQ decomposition using the given rotations. This function is used in
785     * decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
786     * and a rotation matrix.
787     *
788     * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
789     * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
790     * sequence of rotations about the three principal axes that results in the same orientation of an
     * object, e.g. see CITE: Slabaugh. The returned three rotation matrices and the corresponding three Euler angles
792     * are only one of the possible solutions.
     * @return the three Euler angles of rotation, in degrees.
794     */
795    public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx) {
796        return RQDecomp3x3_2(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj, Qx.nativeObj);
797    }
798
799    /**
800     * Computes an RQ decomposition of 3x3 matrices.
801     *
802     * @param src 3x3 input matrix.
803     * @param mtxR Output 3x3 upper-triangular matrix.
804     * @param mtxQ Output 3x3 orthogonal matrix.
805     *
     * The function computes an RQ decomposition using the given rotations. This function is used in
807     * decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
808     * and a rotation matrix.
809     *
810     * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
811     * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
812     * sequence of rotations about the three principal axes that results in the same orientation of an
     * object, e.g. see CITE: Slabaugh. The returned three rotation matrices and the corresponding three Euler angles
814     * are only one of the possible solutions.
     * @return the three Euler angles of rotation, in degrees.
816     */
817    public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ) {
818        return RQDecomp3x3_3(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj);
819    }
820
821
822    //
823    // C++:  void cv::decomposeProjectionMatrix(Mat projMatrix, Mat& cameraMatrix, Mat& rotMatrix, Mat& transVect, Mat& rotMatrixX = Mat(), Mat& rotMatrixY = Mat(), Mat& rotMatrixZ = Mat(), Mat& eulerAngles = Mat())
824    //
825
826    /**
827     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
828     *
829     * @param projMatrix 3x4 input projection matrix P.
830     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
831     * @param rotMatrix Output 3x3 external rotation matrix R.
832     * @param transVect Output 4x1 translation vector T.
833     * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
834     * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
835     * @param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
836     * @param eulerAngles Optional three-element vector containing three Euler angles of rotation in
837     * degrees.
838     *
839     * The function computes a decomposition of a projection matrix into a calibration and a rotation
840     * matrix and the position of a camera.
841     *
842     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
843     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh. The returned
     * three rotation matrices and the corresponding three Euler angles are only one of the possible solutions.
846     *
847     * The function is based on RQDecomp3x3 .
848     */
849    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY, Mat rotMatrixZ, Mat eulerAngles) {
850        decomposeProjectionMatrix_0(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj, rotMatrixY.nativeObj, rotMatrixZ.nativeObj, eulerAngles.nativeObj);
851    }
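
    // Illustrative usage sketch (hypothetical input): splitting a 3x4 projection matrix into a
    // camera intrinsic matrix, a rotation matrix and the translation part.
    //
    //     Mat projMatrix = new Mat(3, 4, CvType.CV_64F);   // fill with a real P in practice
    //     Mat K = new Mat(), R = new Mat(), t = new Mat();
    //     Calib3d.decomposeProjectionMatrix(projMatrix, K, R, t);
    //     // K: 3x3 intrinsics, R: 3x3 rotation, t: 4x1 translation vector in homogeneous coordinates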
852
853    /**
854     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
855     *
856     * @param projMatrix 3x4 input projection matrix P.
857     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
858     * @param rotMatrix Output 3x3 external rotation matrix R.
859     * @param transVect Output 4x1 translation vector T.
860     * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
861     * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
862     * @param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
864     *
865     * The function computes a decomposition of a projection matrix into a calibration and a rotation
866     * matrix and the position of a camera.
867     *
868     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
869     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh. The returned
     * three rotation matrices and the corresponding three Euler angles are only one of the possible solutions.
872     *
873     * The function is based on RQDecomp3x3 .
874     */
875    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY, Mat rotMatrixZ) {
876        decomposeProjectionMatrix_1(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj, rotMatrixY.nativeObj, rotMatrixZ.nativeObj);
877    }
878
879    /**
880     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
881     *
882     * @param projMatrix 3x4 input projection matrix P.
883     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
884     * @param rotMatrix Output 3x3 external rotation matrix R.
885     * @param transVect Output 4x1 translation vector T.
886     * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
887     * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
889     *
890     * The function computes a decomposition of a projection matrix into a calibration and a rotation
891     * matrix and the position of a camera.
892     *
893     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
894     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh. The returned
     * three rotation matrices and the corresponding three Euler angles are only one of the possible solutions.
897     *
898     * The function is based on RQDecomp3x3 .
899     */
900    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY) {
901        decomposeProjectionMatrix_2(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj, rotMatrixY.nativeObj);
902    }
903
904    /**
905     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
906     *
907     * @param projMatrix 3x4 input projection matrix P.
908     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
909     * @param rotMatrix Output 3x3 external rotation matrix R.
910     * @param transVect Output 4x1 translation vector T.
911     * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
913     *
914     * The function computes a decomposition of a projection matrix into a calibration and a rotation
915     * matrix and the position of a camera.
916     *
917     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
918     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh. The returned
     * three rotation matrices and the corresponding three Euler angles are only one of the possible solutions.
921     *
922     * The function is based on RQDecomp3x3 .
923     */
924    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX) {
925        decomposeProjectionMatrix_3(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj);
926    }
927
928    /**
929     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
930     *
931     * @param projMatrix 3x4 input projection matrix P.
932     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
933     * @param rotMatrix Output 3x3 external rotation matrix R.
934     * @param transVect Output 4x1 translation vector T.
936     *
937     * The function computes a decomposition of a projection matrix into a calibration and a rotation
938     * matrix and the position of a camera.
939     *
940     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
941     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh. The returned
     * three rotation matrices and the corresponding three Euler angles are only one of the possible solutions.
944     *
945     * The function is based on RQDecomp3x3 .
946     */
947    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect) {
948        decomposeProjectionMatrix_4(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj);
949    }
950
951
952    //
953    // C++:  void cv::matMulDeriv(Mat A, Mat B, Mat& dABdA, Mat& dABdB)
954    //
955
956    /**
957     * Computes partial derivatives of the matrix product for each multiplied matrix.
958     *
959     * @param A First multiplied matrix.
960     * @param B Second multiplied matrix.
961     * @param dABdA First output derivative matrix d(A\*B)/dA of size
     * \(\texttt{A.rows*B.cols} \times \texttt{A.rows*A.cols}\) .
963     * @param dABdB Second output derivative matrix d(A\*B)/dB of size
     * \(\texttt{A.rows*B.cols} \times \texttt{B.rows*B.cols}\) .
965     *
966     * The function computes partial derivatives of the elements of the matrix product \(A*B\) with regard to
967     * the elements of each of the two input matrices. The function is used to compute the Jacobian
968     * matrices in stereoCalibrate but can also be used in any other similar optimization function.
969     */
970    public static void matMulDeriv(Mat A, Mat B, Mat dABdA, Mat dABdB) {
971        matMulDeriv_0(A.nativeObj, B.nativeObj, dABdA.nativeObj, dABdB.nativeObj);
972    }
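
    // Illustrative usage sketch (hypothetical matrices): Jacobians of a matrix product, with the
    // output shapes described above.
    //
    //     Mat A = Mat.eye(3, 4, CvType.CV_64F);
    //     Mat B = Mat.eye(4, 2, CvType.CV_64F);
    //     Mat dABdA = new Mat(), dABdB = new Mat();
    //     Calib3d.matMulDeriv(A, B, dABdA, dABdB);
    //     // dABdA: (A.rows*B.cols) x (A.rows*A.cols) = 6x12, dABdB: (A.rows*B.cols) x (B.rows*B.cols) = 6x8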
973
974
975    //
976    // C++:  void cv::composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat& rvec3, Mat& tvec3, Mat& dr3dr1 = Mat(), Mat& dr3dt1 = Mat(), Mat& dr3dr2 = Mat(), Mat& dr3dt2 = Mat(), Mat& dt3dr1 = Mat(), Mat& dt3dt1 = Mat(), Mat& dt3dr2 = Mat(), Mat& dt3dt2 = Mat())
977    //
978
979    /**
980     * Combines two rotation-and-shift transformations.
981     *
982     * @param rvec1 First rotation vector.
983     * @param tvec1 First translation vector.
984     * @param rvec2 Second rotation vector.
985     * @param tvec2 Second translation vector.
986     * @param rvec3 Output rotation vector of the superposition.
987     * @param tvec3 Output translation vector of the superposition.
988     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
989     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
990     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
991     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
992     * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
993     * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
994     * @param dt3dr2 Optional output derivative of tvec3 with regard to rvec2
995     * @param dt3dt2 Optional output derivative of tvec3 with regard to tvec2
996     *
997     * The functions compute:
998     *
999     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1000     *
1001     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1002     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
1003     *
1004     * Also, the functions can compute the derivatives of the output vectors with regards to the input
1005     * vectors (see matMulDeriv ). The functions are used inside stereoCalibrate but can also be used in
1006     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1007     * function that contains a matrix multiplication.
1008     */
1009    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1, Mat dt3dr2, Mat dt3dt2) {
1010        composeRT_0(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj, dt3dt1.nativeObj, dt3dr2.nativeObj, dt3dt2.nativeObj);
1011    }
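
    // Illustrative usage sketch (hypothetical vectors): chaining [R1|t1] followed by [R2|t2].
    // It assumes the minimal generated overload that omits all of the optional derivative outputs
    // (they default to empty matrices in the C++ signature above).
    //
    //     Mat rvec1 = Mat.zeros(3, 1, CvType.CV_64F), tvec1 = Mat.zeros(3, 1, CvType.CV_64F);
    //     Mat rvec2 = Mat.zeros(3, 1, CvType.CV_64F), tvec2 = Mat.zeros(3, 1, CvType.CV_64F);
    //     Mat rvec3 = new Mat(), tvec3 = new Mat();
    //     Calib3d.composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3);
    //     // rvec3/tvec3 describe applying [R1|t1] first and then [R2|t2].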
1012
1013    /**
1014     * Combines two rotation-and-shift transformations.
1015     *
1016     * @param rvec1 First rotation vector.
1017     * @param tvec1 First translation vector.
1018     * @param rvec2 Second rotation vector.
1019     * @param tvec2 Second translation vector.
1020     * @param rvec3 Output rotation vector of the superposition.
1021     * @param tvec3 Output translation vector of the superposition.
1022     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1023     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
1024     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
1025     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
1026     * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
1027     * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
1028     * @param dt3dr2 Optional output derivative of tvec3 with regard to rvec2
1029     *
     * The function computes:
1031     *
1032     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1033     *
1034     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1035     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
1036     *
     * Also, the function can compute the derivatives of the output vectors with regard to the input
     * vectors (see matMulDeriv). The function is used inside stereoCalibrate but can also be used in
1039     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1040     * function that contains a matrix multiplication.
1041     */
1042    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1, Mat dt3dr2) {
1043        composeRT_1(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj, dt3dt1.nativeObj, dt3dr2.nativeObj);
1044    }
1045
1046    /**
1047     * Combines two rotation-and-shift transformations.
1048     *
1049     * @param rvec1 First rotation vector.
1050     * @param tvec1 First translation vector.
1051     * @param rvec2 Second rotation vector.
1052     * @param tvec2 Second translation vector.
1053     * @param rvec3 Output rotation vector of the superposition.
1054     * @param tvec3 Output translation vector of the superposition.
1055     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1056     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
1057     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
1058     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
1059     * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
1060     * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
1061     *
     * The function computes:
1063     *
1064     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1065     *
1066     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1067     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
1068     *
     * Also, the function can compute the derivatives of the output vectors with regard to the input
     * vectors (see matMulDeriv). The function is used inside stereoCalibrate but can also be used in
1071     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1072     * function that contains a matrix multiplication.
1073     */
1074    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1) {
1075        composeRT_2(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj, dt3dt1.nativeObj);
1076    }
1077
1078    /**
1079     * Combines two rotation-and-shift transformations.
1080     *
1081     * @param rvec1 First rotation vector.
1082     * @param tvec1 First translation vector.
1083     * @param rvec2 Second rotation vector.
1084     * @param tvec2 Second translation vector.
1085     * @param rvec3 Output rotation vector of the superposition.
1086     * @param tvec3 Output translation vector of the superposition.
1087     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1088     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
1089     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
1090     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
1091     * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
1092     *
     * The function computes:
1094     *
1095     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1096     *
1097     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1098     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
1099     *
     * Also, the function can compute the derivatives of the output vectors with regard to the input
     * vectors (see matMulDeriv). The function is used inside stereoCalibrate but can also be used in
1102     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1103     * function that contains a matrix multiplication.
1104     */
1105    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1) {
1106        composeRT_3(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj);
1107    }
1108
1109    /**
1110     * Combines two rotation-and-shift transformations.
1111     *
1112     * @param rvec1 First rotation vector.
1113     * @param tvec1 First translation vector.
1114     * @param rvec2 Second rotation vector.
1115     * @param tvec2 Second translation vector.
1116     * @param rvec3 Output rotation vector of the superposition.
1117     * @param tvec3 Output translation vector of the superposition.
1118     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1119     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
1120     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
1121     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
1122     *
     * The function computes:
1124     *
1125     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1126     *
1127     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1128     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
1129     *
     * Also, the function can compute the derivatives of the output vectors with regard to the input
     * vectors (see matMulDeriv). The function is used inside stereoCalibrate but can also be used in
1132     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1133     * function that contains a matrix multiplication.
1134     */
1135    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2) {
1136        composeRT_4(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj);
1137    }
1138
1139    /**
1140     * Combines two rotation-and-shift transformations.
1141     *
1142     * @param rvec1 First rotation vector.
1143     * @param tvec1 First translation vector.
1144     * @param rvec2 Second rotation vector.
1145     * @param tvec2 Second translation vector.
1146     * @param rvec3 Output rotation vector of the superposition.
1147     * @param tvec3 Output translation vector of the superposition.
1148     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1149     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
1150     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
1151     *
     * The function computes:
1153     *
1154     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1155     *
1156     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1157     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
1158     *
     * Also, the function can compute the derivatives of the output vectors with regard to the input
     * vectors (see matMulDeriv). The function is used inside stereoCalibrate but can also be used in
1161     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1162     * function that contains a matrix multiplication.
1163     */
1164    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2) {
1165        composeRT_5(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj);
1166    }
1167
1168    /**
1169     * Combines two rotation-and-shift transformations.
1170     *
1171     * @param rvec1 First rotation vector.
1172     * @param tvec1 First translation vector.
1173     * @param rvec2 Second rotation vector.
1174     * @param tvec2 Second translation vector.
1175     * @param rvec3 Output rotation vector of the superposition.
1176     * @param tvec3 Output translation vector of the superposition.
1177     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1178     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
1179     *
     * The function computes:
1181     *
1182     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1183     *
1184     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1185     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
1186     *
     * Also, the function can compute the derivatives of the output vectors with regard to the input
     * vectors (see matMulDeriv). The function is used inside stereoCalibrate but can also be used in
1189     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1190     * function that contains a matrix multiplication.
1191     */
1192    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1) {
1193        composeRT_6(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj);
1194    }
1195
1196    /**
1197     * Combines two rotation-and-shift transformations.
1198     *
1199     * @param rvec1 First rotation vector.
1200     * @param tvec1 First translation vector.
1201     * @param rvec2 Second rotation vector.
1202     * @param tvec2 Second translation vector.
1203     * @param rvec3 Output rotation vector of the superposition.
1204     * @param tvec3 Output translation vector of the superposition.
1205     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1206     *
     * The function computes:
1208     *
1209     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1210     *
1211     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1212     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
1213     *
     * Also, the function can compute the derivatives of the output vectors with regard to the input
     * vectors (see matMulDeriv). The function is used inside stereoCalibrate but can also be used in
1216     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1217     * function that contains a matrix multiplication.
1218     */
1219    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1) {
1220        composeRT_7(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj);
1221    }
1222
1223    /**
1224     * Combines two rotation-and-shift transformations.
1225     *
1226     * @param rvec1 First rotation vector.
1227     * @param tvec1 First translation vector.
1228     * @param rvec2 Second rotation vector.
1229     * @param tvec2 Second translation vector.
1230     * @param rvec3 Output rotation vector of the superposition.
1231     * @param tvec3 Output translation vector of the superposition.
1232     *
     * The function computes:
1234     *
1235     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1236     *
1237     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1238     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
1239     *
     * Also, the function can compute the derivatives of the output vectors with regard to the input
     * vectors (see matMulDeriv). The function is used inside stereoCalibrate but can also be used in
1242     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1243     * function that contains a matrix multiplication.
1244     */
1245    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3) {
1246        composeRT_8(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj);
1247    }
1248
1249
1250    //
1251    // C++:  void cv::projectPoints(vector_Point3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, vector_double distCoeffs, vector_Point2f& imagePoints, Mat& jacobian = Mat(), double aspectRatio = 0)
1252    //
1253
1254    /**
1255     * Projects 3D points to an image plane.
1256     *
     * @param objectPoints Array of object points expressed with respect to the world coordinate frame. A 3xN/Nx3
     * 1-channel or 1xN/Nx1 3-channel array (or vector&lt;Point3f&gt;), where N is the number of points in the view.
1259     * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
1260     * basis from world to camera coordinate system, see REF: calibrateCamera for details.
1261     * @param tvec The translation vector, see parameter description above.
1262     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
1263     * @param distCoeffs Input vector of distortion coefficients
1264     * \(\distcoeffs\) . If the vector is empty, the zero distortion coefficients are assumed.
1265     * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
1266     * vector&lt;Point2f&gt; .
1267     * @param jacobian Optional output 2Nx(10+&lt;numDistCoeffs&gt;) jacobian matrix of derivatives of image
1268     * points with respect to components of the rotation vector, translation vector, focal lengths,
1269     * coordinates of the principal point and the distortion coefficients. In the old interface different
1270     * components of the jacobian are returned via different output parameters.
1271     * @param aspectRatio Optional "fixed aspect ratio" parameter. If the parameter is not 0, the
1272     * function assumes that the aspect ratio (\(f_x / f_y\)) is fixed and correspondingly adjusts the
1273     * jacobian matrix.
1274     *
1275     * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
     * extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial
     * derivatives of image point coordinates (as functions of all the input parameters) with respect to
1278     * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
1279     * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
1280     * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
1281     * parameters.
1282     *
1283     * <b>Note:</b> By setting rvec = tvec = \([0, 0, 0]\), or by setting cameraMatrix to a 3x3 identity matrix,
1284     * or by passing zero distortion coefficients, one can get various useful partial cases of the
1285     * function. This means, one can compute the distorted coordinates for a sparse set of points or apply
1286     * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
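     *
     * A minimal usage sketch (the intrinsics and the single object point are illustrative only):
     * <pre>{@code
     * MatOfPoint3f objectPoints = new MatOfPoint3f(new Point3(0, 0, 1));  // one point on the optical axis
     * Mat rvec = Mat.zeros(3, 1, CvType.CV_64F);
     * Mat tvec = Mat.zeros(3, 1, CvType.CV_64F);
     * Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
     * cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800);  // fx = fy = 800
     * cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240);  // principal point (320, 240)
     * MatOfDouble distCoeffs = new MatOfDouble();                // no distortion
     * MatOfPoint2f imagePoints = new MatOfPoint2f();
     * Calib3d.projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, imagePoints);
     * // imagePoints.toArray()[0] is (320, 240): the point projects onto the principal point.
     * }</pre>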
1287     */
1288    public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints, Mat jacobian, double aspectRatio) {
1289        Mat objectPoints_mat = objectPoints;
1290        Mat distCoeffs_mat = distCoeffs;
1291        Mat imagePoints_mat = imagePoints;
1292        projectPoints_0(objectPoints_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, imagePoints_mat.nativeObj, jacobian.nativeObj, aspectRatio);
1293    }
1294
1295    /**
1296     * Projects 3D points to an image plane.
1297     *
     * @param objectPoints Array of object points expressed with respect to the world coordinate frame. A 3xN/Nx3
     * 1-channel or 1xN/Nx1 3-channel array (or vector&lt;Point3f&gt;), where N is the number of points in the view.
1300     * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
1301     * basis from world to camera coordinate system, see REF: calibrateCamera for details.
1302     * @param tvec The translation vector, see parameter description above.
1303     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
1304     * @param distCoeffs Input vector of distortion coefficients
1305     * \(\distcoeffs\) . If the vector is empty, the zero distortion coefficients are assumed.
1306     * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
1307     * vector&lt;Point2f&gt; .
1308     * @param jacobian Optional output 2Nx(10+&lt;numDistCoeffs&gt;) jacobian matrix of derivatives of image
1309     * points with respect to components of the rotation vector, translation vector, focal lengths,
1310     * coordinates of the principal point and the distortion coefficients. In the old interface different
1311     * components of the jacobian are returned via different output parameters.
     * The aspect ratio (\(f_x / f_y\)) is not assumed to be fixed in this overload (aspectRatio defaults to 0).
1314     *
1315     * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
     * extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial
     * derivatives of image point coordinates (as functions of all the input parameters) with respect to
1318     * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
1319     * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
1320     * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
1321     * parameters.
1322     *
1323     * <b>Note:</b> By setting rvec = tvec = \([0, 0, 0]\), or by setting cameraMatrix to a 3x3 identity matrix,
1324     * or by passing zero distortion coefficients, one can get various useful partial cases of the
1325     * function. This means, one can compute the distorted coordinates for a sparse set of points or apply
1326     * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
1327     */
1328    public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints, Mat jacobian) {
1329        Mat objectPoints_mat = objectPoints;
1330        Mat distCoeffs_mat = distCoeffs;
1331        Mat imagePoints_mat = imagePoints;
1332        projectPoints_1(objectPoints_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, imagePoints_mat.nativeObj, jacobian.nativeObj);
1333    }
1334
1335    /**
1336     * Projects 3D points to an image plane.
1337     *
     * @param objectPoints Array of object points expressed with respect to the world coordinate frame. A 3xN/Nx3
     * 1-channel or 1xN/Nx1 3-channel array (or vector&lt;Point3f&gt;), where N is the number of points in the view.
1340     * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
1341     * basis from world to camera coordinate system, see REF: calibrateCamera for details.
1342     * @param tvec The translation vector, see parameter description above.
1343     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
1344     * @param distCoeffs Input vector of distortion coefficients
1345     * \(\distcoeffs\) . If the vector is empty, the zero distortion coefficients are assumed.
1346     * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
1347     * vector&lt;Point2f&gt; .
     * This overload does not return the Jacobian (jacobian defaults to an empty Mat) and does not assume a
     * fixed aspect ratio (aspectRatio defaults to 0).
1353     *
1354     * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
     * extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial
     * derivatives of image point coordinates (as functions of all the input parameters) with respect to
1357     * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
1358     * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
1359     * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
1360     * parameters.
1361     *
1362     * <b>Note:</b> By setting rvec = tvec = \([0, 0, 0]\), or by setting cameraMatrix to a 3x3 identity matrix,
1363     * or by passing zero distortion coefficients, one can get various useful partial cases of the
1364     * function. This means, one can compute the distorted coordinates for a sparse set of points or apply
1365     * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
1366     */
1367    public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints) {
1368        Mat objectPoints_mat = objectPoints;
1369        Mat distCoeffs_mat = distCoeffs;
1370        Mat imagePoints_mat = imagePoints;
1371        projectPoints_2(objectPoints_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, imagePoints_mat.nativeObj);
1372    }
1373
1374
1375    //
1376    // C++:  bool cv::solvePnP(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE)
1377    //
1378
1379    /**
1380     * Finds an object pose from 3D-2D point correspondences.
1381     * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
1382     * coordinate frame to the camera coordinate frame, using different methods:
1383     * <ul>
1384     *   <li>
1385     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
1386     *   </li>
1387     *   <li>
1388     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar.
1389     *   </li>
1390     *   <li>
1391     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
1392     * Number of input points must be 4. Object points must be defined in the following order:
1393     *   <ul>
1394     *     <li>
1395     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
1396     *     </li>
1397     *     <li>
1398     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
1399     *     </li>
1400     *     <li>
1401     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
1402     *     </li>
1403     *     <li>
1404     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
1405     *     </li>
     *   </ul>
     *   </li>
1407     *   <li>
1408     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
1409     *   </li>
1410     * </ul>
1411     *
1412     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1413     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
1414     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1415     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
1416     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
1417     * @param distCoeffs Input vector of distortion coefficients
1418     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
1419     * assumed.
1420     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
1421     * the model coordinate system to the camera coordinate system.
1422     * @param tvec Output translation vector.
1423     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
1424     * the provided rvec and tvec values as initial approximations of the rotation and translation
1425     * vectors, respectively, and further optimizes them.
1426     * @param flags Method for solving a PnP problem:
1427     * <ul>
1428     *   <li>
1429     *    REF: SOLVEPNP_ITERATIVE Iterative method is based on a Levenberg-Marquardt optimization. In
1430     * this case the function finds such a pose that minimizes reprojection error, that is the sum
1431     * of squared distances between the observed projections imagePoints and the projected (using
1432     * REF: projectPoints ) objectPoints .
1433     *   </li>
1434     *   <li>
1435     *    REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
1436     * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
1437     * In this case the function requires exactly four object and image points.
1438     *   </li>
1439     *   <li>
1440     *    REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis
1441     * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
1442     * In this case the function requires exactly four object and image points.
1443     *   </li>
1444     *   <li>
1445     *    REF: SOLVEPNP_EPNP Method has been introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the
1446     * paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (CITE: lepetit2009epnp).
1447     *   </li>
1448     *   <li>
     *    REF: SOLVEPNP_DLS <b>Broken implementation. Using this flag will fall back to EPnP.</b>
1450     * Method is based on the paper of J. Hesch and S. Roumeliotis.
1451     * "A Direct Least-Squares (DLS) Method for PnP" (CITE: hesch2011direct).
1452     *   </li>
1453     *   <li>
     *    REF: SOLVEPNP_UPNP <b>Broken implementation. Using this flag will fall back to EPnP.</b>
1455     * Method is based on the paper of A. Penate-Sanchez, J. Andrade-Cetto,
1456     * F. Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
1457     * Estimation" (CITE: penate2013exhaustive). In this case the function also estimates the parameters \(f_x\) and \(f_y\)
1458     * assuming that both have the same value. Then the cameraMatrix is updated with the estimated
1459     * focal length.
1460     *   </li>
1461     *   <li>
1462     *    REF: SOLVEPNP_IPPE Method is based on the paper of T. Collins and A. Bartoli.
1463     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method requires coplanar object points.
1464     *   </li>
1465     *   <li>
1466     *    REF: SOLVEPNP_IPPE_SQUARE Method is based on the paper of Toby Collins and Adrien Bartoli.
1467     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method is suitable for marker pose estimation.
1468     * It requires 4 coplanar object points defined in the following order:
1469     *   <ul>
1470     *     <li>
1471     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
1472     *     </li>
1473     *     <li>
1474     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
1475     *     </li>
1476     *     <li>
1477     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
1478     *     </li>
1479     *     <li>
1480     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
1481     *     </li>
     *   </ul>
     *   </li>
1483     *   <li>
1484     *    REF: SOLVEPNP_SQPNP Method is based on the paper "A Consistently Fast and Globally Optimal Solution to the
     * Perspective-n-Point Problem" by G. Terzakis and M. Lourakis (CITE: Terzakis20). It requires 3 or more points.
1486     *   </li>
1487     * </ul>
1488     *
1489     *
1490     * The function estimates the object pose given a set of object points, their corresponding image
1491     * projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below
1492     * (more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
1493     * and the Z-axis forward).
1494     *
1495     * ![](pnp.jpg)
1496     *
1497     * Points expressed in the world frame \( \bf{X}_w \) are projected into the image plane \( \left[ u, v \right] \)
1498     * using the perspective projection model \( \Pi \) and the camera intrinsic parameters matrix \( \bf{A} \):
1499     *
1500     * \(
1501     *   \begin{align*}
1502     *   \begin{bmatrix}
1503     *   u \\
1504     *   v \\
1505     *   1
1506     *   \end{bmatrix} &amp;=
1507     *   \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{T}_w
1508     *   \begin{bmatrix}
1509     *   X_{w} \\
1510     *   Y_{w} \\
1511     *   Z_{w} \\
1512     *   1
1513     *   \end{bmatrix} \\
1514     *   \begin{bmatrix}
1515     *   u \\
1516     *   v \\
1517     *   1
1518     *   \end{bmatrix} &amp;=
1519     *   \begin{bmatrix}
1520     *   f_x &amp; 0 &amp; c_x \\
1521     *   0 &amp; f_y &amp; c_y \\
1522     *   0 &amp; 0 &amp; 1
1523     *   \end{bmatrix}
1524     *   \begin{bmatrix}
1525     *   1 &amp; 0 &amp; 0 &amp; 0 \\
1526     *   0 &amp; 1 &amp; 0 &amp; 0 \\
1527     *   0 &amp; 0 &amp; 1 &amp; 0
1528     *   \end{bmatrix}
1529     *   \begin{bmatrix}
1530     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
1531     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
1532     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
1533     *   0 &amp; 0 &amp; 0 &amp; 1
1534     *   \end{bmatrix}
1535     *   \begin{bmatrix}
1536     *   X_{w} \\
1537     *   Y_{w} \\
1538     *   Z_{w} \\
1539     *   1
1540     *   \end{bmatrix}
1541     *   \end{align*}
1542     * \)
1543     *
1544     * The estimated pose is thus the rotation ({@code rvec}) and the translation ({@code tvec}) vectors that allow transforming
1545     * a 3D point expressed in the world frame into the camera frame:
1546     *
1547     * \(
1548     *   \begin{align*}
1549     *   \begin{bmatrix}
1550     *   X_c \\
1551     *   Y_c \\
1552     *   Z_c \\
1553     *   1
1554     *   \end{bmatrix} &amp;=
1555     *   \hspace{0.2em} ^{c}\bf{T}_w
1556     *   \begin{bmatrix}
1557     *   X_{w} \\
1558     *   Y_{w} \\
1559     *   Z_{w} \\
1560     *   1
1561     *   \end{bmatrix} \\
1562     *   \begin{bmatrix}
1563     *   X_c \\
1564     *   Y_c \\
1565     *   Z_c \\
1566     *   1
1567     *   \end{bmatrix} &amp;=
1568     *   \begin{bmatrix}
1569     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
1570     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
1571     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
1572     *   0 &amp; 0 &amp; 0 &amp; 1
1573     *   \end{bmatrix}
1574     *   \begin{bmatrix}
1575     *   X_{w} \\
1576     *   Y_{w} \\
1577     *   Z_{w} \\
1578     *   1
1579     *   \end{bmatrix}
1580     *   \end{align*}
1581     * \)
1582     *
1583     * <b>Note:</b>
1584     * <ul>
1585     *   <li>
1586     *       An example of how to use solvePnP for planar augmented reality can be found at
1587     *         opencv_source_code/samples/python/plane_ar.py
1588     *   </li>
1589     *   <li>
1590     *       If you are using Python:
1591     *   <ul>
1592     *     <li>
1593     *          Numpy array slices won't work as input because solvePnP requires contiguous
1594     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
1595     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
1596     *     </li>
1597     *     <li>
1598     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
1599     *         to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
1600     *         which requires 2-channel information.
1601     *     </li>
1602     *     <li>
1603     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
1604     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
1605     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
1606     *     </li>
1607     *   </ul>
1608     *   <li>
1609     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
1610     *        unstable and sometimes give completely wrong results. If you pass one of these two
1611     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
1612     *   </li>
1613     *   <li>
1614     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
1615     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
1616     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
1617     *   </li>
1618     *   <li>
1619     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
1620     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
1621     *        global solution to converge.
1622     *   </li>
1623     *   <li>
1624     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
1625     *   </li>
1626     *   <li>
1627     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
1628     *        Number of input points must be 4. Object points must be defined in the following order:
1629     *   <ul>
1630     *     <li>
1631     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
1632     *     </li>
1633     *     <li>
1634     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
1635     *     </li>
1636     *     <li>
1637     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
1638     *     </li>
1639     *     <li>
1640     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
1641     *     </li>
     *   </ul>
     *   </li>
     *   <li>
     *       With REF: SOLVEPNP_SQPNP input points must be &gt;= 3
     *   </li>
1649     * </ul>
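     *
     * A minimal usage sketch (the intrinsics and the point correspondences are illustrative only; the
     * object points follow the REF: SOLVEPNP_IPPE_SQUARE ordering listed above with squareLength = 1):
     * <pre>{@code
     * MatOfPoint3f objectPoints = new MatOfPoint3f(
     *         new Point3(-0.5,  0.5, 0), new Point3( 0.5,  0.5, 0),
     *         new Point3( 0.5, -0.5, 0), new Point3(-0.5, -0.5, 0));
     * MatOfPoint2f imagePoints = new MatOfPoint2f(
     *         new Point(220, 140), new Point(420, 140),
     *         new Point(420, 340), new Point(220, 340));
     * Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
     * cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800);
     * cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240);
     * MatOfDouble distCoeffs = new MatOfDouble();
     * Mat rvec = new Mat(), tvec = new Mat();
     * boolean ok = Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs,
     *         rvec, tvec, false, Calib3d.SOLVEPNP_IPPE_SQUARE);
     * }</pre>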
1650     * @return automatically generated
1651     */
1652    public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int flags) {
1653        Mat objectPoints_mat = objectPoints;
1654        Mat imagePoints_mat = imagePoints;
1655        Mat distCoeffs_mat = distCoeffs;
1656        return solvePnP_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, flags);
1657    }
1658
1659    /**
1660     * Finds an object pose from 3D-2D point correspondences.
1661     * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
1662     * coordinate frame to the camera coordinate frame, using different methods:
1663     * <ul>
1664     *   <li>
1665     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
1666     *   </li>
1667     *   <li>
1668     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar.
1669     *   </li>
1670     *   <li>
1671     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
1672     * Number of input points must be 4. Object points must be defined in the following order:
1673     *   <ul>
1674     *     <li>
1675     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
1676     *     </li>
1677     *     <li>
1678     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
1679     *     </li>
1680     *     <li>
1681     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
1682     *     </li>
1683     *     <li>
1684     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
1685     *     </li>
     *   </ul>
     *   </li>
1687     *   <li>
1688     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
1689     *   </li>
1690     * </ul>
1691     *
1692     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1693     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
1694     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1695     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
1696     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
1697     * @param distCoeffs Input vector of distortion coefficients
1698     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
1699     * assumed.
1700     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
1701     * the model coordinate system to the camera coordinate system.
1702     * @param tvec Output translation vector.
1703     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
1704     * the provided rvec and tvec values as initial approximations of the rotation and translation
1705     * vectors, respectively, and further optimizes them.
     * The solver method defaults to REF: SOLVEPNP_ITERATIVE in this overload. For reference, the available methods are:
     * <ul>
1707     *   <li>
1708     *    REF: SOLVEPNP_ITERATIVE Iterative method is based on a Levenberg-Marquardt optimization. In
1709     * this case the function finds such a pose that minimizes reprojection error, that is the sum
1710     * of squared distances between the observed projections imagePoints and the projected (using
1711     * REF: projectPoints ) objectPoints .
1712     *   </li>
1713     *   <li>
1714     *    REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
1715     * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
1716     * In this case the function requires exactly four object and image points.
1717     *   </li>
1718     *   <li>
1719     *    REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis
1720     * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
1721     * In this case the function requires exactly four object and image points.
1722     *   </li>
1723     *   <li>
1724     *    REF: SOLVEPNP_EPNP Method has been introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the
1725     * paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (CITE: lepetit2009epnp).
1726     *   </li>
1727     *   <li>
     *    REF: SOLVEPNP_DLS <b>Broken implementation. Using this flag will fall back to EPnP.</b>
1729     * Method is based on the paper of J. Hesch and S. Roumeliotis.
1730     * "A Direct Least-Squares (DLS) Method for PnP" (CITE: hesch2011direct).
1731     *   </li>
1732     *   <li>
     *    REF: SOLVEPNP_UPNP <b>Broken implementation. Using this flag will fall back to EPnP.</b>
1734     * Method is based on the paper of A. Penate-Sanchez, J. Andrade-Cetto,
1735     * F. Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
1736     * Estimation" (CITE: penate2013exhaustive). In this case the function also estimates the parameters \(f_x\) and \(f_y\)
1737     * assuming that both have the same value. Then the cameraMatrix is updated with the estimated
1738     * focal length.
1739     *   </li>
1740     *   <li>
1741     *    REF: SOLVEPNP_IPPE Method is based on the paper of T. Collins and A. Bartoli.
1742     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method requires coplanar object points.
1743     *   </li>
1744     *   <li>
1745     *    REF: SOLVEPNP_IPPE_SQUARE Method is based on the paper of Toby Collins and Adrien Bartoli.
1746     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method is suitable for marker pose estimation.
1747     * It requires 4 coplanar object points defined in the following order:
1748     *   <ul>
1749     *     <li>
1750     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
1751     *     </li>
1752     *     <li>
1753     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
1754     *     </li>
1755     *     <li>
1756     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
1757     *     </li>
1758     *     <li>
1759     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
1760     *     </li>
     *   </ul>
     *   </li>
1762     *   <li>
1763     *    REF: SOLVEPNP_SQPNP Method is based on the paper "A Consistently Fast and Globally Optimal Solution to the
     * Perspective-n-Point Problem" by G. Terzakis and M. Lourakis (CITE: Terzakis20). It requires 3 or more points.
1765     *   </li>
1766     * </ul>
1767     *
1768     *
1769     * The function estimates the object pose given a set of object points, their corresponding image
1770     * projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below
1771     * (more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
1772     * and the Z-axis forward).
1773     *
1774     * ![](pnp.jpg)
1775     *
1776     * Points expressed in the world frame \( \bf{X}_w \) are projected into the image plane \( \left[ u, v \right] \)
1777     * using the perspective projection model \( \Pi \) and the camera intrinsic parameters matrix \( \bf{A} \):
1778     *
1779     * \(
1780     *   \begin{align*}
1781     *   \begin{bmatrix}
1782     *   u \\
1783     *   v \\
1784     *   1
1785     *   \end{bmatrix} &amp;=
1786     *   \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{T}_w
1787     *   \begin{bmatrix}
1788     *   X_{w} \\
1789     *   Y_{w} \\
1790     *   Z_{w} \\
1791     *   1
1792     *   \end{bmatrix} \\
1793     *   \begin{bmatrix}
1794     *   u \\
1795     *   v \\
1796     *   1
1797     *   \end{bmatrix} &amp;=
1798     *   \begin{bmatrix}
1799     *   f_x &amp; 0 &amp; c_x \\
1800     *   0 &amp; f_y &amp; c_y \\
1801     *   0 &amp; 0 &amp; 1
1802     *   \end{bmatrix}
1803     *   \begin{bmatrix}
1804     *   1 &amp; 0 &amp; 0 &amp; 0 \\
1805     *   0 &amp; 1 &amp; 0 &amp; 0 \\
1806     *   0 &amp; 0 &amp; 1 &amp; 0
1807     *   \end{bmatrix}
1808     *   \begin{bmatrix}
1809     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
1810     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
1811     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
1812     *   0 &amp; 0 &amp; 0 &amp; 1
1813     *   \end{bmatrix}
1814     *   \begin{bmatrix}
1815     *   X_{w} \\
1816     *   Y_{w} \\
1817     *   Z_{w} \\
1818     *   1
1819     *   \end{bmatrix}
1820     *   \end{align*}
1821     * \)
1822     *
1823     * The estimated pose is thus the rotation ({@code rvec}) and the translation ({@code tvec}) vectors that allow transforming
1824     * a 3D point expressed in the world frame into the camera frame:
1825     *
1826     * \(
1827     *   \begin{align*}
1828     *   \begin{bmatrix}
1829     *   X_c \\
1830     *   Y_c \\
1831     *   Z_c \\
1832     *   1
1833     *   \end{bmatrix} &amp;=
1834     *   \hspace{0.2em} ^{c}\bf{T}_w
1835     *   \begin{bmatrix}
1836     *   X_{w} \\
1837     *   Y_{w} \\
1838     *   Z_{w} \\
1839     *   1
1840     *   \end{bmatrix} \\
1841     *   \begin{bmatrix}
1842     *   X_c \\
1843     *   Y_c \\
1844     *   Z_c \\
1845     *   1
1846     *   \end{bmatrix} &amp;=
1847     *   \begin{bmatrix}
1848     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
1849     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
1850     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
1851     *   0 &amp; 0 &amp; 0 &amp; 1
1852     *   \end{bmatrix}
1853     *   \begin{bmatrix}
1854     *   X_{w} \\
1855     *   Y_{w} \\
1856     *   Z_{w} \\
1857     *   1
1858     *   \end{bmatrix}
1859     *   \end{align*}
1860     * \)
1861     *
1862     * <b>Note:</b>
1863     * <ul>
1864     *   <li>
1865     *       An example of how to use solvePnP for planar augmented reality can be found at
1866     *         opencv_source_code/samples/python/plane_ar.py
1867     *   </li>
1868     *   <li>
1869     *       If you are using Python:
1870     *   <ul>
1871     *     <li>
1872     *          Numpy array slices won't work as input because solvePnP requires contiguous
1873     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
1874     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
1875     *     </li>
1876     *     <li>
1877     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
1878     *         to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
1879     *         which requires 2-channel information.
1880     *     </li>
1881     *     <li>
1882     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
1883     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
1884     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
1885     *     </li>
1886     *   </ul>
1887     *   <li>
1888     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
1889     *        unstable and sometimes give completely wrong results. If you pass one of these two
1890     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
1891     *   </li>
1892     *   <li>
1893     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
1894     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
1895     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
1896     *   </li>
1897     *   <li>
1898     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
1899     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
1900     *        global solution to converge.
1901     *   </li>
1902     *   <li>
1903     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
1904     *   </li>
1905     *   <li>
1906     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
1907     *        Number of input points must be 4. Object points must be defined in the following order:
1908     *   <ul>
1909     *     <li>
1910     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
1911     *     </li>
1912     *     <li>
1913     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
1914     *     </li>
1915     *     <li>
1916     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
1917     *     </li>
1918     *     <li>
1919     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
1920     *     </li>
     *   </ul>
     *   </li>
     *   <li>
     *       With REF: SOLVEPNP_SQPNP input points must be &gt;= 3
     *   </li>
1928     * </ul>
1929     * @return automatically generated
1930     */
1931    public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess) {
1932        Mat objectPoints_mat = objectPoints;
1933        Mat imagePoints_mat = imagePoints;
1934        Mat distCoeffs_mat = distCoeffs;
1935        return solvePnP_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess);
1936    }
1937
1938    /**
1939     * Finds an object pose from 3D-2D point correspondences.
1940     * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
1941     * coordinate frame to the camera coordinate frame, using different methods:
1942     * <ul>
1943     *   <li>
1944     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
1945     *   </li>
1946     *   <li>
1947     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar.
1948     *   </li>
1949     *   <li>
1950     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
1951     * Number of input points must be 4. Object points must be defined in the following order:
1952     *   <ul>
1953     *     <li>
1954     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
1955     *     </li>
1956     *     <li>
1957     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
1958     *     </li>
1959     *     <li>
1960     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
1961     *     </li>
1962     *     <li>
1963     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
1964     *     </li>
     *   </ul>
     *   </li>
1966     *   <li>
1967     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
1968     *   </li>
1969     * </ul>
1970     *
1971     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1972     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
1973     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1974     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
1975     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
1976     * @param distCoeffs Input vector of distortion coefficients
1977     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
1978     * assumed.
1979     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
1980     * the model coordinate system to the camera coordinate system.
1981     * @param tvec Output translation vector.
     * In this overload useExtrinsicGuess defaults to false (rvec and tvec are not used as initial
     * estimates) and the solver method defaults to REF: SOLVEPNP_ITERATIVE. For reference, the available
     * methods are:
     * <ul>
1985     *   <li>
1986     *    REF: SOLVEPNP_ITERATIVE Iterative method is based on a Levenberg-Marquardt optimization. In
1987     * this case the function finds such a pose that minimizes reprojection error, that is the sum
1988     * of squared distances between the observed projections imagePoints and the projected (using
1989     * REF: projectPoints ) objectPoints .
1990     *   </li>
1991     *   <li>
1992     *    REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
1993     * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
1994     * In this case the function requires exactly four object and image points.
1995     *   </li>
1996     *   <li>
1997     *    REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis
1998     * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
1999     * In this case the function requires exactly four object and image points.
2000     *   </li>
2001     *   <li>
2002     *    REF: SOLVEPNP_EPNP Method has been introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the
2003     * paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (CITE: lepetit2009epnp).
2004     *   </li>
2005     *   <li>
     *    REF: SOLVEPNP_DLS <b>Broken implementation. Using this flag will fall back to EPnP.</b>
2007     * Method is based on the paper of J. Hesch and S. Roumeliotis.
2008     * "A Direct Least-Squares (DLS) Method for PnP" (CITE: hesch2011direct).
2009     *   </li>
2010     *   <li>
     *    REF: SOLVEPNP_UPNP <b>Broken implementation. Using this flag will fall back to EPnP.</b>
2012     * Method is based on the paper of A. Penate-Sanchez, J. Andrade-Cetto,
2013     * F. Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
2014     * Estimation" (CITE: penate2013exhaustive). In this case the function also estimates the parameters \(f_x\) and \(f_y\)
2015     * assuming that both have the same value. Then the cameraMatrix is updated with the estimated
2016     * focal length.
2017     *   </li>
2018     *   <li>
2019     *    REF: SOLVEPNP_IPPE Method is based on the paper of T. Collins and A. Bartoli.
2020     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method requires coplanar object points.
2021     *   </li>
2022     *   <li>
2023     *    REF: SOLVEPNP_IPPE_SQUARE Method is based on the paper of Toby Collins and Adrien Bartoli.
2024     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method is suitable for marker pose estimation.
2025     * It requires 4 coplanar object points defined in the following order:
2026     *   <ul>
2027     *     <li>
2028     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
2029     *     </li>
2030     *     <li>
2031     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
2032     *     </li>
2033     *     <li>
2034     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
2035     *     </li>
2036     *     <li>
2037     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
2038     *     </li>
2039     *   </ul>
2040     *   <li>
2041     *    REF: SOLVEPNP_SQPNP Method is based on the paper "A Consistently Fast and Globally Optimal Solution to the
2042     * Perspective-n-Point Problem" by G. Terzakis and M.Lourakis (CITE: Terzakis20). It requires 3 or more points.
2043     *   </li>
2044     * </ul>
2045     *
2046     *
2047     * The function estimates the object pose given a set of object points, their corresponding image
2048     * projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below
2049     * (more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
2050     * and the Z-axis forward).
2051     *
2052     * ![](pnp.jpg)
2053     *
2054     * Points expressed in the world frame \( \bf{X}_w \) are projected into the image plane \( \left[ u, v \right] \)
2055     * using the perspective projection model \( \Pi \) and the camera intrinsic parameters matrix \( \bf{A} \):
2056     *
2057     * \(
2058     *   \begin{align*}
2059     *   \begin{bmatrix}
2060     *   u \\
2061     *   v \\
2062     *   1
2063     *   \end{bmatrix} &amp;=
2064     *   \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{T}_w
2065     *   \begin{bmatrix}
2066     *   X_{w} \\
2067     *   Y_{w} \\
2068     *   Z_{w} \\
2069     *   1
2070     *   \end{bmatrix} \\
2071     *   \begin{bmatrix}
2072     *   u \\
2073     *   v \\
2074     *   1
2075     *   \end{bmatrix} &amp;=
2076     *   \begin{bmatrix}
2077     *   f_x &amp; 0 &amp; c_x \\
2078     *   0 &amp; f_y &amp; c_y \\
2079     *   0 &amp; 0 &amp; 1
2080     *   \end{bmatrix}
2081     *   \begin{bmatrix}
2082     *   1 &amp; 0 &amp; 0 &amp; 0 \\
2083     *   0 &amp; 1 &amp; 0 &amp; 0 \\
2084     *   0 &amp; 0 &amp; 1 &amp; 0
2085     *   \end{bmatrix}
2086     *   \begin{bmatrix}
2087     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
2088     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
2089     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
2090     *   0 &amp; 0 &amp; 0 &amp; 1
2091     *   \end{bmatrix}
2092     *   \begin{bmatrix}
2093     *   X_{w} \\
2094     *   Y_{w} \\
2095     *   Z_{w} \\
2096     *   1
2097     *   \end{bmatrix}
2098     *   \end{align*}
2099     * \)
2100     *
2101     * The estimated pose is thus the rotation ({@code rvec}) and the translation ({@code tvec}) vectors that allow transforming
2102     * a 3D point expressed in the world frame into the camera frame:
2103     *
2104     * \(
2105     *   \begin{align*}
2106     *   \begin{bmatrix}
2107     *   X_c \\
2108     *   Y_c \\
2109     *   Z_c \\
2110     *   1
2111     *   \end{bmatrix} &amp;=
2112     *   \hspace{0.2em} ^{c}\bf{T}_w
2113     *   \begin{bmatrix}
2114     *   X_{w} \\
2115     *   Y_{w} \\
2116     *   Z_{w} \\
2117     *   1
2118     *   \end{bmatrix} \\
2119     *   \begin{bmatrix}
2120     *   X_c \\
2121     *   Y_c \\
2122     *   Z_c \\
2123     *   1
2124     *   \end{bmatrix} &amp;=
2125     *   \begin{bmatrix}
2126     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
2127     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
2128     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
2129     *   0 &amp; 0 &amp; 0 &amp; 1
2130     *   \end{bmatrix}
2131     *   \begin{bmatrix}
2132     *   X_{w} \\
2133     *   Y_{w} \\
2134     *   Z_{w} \\
2135     *   1
2136     *   \end{bmatrix}
2137     *   \end{align*}
2138     * \)
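     *
     * Conversely, the pose of the camera expressed in the world frame is obtained by inverting this
     * transformation, \( \hspace{0.2em} ^{w}\bf{T}_c = \left( \hspace{0.2em} ^{c}\bf{T}_w \right)^{-1} \).
     * In particular, with \( R \) the 3x3 rotation matrix corresponding to {@code rvec} (see REF: Rodrigues )
     * and \( \bf{t} \) = {@code tvec}, the position of the camera center expressed in the world frame is
     * \( -R^T \hspace{0.1em} \bf{t} \).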
2139     *
2140     * <b>Note:</b>
2141     * <ul>
2142     *   <li>
2143     *       An example of how to use solvePnP for planar augmented reality can be found at
2144     *         opencv_source_code/samples/python/plane_ar.py
2145     *   </li>
2146     *   <li>
2147     *       If you are using Python:
2148     *   <ul>
2149     *     <li>
2150     *          Numpy array slices won't work as input because solvePnP requires contiguous
2151     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
2152     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
2153     *     </li>
2154     *     <li>
2155     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
2156     *         to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
2157     *         which requires 2-channel information.
2158     *     </li>
2159     *     <li>
2160     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
2161     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
2162     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
2163     *     </li>
2164     *   </ul>
2165     *   <li>
2166     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
2167     *        unstable and sometimes give completely wrong results. If you pass one of these two
2168     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
2169     *   </li>
2170     *   <li>
2171     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
2172     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
2173     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
2174     *   </li>
2175     *   <li>
2176     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
2177     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
2178     *        global solution to converge.
2179     *   </li>
2180     *   <li>
2181     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
2182     *   </li>
2183     *   <li>
2184     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
2185     *        Number of input points must be 4. Object points must be defined in the following order:
2186     *   <ul>
2187     *     <li>
2188     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
2189     *     </li>
2190     *     <li>
2191     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
2192     *     </li>
2193     *     <li>
2194     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
2195     *     </li>
2196     *     <li>
2197     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
2198     *     </li>
2199     *   </ul>
     *   </li>
     *   <li>
     *       With REF: SOLVEPNP_SQPNP input points must be &gt;= 3
     *   </li>
2206     * </ul>
2207     * @return automatically generated
2208     */
2209    public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec) {
2210        Mat objectPoints_mat = objectPoints;
2211        Mat imagePoints_mat = imagePoints;
2212        Mat distCoeffs_mat = distCoeffs;
2213        return solvePnP_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj);
2214    }
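
    // --- Illustrative usage sketch (hand-written; not part of the auto-generated bindings). ---
    // Shows one plausible way to call the solvePnP overload above for a flat square target, using
    // the point ordering documented for REF: SOLVEPNP_IPPE_SQUARE. Every numeric value below
    // (marker size, detected corners, focal length, principal point) is a made-up assumption; in a
    // real application they come from your detector and camera calibration.
    private static void exampleSolvePnPUsage() {
        double squareLength = 0.05; // assumed marker side length, in meters
        // Object points in the marker coordinate frame (Z = 0 plane).
        MatOfPoint3f objectPoints = new MatOfPoint3f(
                new org.opencv.core.Point3(-squareLength / 2,  squareLength / 2, 0),
                new org.opencv.core.Point3( squareLength / 2,  squareLength / 2, 0),
                new org.opencv.core.Point3( squareLength / 2, -squareLength / 2, 0),
                new org.opencv.core.Point3(-squareLength / 2, -squareLength / 2, 0));
        // Pixel coordinates of the corresponding detected corners (assumed values).
        MatOfPoint2f imagePoints = new MatOfPoint2f(
                new Point(300, 220), new Point(340, 221),
                new Point(341, 260), new Point(299, 259));
        // Intrinsics: fx = fy = 800, principal point at (320, 240) (assumed calibration).
        Mat cameraMatrix = Mat.eye(3, 3, org.opencv.core.CvType.CV_64F);
        cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800);
        cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240);
        MatOfDouble distCoeffs = new MatOfDouble(); // empty => zero distortion assumed
        Mat rvec = new Mat();
        Mat tvec = new Mat();
        boolean found = solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
        if (found) {
            // If a 3x3 rotation matrix is needed, convert the axis-angle vector with Rodrigues
            // (generated elsewhere in this class).
            Mat R = new Mat();
            Rodrigues(rvec, R);
        }
    }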
2215
2216
2217    //
2218    // C++:  bool cv::solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int iterationsCount = 100, float reprojectionError = 8.0, double confidence = 0.99, Mat& inliers = Mat(), int flags = SOLVEPNP_ITERATIVE)
2219    //
2220
2221    /**
2222     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
2223     *
2224     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2225     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2226     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2227     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2228     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2229     * @param distCoeffs Input vector of distortion coefficients
2230     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2231     * assumed.
2232     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2233     * the model coordinate system to the camera coordinate system.
2234     * @param tvec Output translation vector.
2235     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
2236     * the provided rvec and tvec values as initial approximations of the rotation and translation
2237     * vectors, respectively, and further optimizes them.
2238     * @param iterationsCount Number of iterations.
2239     * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
2240     * is the maximum allowed distance between the observed and computed point projections to consider it
2241     * an inlier.
2242     * @param confidence The probability that the algorithm produces a useful result.
2243     * @param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
2244     * @param flags Method for solving a PnP problem (see REF: solvePnP ).
2245     *
2246     * The function estimates an object pose given a set of object points, their corresponding image
2247     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
2248     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
2249     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
2250     * makes the function resistant to outliers.
2251     *
2252     * <b>Note:</b>
2253     * <ul>
2254     *   <li>
     *       An example of how to use solvePnPRansac for object detection can be found at
2256     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
2257     *   </li>
2258     *   <li>
2259     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
2260     *        is #SOLVEPNP_EPNP. Exceptions are:
2261     *   <ul>
2262     *     <li>
2263     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
2264     *     </li>
2265     *     <li>
2266     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
2267     *     </li>
2268     *   </ul>
2269     *   <li>
2270     *       The method used to estimate the camera pose using all the inliers is defined by the
     *        flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
2272     *        the method #SOLVEPNP_EPNP will be used instead.
2273     *   </li>
2274     * </ul>
2275     * @return automatically generated
2276     */
2277    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, Mat inliers, int flags) {
2278        Mat objectPoints_mat = objectPoints;
2279        Mat imagePoints_mat = imagePoints;
2280        Mat distCoeffs_mat = distCoeffs;
2281        return solvePnPRansac_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError, confidence, inliers.nativeObj, flags);
2282    }
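
    // --- Illustrative usage sketch (hand-written; not part of the auto-generated bindings). ---
    // Hypothetical call to the fully-parameterized solvePnPRansac overload above. The input
    // correspondences and intrinsics are assumed to come from the caller; the RANSAC settings are
    // arbitrary illustrative choices, and SOLVEPNP_EPNP is assumed to be one of the SolvePnPMethod
    // constants generated elsewhere in this class.
    private static void exampleSolvePnPRansacUsage(MatOfPoint3f objectPoints,
                                                   MatOfPoint2f imagePoints,
                                                   Mat cameraMatrix,
                                                   MatOfDouble distCoeffs) {
        Mat rvec = new Mat();
        Mat tvec = new Mat();
        Mat inliers = new Mat(); // receives the indices of the inlier correspondences
        boolean ok = solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                rvec, tvec,
                false,  // useExtrinsicGuess: do not start from a previous pose
                200,    // iterationsCount: more than the default of 100
                3.0f,   // reprojectionError: inlier threshold in pixels (assumed)
                0.99,   // confidence
                inliers,
                SOLVEPNP_EPNP);
        if (ok) {
            int inlierCount = inliers.rows(); // correspondences kept by the consensus step
        }
    }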
2283
2284    /**
2285     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
2286     *
2287     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2288     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2289     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2290     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2291     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2292     * @param distCoeffs Input vector of distortion coefficients
2293     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2294     * assumed.
2295     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2296     * the model coordinate system to the camera coordinate system.
2297     * @param tvec Output translation vector.
2298     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
2299     * the provided rvec and tvec values as initial approximations of the rotation and translation
2300     * vectors, respectively, and further optimizes them.
2301     * @param iterationsCount Number of iterations.
2302     * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
2303     * is the maximum allowed distance between the observed and computed point projections to consider it
2304     * an inlier.
2305     * @param confidence The probability that the algorithm produces a useful result.
2306     * @param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
2307     *
2308     * The function estimates an object pose given a set of object points, their corresponding image
2309     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
2310     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
2311     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
2312     * makes the function resistant to outliers.
2313     *
2314     * <b>Note:</b>
2315     * <ul>
2316     *   <li>
     *       An example of how to use solvePnPRansac for object detection can be found at
2318     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
2319     *   </li>
2320     *   <li>
2321     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
2322     *        is #SOLVEPNP_EPNP. Exceptions are:
2323     *   <ul>
2324     *     <li>
2325     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
2326     *     </li>
2327     *     <li>
2328     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
2329     *     </li>
2330     *   </ul>
2331     *   <li>
2332     *       The method used to estimate the camera pose using all the inliers is defined by the
     *        flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
2334     *        the method #SOLVEPNP_EPNP will be used instead.
2335     *   </li>
2336     * </ul>
2337     * @return automatically generated
2338     */
2339    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, Mat inliers) {
2340        Mat objectPoints_mat = objectPoints;
2341        Mat imagePoints_mat = imagePoints;
2342        Mat distCoeffs_mat = distCoeffs;
2343        return solvePnPRansac_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError, confidence, inliers.nativeObj);
2344    }
2345
2346    /**
2347     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
2348     *
2349     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2350     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2351     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2352     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2353     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2354     * @param distCoeffs Input vector of distortion coefficients
2355     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2356     * assumed.
2357     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2358     * the model coordinate system to the camera coordinate system.
2359     * @param tvec Output translation vector.
2360     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
2361     * the provided rvec and tvec values as initial approximations of the rotation and translation
2362     * vectors, respectively, and further optimizes them.
2363     * @param iterationsCount Number of iterations.
2364     * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
2365     * is the maximum allowed distance between the observed and computed point projections to consider it
2366     * an inlier.
2367     * @param confidence The probability that the algorithm produces a useful result.
2368     *
2369     * The function estimates an object pose given a set of object points, their corresponding image
2370     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
2371     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
2372     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
2373     * makes the function resistant to outliers.
2374     *
2375     * <b>Note:</b>
2376     * <ul>
2377     *   <li>
     *       An example of how to use solvePnPRansac for object detection can be found at
2379     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
2380     *   </li>
2381     *   <li>
2382     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
2383     *        is #SOLVEPNP_EPNP. Exceptions are:
2384     *   <ul>
2385     *     <li>
2386     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
2387     *     </li>
2388     *     <li>
2389     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
2390     *     </li>
2391     *   </ul>
2392     *   <li>
2393     *       The method used to estimate the camera pose using all the inliers is defined by the
     *        flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
2395     *        the method #SOLVEPNP_EPNP will be used instead.
2396     *   </li>
2397     * </ul>
2398     * @return automatically generated
2399     */
2400    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence) {
2401        Mat objectPoints_mat = objectPoints;
2402        Mat imagePoints_mat = imagePoints;
2403        Mat distCoeffs_mat = distCoeffs;
2404        return solvePnPRansac_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError, confidence);
2405    }
2406
2407    /**
2408     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
2409     *
2410     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2411     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2412     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2413     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2414     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2415     * @param distCoeffs Input vector of distortion coefficients
2416     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2417     * assumed.
2418     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2419     * the model coordinate system to the camera coordinate system.
2420     * @param tvec Output translation vector.
2421     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
2422     * the provided rvec and tvec values as initial approximations of the rotation and translation
2423     * vectors, respectively, and further optimizes them.
2424     * @param iterationsCount Number of iterations.
2425     * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
2426     * is the maximum allowed distance between the observed and computed point projections to consider it
2427     * an inlier.
2428     *
2429     * The function estimates an object pose given a set of object points, their corresponding image
2430     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
2431     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
2432     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
2433     * makes the function resistant to outliers.
2434     *
2435     * <b>Note:</b>
2436     * <ul>
2437     *   <li>
     *       An example of how to use solvePnPRansac for object detection can be found at
2439     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
2440     *   </li>
2441     *   <li>
2442     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
2443     *        is #SOLVEPNP_EPNP. Exceptions are:
2444     *   <ul>
2445     *     <li>
2446     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
2447     *     </li>
2448     *     <li>
2449     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
2450     *     </li>
2451     *   </ul>
2452     *   <li>
2453     *       The method used to estimate the camera pose using all the inliers is defined by the
     *        flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
2455     *        the method #SOLVEPNP_EPNP will be used instead.
2456     *   </li>
2457     * </ul>
2458     * @return automatically generated
2459     */
2460    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError) {
2461        Mat objectPoints_mat = objectPoints;
2462        Mat imagePoints_mat = imagePoints;
2463        Mat distCoeffs_mat = distCoeffs;
2464        return solvePnPRansac_3(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError);
2465    }
2466
2467    /**
2468     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
2469     *
2470     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2471     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2472     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2473     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2474     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2475     * @param distCoeffs Input vector of distortion coefficients
2476     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2477     * assumed.
2478     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2479     * the model coordinate system to the camera coordinate system.
2480     * @param tvec Output translation vector.
2481     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
2482     * the provided rvec and tvec values as initial approximations of the rotation and translation
2483     * vectors, respectively, and further optimizes them.
2484     * @param iterationsCount Number of iterations.
     * The remaining parameters take their default values: reprojectionError = 8.0, confidence = 0.99,
     * and flags = REF: SOLVEPNP_ITERATIVE; the inlier indices are not returned.
2487     *
2488     * The function estimates an object pose given a set of object points, their corresponding image
2489     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
2490     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
2491     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
2492     * makes the function resistant to outliers.
2493     *
2494     * <b>Note:</b>
2495     * <ul>
2496     *   <li>
     *       An example of how to use solvePnPRansac for object detection can be found at
2498     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
2499     *   </li>
2500     *   <li>
2501     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
2502     *        is #SOLVEPNP_EPNP. Exceptions are:
2503     *   <ul>
2504     *     <li>
2505     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
2506     *     </li>
2507     *     <li>
2508     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
2509     *     </li>
2510     *   </ul>
2511     *   <li>
2512     *       The method used to estimate the camera pose using all the inliers is defined by the
     *        flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
2514     *        the method #SOLVEPNP_EPNP will be used instead.
2515     *   </li>
2516     * </ul>
2517     * @return automatically generated
2518     */
2519    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount) {
2520        Mat objectPoints_mat = objectPoints;
2521        Mat imagePoints_mat = imagePoints;
2522        Mat distCoeffs_mat = distCoeffs;
2523        return solvePnPRansac_4(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount);
2524    }
2525
2526    /**
2527     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
2528     *
2529     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2530     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2531     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2532     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2533     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2534     * @param distCoeffs Input vector of distortion coefficients
2535     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2536     * assumed.
2537     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2538     * the model coordinate system to the camera coordinate system.
2539     * @param tvec Output translation vector.
2540     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
2541     * the provided rvec and tvec values as initial approximations of the rotation and translation
2542     * vectors, respectively, and further optimizes them.
     * The remaining parameters take their default values: iterationsCount = 100, reprojectionError = 8.0,
     * confidence = 0.99, and flags = REF: SOLVEPNP_ITERATIVE; the inlier indices are not returned.
2545     *
2546     * The function estimates an object pose given a set of object points, their corresponding image
2547     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
2548     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
2549     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
2550     * makes the function resistant to outliers.
2551     *
2552     * <b>Note:</b>
2553     * <ul>
2554     *   <li>
     *       An example of how to use solvePnPRansac for object detection can be found at
2556     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
2557     *   </li>
2558     *   <li>
2559     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
2560     *        is #SOLVEPNP_EPNP. Exceptions are:
2561     *   <ul>
2562     *     <li>
2563     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
2564     *     </li>
2565     *     <li>
2566     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
2567     *     </li>
2568     *   </ul>
2569     *   <li>
2570     *       The method used to estimate the camera pose using all the inliers is defined by the
     *        flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
2572     *        the method #SOLVEPNP_EPNP will be used instead.
2573     *   </li>
2574     * </ul>
2575     * @return automatically generated
2576     */
2577    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess) {
2578        Mat objectPoints_mat = objectPoints;
2579        Mat imagePoints_mat = imagePoints;
2580        Mat distCoeffs_mat = distCoeffs;
2581        return solvePnPRansac_5(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess);
2582    }
2583
2584    /**
2585     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
2586     *
2587     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2588     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2589     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2590     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2591     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2592     * @param distCoeffs Input vector of distortion coefficients
2593     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2594     * assumed.
2595     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2596     * the model coordinate system to the camera coordinate system.
2597     * @param tvec Output translation vector.
     * The remaining parameters take their default values: useExtrinsicGuess = false, iterationsCount = 100,
     * reprojectionError = 8.0, confidence = 0.99, and flags = REF: SOLVEPNP_ITERATIVE; the inlier indices
     * are not returned.
2602     *
2603     * The function estimates an object pose given a set of object points, their corresponding image
2604     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
2605     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
2606     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
2607     * makes the function resistant to outliers.
2608     *
2609     * <b>Note:</b>
2610     * <ul>
2611     *   <li>
     *       An example of how to use solvePnPRansac for object detection can be found at
2613     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
2614     *   </li>
2615     *   <li>
2616     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
2617     *        is #SOLVEPNP_EPNP. Exceptions are:
2618     *   <ul>
2619     *     <li>
2620     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
2621     *     </li>
2622     *     <li>
2623     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
2624     *     </li>
2625     *   </ul>
2626     *   <li>
2627     *       The method used to estimate the camera pose using all the inliers is defined by the
     *        flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
2629     *        the method #SOLVEPNP_EPNP will be used instead.
2630     *   </li>
2631     * </ul>
2632     * @return automatically generated
2633     */
2634    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec) {
2635        Mat objectPoints_mat = objectPoints;
2636        Mat imagePoints_mat = imagePoints;
2637        Mat distCoeffs_mat = distCoeffs;
2638        return solvePnPRansac_6(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj);
2639    }
2640
2641
2642    //
2643    // C++:  bool cv::solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat& cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, Mat& inliers, UsacParams params = UsacParams())
2644    //
2645
2646    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, Mat inliers, UsacParams params) {
2647        Mat objectPoints_mat = objectPoints;
2648        Mat imagePoints_mat = imagePoints;
2649        Mat distCoeffs_mat = distCoeffs;
2650        return solvePnPRansac_7(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, inliers.nativeObj, params.nativeObj);
2651    }
2652
2653    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, Mat inliers) {
2654        Mat objectPoints_mat = objectPoints;
2655        Mat imagePoints_mat = imagePoints;
2656        Mat distCoeffs_mat = distCoeffs;
2657        return solvePnPRansac_8(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, inliers.nativeObj);
2658    }
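
    // --- Illustrative usage sketch (hand-written; not part of the auto-generated bindings). ---
    // Hypothetical call to the USAC-based solvePnPRansac overload above with a default-constructed
    // UsacParams. Note that, per the C++ signature quoted above, cameraMatrix is passed by
    // reference (Mat&) and may be refined in place by this variant.
    private static void exampleSolvePnPRansacUsacUsage(MatOfPoint3f objectPoints,
                                                       MatOfPoint2f imagePoints,
                                                       Mat cameraMatrix,
                                                       MatOfDouble distCoeffs) {
        Mat rvec = new Mat();
        Mat tvec = new Mat();
        Mat inliers = new Mat();
        UsacParams params = new UsacParams(); // default USAC configuration (assumed sufficient here)
        boolean ok = solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                rvec, tvec, inliers, params);
    }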
2659
2660
2661    //
2662    // C++:  int cv::solveP3P(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags)
2663    //
2664
2665    /**
2666     * Finds an object pose from 3 3D-2D point correspondences.
2667     *
2668     * @param objectPoints Array of object points in the object coordinate space, 3x3 1-channel or
2669     * 1x3/3x1 3-channel. vector&lt;Point3f&gt; can be also passed here.
2670     * @param imagePoints Array of corresponding image points, 3x2 1-channel or 1x3/3x1 2-channel.
2671     *  vector&lt;Point2f&gt; can be also passed here.
2672     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2673     * @param distCoeffs Input vector of distortion coefficients
2674     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2675     * assumed.
     * @param rvecs Output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
2677     * the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions.
2678     * @param tvecs Output translation vectors.
2679     * @param flags Method for solving a P3P problem:
2680     * <ul>
2681     *   <li>
2682     *    REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
2683     * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
2684     *   </li>
2685     *   <li>
2686     *    REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke and S. Roumeliotis.
2687     * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
2688     *   </li>
2689     * </ul>
2690     *
2691     * The function estimates the object pose given 3 object points, their corresponding image
2692     * projections, as well as the camera intrinsic matrix and the distortion coefficients.
2693     *
2694     * <b>Note:</b>
2695     * The solutions are sorted by reprojection errors (lowest to highest).
2696     * @return automatically generated
2697     */
2698    public static int solveP3P(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags) {
2699        Mat rvecs_mat = new Mat();
2700        Mat tvecs_mat = new Mat();
2701        int retVal = solveP3P_0(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags);
2702        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
2703        rvecs_mat.release();
2704        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
2705        tvecs_mat.release();
2706        return retVal;
2707    }
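
    // --- Illustrative usage sketch (hand-written; not part of the auto-generated bindings). ---
    // Hypothetical solveP3P call with exactly three correspondences. P3P is ambiguous, so up to
    // four candidate poses are returned, sorted by reprojection error (lowest first). The point
    // coordinates are made-up values and SOLVEPNP_P3P is assumed to be one of the SolvePnPMethod
    // constants generated elsewhere in this class.
    private static void exampleSolveP3PUsage(Mat cameraMatrix, MatOfDouble distCoeffs) {
        MatOfPoint3f objectPoints = new MatOfPoint3f(
                new org.opencv.core.Point3(0.0, 0.0, 0.0),
                new org.opencv.core.Point3(0.1, 0.0, 0.0),
                new org.opencv.core.Point3(0.0, 0.1, 0.0));
        MatOfPoint2f imagePoints = new MatOfPoint2f(
                new Point(320, 240), new Point(400, 242), new Point(322, 160));
        List<Mat> rvecs = new ArrayList<>();
        List<Mat> tvecs = new ArrayList<>();
        int nSolutions = solveP3P(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                rvecs, tvecs, SOLVEPNP_P3P);
        for (int i = 0; i < nSolutions; i++) {
            Mat rvecCandidate = rvecs.get(i);
            Mat tvecCandidate = tvecs.get(i);
            // A typical application disambiguates the candidates with a fourth point or with
            // temporal consistency, and keeps the one with the lowest reprojection error.
        }
    }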
2708
2709
2710    //
2711    // C++:  void cv::solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON))
2712    //
2713
2714    /**
2715     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
     * to the camera coordinate frame) from 3D-2D point correspondences and starting from an initial solution.
2717     *
2718     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
2719     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
2720     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2721     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
2722     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2723     * @param distCoeffs Input vector of distortion coefficients
2724     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2725     * assumed.
2726     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2727     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
2728     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
     * @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
2730     *
2731     * The function refines the object pose given at least 3 object points, their corresponding image
2732     * projections, an initial solution for the rotation and translation vector,
2733     * as well as the camera intrinsic matrix and the distortion coefficients.
2734     * The function minimizes the projection error with respect to the rotation and the translation vectors, according
2735     * to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process.
2736     */
2737    public static void solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, TermCriteria criteria) {
2738        solvePnPRefineLM_0(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon);
2739    }
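
    // --- Illustrative usage sketch (hand-written; not part of the auto-generated bindings). ---
    // Refines an existing pose estimate in place with the overload above. rvec and tvec are
    // assumed to already hold an initial solution (e.g. the output of solvePnP or solvePnPRansac);
    // the iteration count and epsilon below are arbitrary illustrative choices.
    private static void exampleSolvePnPRefineLMUsage(Mat objectPoints, Mat imagePoints,
                                                     Mat cameraMatrix, Mat distCoeffs,
                                                     Mat rvec, Mat tvec) {
        TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.COUNT, 50, 1e-9);
        solvePnPRefineLM(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, criteria);
        // rvec and tvec now contain the refined rotation and translation.
    }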
2740
2741    /**
2742     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
     * to the camera coordinate frame) from 3D-2D point correspondences and starting from an initial solution.
2744     *
2745     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
2746     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
2747     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2748     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
2749     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2750     * @param distCoeffs Input vector of distortion coefficients
2751     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2752     * assumed.
2753     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2754     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
2755     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
2756     *
2757     * The function refines the object pose given at least 3 object points, their corresponding image
2758     * projections, an initial solution for the rotation and translation vector,
2759     * as well as the camera intrinsic matrix and the distortion coefficients.
2760     * The function minimizes the projection error with respect to the rotation and the translation vectors, according
2761     * to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process.
2762     */
2763    public static void solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec) {
2764        solvePnPRefineLM_1(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj);
2765    }
2766
2767
2768    //
2769    // C++:  void cv::solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON), double VVSlambda = 1)
2770    //
2771
2772    /**
2773     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
     * to the camera coordinate frame) from 3D-2D point correspondences and starting from an initial solution.
2775     *
2776     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
2777     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
2778     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2779     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
2780     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2781     * @param distCoeffs Input vector of distortion coefficients
2782     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2783     * assumed.
2784     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2785     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
2786     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
     * @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
2788     * @param VVSlambda Gain for the virtual visual servoing control law, equivalent to the \(\alpha\)
2789     * gain in the Damped Gauss-Newton formulation.
2790     *
2791     * The function refines the object pose given at least 3 object points, their corresponding image
2792     * projections, an initial solution for the rotation and translation vector,
2793     * as well as the camera intrinsic matrix and the distortion coefficients.
2794     * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
2795     * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
2796     */
2797    public static void solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, TermCriteria criteria, double VVSlambda) {
2798        solvePnPRefineVVS_0(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon, VVSlambda);
2799    }
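
    // --- Illustrative usage sketch (hand-written; not part of the auto-generated bindings). ---
    // Same idea as the Levenberg-Marquardt refinement above, but using the virtual visual servoing
    // overload with an explicit control-law gain. The gain below is only an illustration (the
    // library default is VVSlambda = 1); a smaller gain takes smaller, more damped steps.
    private static void exampleSolvePnPRefineVVSUsage(Mat objectPoints, Mat imagePoints,
                                                      Mat cameraMatrix, Mat distCoeffs,
                                                      Mat rvec, Mat tvec) {
        TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.COUNT, 20, 1e-8);
        solvePnPRefineVVS(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
                criteria, 0.5);
    }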
2800
2801    /**
2802     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
     * to the camera coordinate frame) from 3D-2D point correspondences and starting from an initial solution.
2804     *
2805     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
2806     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
2807     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2808     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
2809     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2810     * @param distCoeffs Input vector of distortion coefficients
2811     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2812     * assumed.
2813     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2814     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
2815     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
     * @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
     * The virtual visual servoing gain VVSlambda takes its default value of 1.
2818     *
2819     * The function refines the object pose given at least 3 object points, their corresponding image
2820     * projections, an initial solution for the rotation and translation vector,
2821     * as well as the camera intrinsic matrix and the distortion coefficients.
2822     * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
2823     * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
2824     */
2825    public static void solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, TermCriteria criteria) {
2826        solvePnPRefineVVS_1(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon);
2827    }
2828
2829    /**
2830     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
     * to the camera coordinate frame) from 3D-2D point correspondences and starting from an initial solution.
2832     *
2833     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
2834     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
2835     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2836     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
2837     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2838     * @param distCoeffs Input vector of distortion coefficients
2839     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2840     * assumed.
2841     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2842     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
2843     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
     * The termination criteria and the virtual visual servoing gain VVSlambda take their default values.
2845     *
2846     * The function refines the object pose given at least 3 object points, their corresponding image
2847     * projections, an initial solution for the rotation and translation vector,
2848     * as well as the camera intrinsic matrix and the distortion coefficients.
2849     * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
2850     * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
2851     */
2852    public static void solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec) {
2853        solvePnPRefineVVS_2(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj);
2854    }
2855
2856
2857    //
2858    // C++:  int cv::solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE, Mat rvec = Mat(), Mat tvec = Mat(), Mat& reprojectionError = Mat())
2859    //
2860
2861    /**
2862     * Finds an object pose from 3D-2D point correspondences.
2863     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
2864     * couple), depending on the number of input points and the chosen method:
2865     * <ul>
2866     *   <li>
2867     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
2868     *   </li>
2869     *   <li>
2870     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar. Returns 2 solutions.
2871     *   </li>
2872     *   <li>
2873     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
2874     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
2875     *   <ul>
2876     *     <li>
2877     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
2878     *     </li>
2879     *     <li>
2880     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
2881     *     </li>
2882     *     <li>
2883     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
2884     *     </li>
2885     *     <li>
2886     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
2887     *     </li>
2888     *   </ul>
2889     *   <li>
2890     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
2891     * Only 1 solution is returned.
2892     *   </li>
2893     * </ul>
2894     *
2895     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2896     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2897     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2898     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2899     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2900     * @param distCoeffs Input vector of distortion coefficients
2901     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2902     * assumed.
     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
2904     * the model coordinate system to the camera coordinate system.
2905     * @param tvecs Vector of output translation vectors.
2906     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
2907     * the provided rvec and tvec values as initial approximations of the rotation and translation
2908     * vectors, respectively, and further optimizes them.
2909     * @param flags Method for solving a PnP problem:
2910     * <ul>
2911     *   <li>
2912     *    REF: SOLVEPNP_ITERATIVE Iterative method is based on a Levenberg-Marquardt optimization. In
2913     * this case the function finds such a pose that minimizes reprojection error, that is the sum
2914     * of squared distances between the observed projections imagePoints and the projected (using
2915     * projectPoints ) objectPoints .
2916     *   </li>
2917     *   <li>
2918     *    REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
2919     * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
2920     * In this case the function requires exactly four object and image points.
2921     *   </li>
2922     *   <li>
2923     *    REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis
2924     * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
2925     * In this case the function requires exactly four object and image points.
2926     *   </li>
2927     *   <li>
2928     *    REF: SOLVEPNP_EPNP Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
2929     * paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (CITE: lepetit2009epnp).
2930     *   </li>
2931     *   <li>
     *    REF: SOLVEPNP_DLS <b>Broken implementation. Using this flag will fall back to EPnP.</b>
2933     * Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
2934     * "A Direct Least-Squares (DLS) Method for PnP" (CITE: hesch2011direct).
2935     *   </li>
2936     *   <li>
     *    REF: SOLVEPNP_UPNP <b>Broken implementation. Using this flag will fall back to EPnP.</b>
2938     * Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
2939     * F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
2940     * Estimation" (CITE: penate2013exhaustive). In this case the function also estimates the parameters \(f_x\) and \(f_y\)
2941     * assuming that both have the same value. Then the cameraMatrix is updated with the estimated
2942     * focal length.
2943     *   </li>
2944     *   <li>
2945     *    REF: SOLVEPNP_IPPE Method is based on the paper of T. Collins and A. Bartoli.
2946     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method requires coplanar object points.
2947     *   </li>
2948     *   <li>
2949     *    REF: SOLVEPNP_IPPE_SQUARE Method is based on the paper of Toby Collins and Adrien Bartoli.
2950     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method is suitable for marker pose estimation.
2951     * It requires 4 coplanar object points defined in the following order:
2952     *   <ul>
2953     *     <li>
2954     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
2955     *     </li>
2956     *     <li>
2957     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
2958     *     </li>
2959     *     <li>
2960     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
2961     *     </li>
2962     *     <li>
     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
     *     </li>
     *   </ul>
     *   </li>
     * </ul>
     *
     * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
     * and useExtrinsicGuess is set to true.
     * @param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
     * and useExtrinsicGuess is set to true.
     * @param reprojectionError Optional vector of reprojection error, that is the RMS error
     * (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
     * and the 3D object points projected with the estimated pose.
     *
     * The function estimates the object pose given a set of object points, their corresponding image
     * projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below
     * (more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
     * and the Z-axis forward).
2980     *
2981     * ![](pnp.jpg)
2982     *
2983     * Points expressed in the world frame \( \bf{X}_w \) are projected into the image plane \( \left[ u, v \right] \)
2984     * using the perspective projection model \( \Pi \) and the camera intrinsic parameters matrix \( \bf{A} \):
2985     *
2986     * \(
2987     *   \begin{align*}
2988     *   \begin{bmatrix}
2989     *   u \\
2990     *   v \\
2991     *   1
2992     *   \end{bmatrix} &amp;=
2993     *   \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{T}_w
2994     *   \begin{bmatrix}
2995     *   X_{w} \\
2996     *   Y_{w} \\
2997     *   Z_{w} \\
2998     *   1
2999     *   \end{bmatrix} \\
3000     *   \begin{bmatrix}
3001     *   u \\
3002     *   v \\
3003     *   1
3004     *   \end{bmatrix} &amp;=
3005     *   \begin{bmatrix}
3006     *   f_x &amp; 0 &amp; c_x \\
3007     *   0 &amp; f_y &amp; c_y \\
3008     *   0 &amp; 0 &amp; 1
3009     *   \end{bmatrix}
3010     *   \begin{bmatrix}
3011     *   1 &amp; 0 &amp; 0 &amp; 0 \\
3012     *   0 &amp; 1 &amp; 0 &amp; 0 \\
3013     *   0 &amp; 0 &amp; 1 &amp; 0
3014     *   \end{bmatrix}
3015     *   \begin{bmatrix}
3016     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
3017     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
3018     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
3019     *   0 &amp; 0 &amp; 0 &amp; 1
3020     *   \end{bmatrix}
3021     *   \begin{bmatrix}
3022     *   X_{w} \\
3023     *   Y_{w} \\
3024     *   Z_{w} \\
3025     *   1
3026     *   \end{bmatrix}
3027     *   \end{align*}
3028     * \)
3029     *
3030     * The estimated pose is thus the rotation ({@code rvec}) and the translation ({@code tvec}) vectors that allow transforming
3031     * a 3D point expressed in the world frame into the camera frame:
3032     *
3033     * \(
3034     *   \begin{align*}
3035     *   \begin{bmatrix}
3036     *   X_c \\
3037     *   Y_c \\
3038     *   Z_c \\
3039     *   1
3040     *   \end{bmatrix} &amp;=
3041     *   \hspace{0.2em} ^{c}\bf{T}_w
3042     *   \begin{bmatrix}
3043     *   X_{w} \\
3044     *   Y_{w} \\
3045     *   Z_{w} \\
3046     *   1
3047     *   \end{bmatrix} \\
3048     *   \begin{bmatrix}
3049     *   X_c \\
3050     *   Y_c \\
3051     *   Z_c \\
3052     *   1
3053     *   \end{bmatrix} &amp;=
3054     *   \begin{bmatrix}
3055     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
3056     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
3057     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
3058     *   0 &amp; 0 &amp; 0 &amp; 1
3059     *   \end{bmatrix}
3060     *   \begin{bmatrix}
3061     *   X_{w} \\
3062     *   Y_{w} \\
3063     *   Z_{w} \\
3064     *   1
3065     *   \end{bmatrix}
3066     *   \end{align*}
3067     * \)
3068     *
3069     * <b>Note:</b>
3070     * <ul>
3071     *   <li>
3072     *       An example of how to use solvePnP for planar augmented reality can be found at
3073     *         opencv_source_code/samples/python/plane_ar.py
3074     *   </li>
3075     *   <li>
3076     *       If you are using Python:
3077     *   <ul>
3078     *     <li>
3079     *          Numpy array slices won't work as input because solvePnP requires contiguous
3080     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
3081     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
3082     *     </li>
3083     *     <li>
3084     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
3085     *         to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
3086     *         which requires 2-channel information.
3087     *     </li>
3088     *     <li>
3089     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
3090     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
3091     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
3092     *     </li>
3093     *   </ul>
3094     *   <li>
3095     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
3096     *        unstable and sometimes give completely wrong results. If you pass one of these two
3097     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
3098     *   </li>
3099     *   <li>
3100     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
3101     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
3102     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
3103     *   </li>
3104     *   <li>
3105     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
3106     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
3107     *        global solution to converge.
3108     *   </li>
3109     *   <li>
3110     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
3111     *   </li>
3112     *   <li>
3113     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
3114     *        Number of input points must be 4. Object points must be defined in the following order:
3115     *   <ul>
3116     *     <li>
3117     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
3118     *     </li>
3119     *     <li>
3120     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
3121     *     </li>
3122     *     <li>
3123     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
3124     *     </li>
3125     *     <li>
3126     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
3127     *     </li>
3128     *   </ul>
3129     *   </li>
3130     * </ul>
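     *
     * A minimal usage sketch (coordinate and intrinsic values are placeholders; the usual org.opencv.core
     * types such as Point3 and CvType are assumed to be imported, and the SOLVEPNP_* constants are the ones
     * defined in this class) estimating the pose of a square marker with REF: SOLVEPNP_IPPE_SQUARE:
     * <pre>{@code
     * double squareLength = 0.05; // marker side length, e.g. 5 cm
     * MatOfPoint3f objectPoints = new MatOfPoint3f(
     *         new Point3(-squareLength / 2,  squareLength / 2, 0),
     *         new Point3( squareLength / 2,  squareLength / 2, 0),
     *         new Point3( squareLength / 2, -squareLength / 2, 0),
     *         new Point3(-squareLength / 2, -squareLength / 2, 0));
     * MatOfPoint2f imagePoints = new MatOfPoint2f( // detected corners, same order
     *         new Point(300, 200), new Point(380, 205),
     *         new Point(385, 285), new Point(295, 280));
     * Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
     * cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800); // fx, fy
     * cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240); // cx, cy
     * Mat distCoeffs = new Mat(); // empty: zero distortion assumed
     * List<Mat> rvecs = new ArrayList<>();
     * List<Mat> tvecs = new ArrayList<>();
     * Mat reprojectionError = new Mat();
     * int nSolutions = Calib3d.solvePnPGeneric(objectPoints, imagePoints,
     *         cameraMatrix, distCoeffs, rvecs, tvecs,
     *         false, Calib3d.SOLVEPNP_IPPE_SQUARE,
     *         new Mat(), new Mat(), reprojectionError);
     * // rvecs/tvecs hold nSolutions candidate poses; reprojectionError their RMS errors.
     * }</pre>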
3131     * @return automatically generated
3132     */
3133    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags, Mat rvec, Mat tvec, Mat reprojectionError) {
3134        Mat rvecs_mat = new Mat();
3135        Mat tvecs_mat = new Mat();
3136        int retVal = solvePnPGeneric_0(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess, flags, rvec.nativeObj, tvec.nativeObj, reprojectionError.nativeObj);
3137        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
3138        rvecs_mat.release();
3139        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
3140        tvecs_mat.release();
3141        return retVal;
3142    }
3143
3144    /**
3145     * Finds an object pose from 3D-2D point correspondences.
3146     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
3147     * couple), depending on the number of input points and the chosen method:
3148     * <ul>
3149     *   <li>
3150     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
3151     *   </li>
3152     *   <li>
3153     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar. Returns 2 solutions.
3154     *   </li>
3155     *   <li>
3156     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
3157     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
3158     *   <ul>
3159     *     <li>
3160     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
3161     *     </li>
3162     *     <li>
3163     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
3164     *     </li>
3165     *     <li>
3166     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
3167     *     </li>
3168     *     <li>
3169     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
3170     *     </li>
3171     *   </ul>
     *   </li>
     *   <li>
3173     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
3174     * Only 1 solution is returned.
3175     *   </li>
3176     * </ul>
3177     *
3178     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
3179     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
3180     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
3181     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
3182     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
3183     * @param distCoeffs Input vector of distortion coefficients
3184     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
3185     * assumed.
3186     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
3187     * the model coordinate system to the camera coordinate system.
3188     * @param tvecs Vector of output translation vectors.
3189     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
3190     * the provided rvec and tvec values as initial approximations of the rotation and translation
3191     * vectors, respectively, and further optimizes them.
3192     * @param flags Method for solving a PnP problem:
3193     * <ul>
3194     *   <li>
3195     *    REF: SOLVEPNP_ITERATIVE Iterative method is based on a Levenberg-Marquardt optimization. In
3196     * this case the function finds such a pose that minimizes reprojection error, that is the sum
3197     * of squared distances between the observed projections imagePoints and the projected (using
3198     * projectPoints ) objectPoints .
3199     *   </li>
3200     *   <li>
3201     *    REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
3202     * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
3203     * In this case the function requires exactly four object and image points.
3204     *   </li>
3205     *   <li>
3206     *    REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis
3207     * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
3208     * In this case the function requires exactly four object and image points.
3209     *   </li>
3210     *   <li>
3211     *    REF: SOLVEPNP_EPNP Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
3212     * paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (CITE: lepetit2009epnp).
3213     *   </li>
3214     *   <li>
     *    REF: SOLVEPNP_DLS <b>Broken implementation. Using this flag will fall back to EPnP.</b>
3216     * Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
3217     * "A Direct Least-Squares (DLS) Method for PnP" (CITE: hesch2011direct).
3218     *   </li>
3219     *   <li>
     *    REF: SOLVEPNP_UPNP <b>Broken implementation. Using this flag will fall back to EPnP.</b>
3221     * Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
3222     * F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
3223     * Estimation" (CITE: penate2013exhaustive). In this case the function also estimates the parameters \(f_x\) and \(f_y\)
3224     * assuming that both have the same value. Then the cameraMatrix is updated with the estimated
3225     * focal length.
3226     *   </li>
3227     *   <li>
3228     *    REF: SOLVEPNP_IPPE Method is based on the paper of T. Collins and A. Bartoli.
3229     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method requires coplanar object points.
3230     *   </li>
3231     *   <li>
3232     *    REF: SOLVEPNP_IPPE_SQUARE Method is based on the paper of Toby Collins and Adrien Bartoli.
3233     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method is suitable for marker pose estimation.
3234     * It requires 4 coplanar object points defined in the following order:
3235     *   <ul>
3236     *     <li>
3237     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
3238     *     </li>
3239     *     <li>
3240     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
3241     *     </li>
3242     *     <li>
3243     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
3244     *     </li>
3245     *     <li>
     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
     *     </li>
     *   </ul>
     *   </li>
     * </ul>
     *
     * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
     * and useExtrinsicGuess is set to true.
     * @param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
     * and useExtrinsicGuess is set to true.
     *
     * The function estimates the object pose given a set of object points, their corresponding image
     * projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below
     * (more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
     * and the Z-axis forward).
3262     *
3263     * ![](pnp.jpg)
3264     *
3265     * Points expressed in the world frame \( \bf{X}_w \) are projected into the image plane \( \left[ u, v \right] \)
3266     * using the perspective projection model \( \Pi \) and the camera intrinsic parameters matrix \( \bf{A} \):
3267     *
3268     * \(
3269     *   \begin{align*}
3270     *   \begin{bmatrix}
3271     *   u \\
3272     *   v \\
3273     *   1
3274     *   \end{bmatrix} &amp;=
3275     *   \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{T}_w
3276     *   \begin{bmatrix}
3277     *   X_{w} \\
3278     *   Y_{w} \\
3279     *   Z_{w} \\
3280     *   1
3281     *   \end{bmatrix} \\
3282     *   \begin{bmatrix}
3283     *   u \\
3284     *   v \\
3285     *   1
3286     *   \end{bmatrix} &amp;=
3287     *   \begin{bmatrix}
3288     *   f_x &amp; 0 &amp; c_x \\
3289     *   0 &amp; f_y &amp; c_y \\
3290     *   0 &amp; 0 &amp; 1
3291     *   \end{bmatrix}
3292     *   \begin{bmatrix}
3293     *   1 &amp; 0 &amp; 0 &amp; 0 \\
3294     *   0 &amp; 1 &amp; 0 &amp; 0 \\
3295     *   0 &amp; 0 &amp; 1 &amp; 0
3296     *   \end{bmatrix}
3297     *   \begin{bmatrix}
3298     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
3299     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
3300     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
3301     *   0 &amp; 0 &amp; 0 &amp; 1
3302     *   \end{bmatrix}
3303     *   \begin{bmatrix}
3304     *   X_{w} \\
3305     *   Y_{w} \\
3306     *   Z_{w} \\
3307     *   1
3308     *   \end{bmatrix}
3309     *   \end{align*}
3310     * \)
3311     *
3312     * The estimated pose is thus the rotation ({@code rvec}) and the translation ({@code tvec}) vectors that allow transforming
3313     * a 3D point expressed in the world frame into the camera frame:
3314     *
3315     * \(
3316     *   \begin{align*}
3317     *   \begin{bmatrix}
3318     *   X_c \\
3319     *   Y_c \\
3320     *   Z_c \\
3321     *   1
3322     *   \end{bmatrix} &amp;=
3323     *   \hspace{0.2em} ^{c}\bf{T}_w
3324     *   \begin{bmatrix}
3325     *   X_{w} \\
3326     *   Y_{w} \\
3327     *   Z_{w} \\
3328     *   1
3329     *   \end{bmatrix} \\
3330     *   \begin{bmatrix}
3331     *   X_c \\
3332     *   Y_c \\
3333     *   Z_c \\
3334     *   1
3335     *   \end{bmatrix} &amp;=
3336     *   \begin{bmatrix}
3337     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
3338     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
3339     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
3340     *   0 &amp; 0 &amp; 0 &amp; 1
3341     *   \end{bmatrix}
3342     *   \begin{bmatrix}
3343     *   X_{w} \\
3344     *   Y_{w} \\
3345     *   Z_{w} \\
3346     *   1
3347     *   \end{bmatrix}
3348     *   \end{align*}
3349     * \)
3350     *
3351     * <b>Note:</b>
3352     * <ul>
3353     *   <li>
3354     *       An example of how to use solvePnP for planar augmented reality can be found at
3355     *         opencv_source_code/samples/python/plane_ar.py
3356     *   </li>
3357     *   <li>
3358     *       If you are using Python:
3359     *   <ul>
3360     *     <li>
3361     *          Numpy array slices won't work as input because solvePnP requires contiguous
3362     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
3363     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
3364     *     </li>
3365     *     <li>
3366     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
3367     *         to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
3368     *         which requires 2-channel information.
3369     *     </li>
3370     *     <li>
3371     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
3372     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
3373     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
3374     *     </li>
3375     *   </ul>
3376     *   <li>
3377     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
3378     *        unstable and sometimes give completely wrong results. If you pass one of these two
3379     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
3380     *   </li>
3381     *   <li>
3382     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
3383     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
3384     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
3385     *   </li>
3386     *   <li>
3387     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
3388     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
3389     *        global solution to converge.
3390     *   </li>
3391     *   <li>
3392     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
3393     *   </li>
3394     *   <li>
3395     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
3396     *        Number of input points must be 4. Object points must be defined in the following order:
3397     *   <ul>
3398     *     <li>
3399     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
3400     *     </li>
3401     *     <li>
3402     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
3403     *     </li>
3404     *     <li>
3405     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
3406     *     </li>
3407     *     <li>
3408     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
3409     *     </li>
3410     *   </ul>
3411     *   </li>
3412     * </ul>
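     *
     * When an approximate pose is already known, a sketch along the following lines (with {@code objectPoints},
     * {@code imagePoints}, {@code cameraMatrix} and {@code distCoeffs} assumed to be already prepared as
     * described above) refines it with REF: SOLVEPNP_ITERATIVE and {@code useExtrinsicGuess=true}:
     * <pre>{@code
     * Mat rvecInit = new Mat(3, 1, CvType.CV_64F);
     * Mat tvecInit = new Mat(3, 1, CvType.CV_64F);
     * rvecInit.put(0, 0, 0.0, 0.0, 0.0); // e.g. no initial rotation
     * tvecInit.put(0, 0, 0.0, 0.0, 0.5); // e.g. roughly 0.5 m in front of the camera
     * List<Mat> rvecs = new ArrayList<>();
     * List<Mat> tvecs = new ArrayList<>();
     * int nSolutions = Calib3d.solvePnPGeneric(objectPoints, imagePoints,
     *         cameraMatrix, distCoeffs, rvecs, tvecs,
     *         true, Calib3d.SOLVEPNP_ITERATIVE, rvecInit, tvecInit);
     * // With SOLVEPNP_ITERATIVE a single refined solution is returned in rvecs/tvecs.
     * }</pre>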
3413     * @return automatically generated
3414     */
3415    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags, Mat rvec, Mat tvec) {
3416        Mat rvecs_mat = new Mat();
3417        Mat tvecs_mat = new Mat();
3418        int retVal = solvePnPGeneric_1(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess, flags, rvec.nativeObj, tvec.nativeObj);
3419        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
3420        rvecs_mat.release();
3421        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
3422        tvecs_mat.release();
3423        return retVal;
3424    }
3425
3426    /**
3427     * Finds an object pose from 3D-2D point correspondences.
3428     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
3429     * couple), depending on the number of input points and the chosen method:
3430     * <ul>
3431     *   <li>
3432     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
3433     *   </li>
3434     *   <li>
3435     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar. Returns 2 solutions.
3436     *   </li>
3437     *   <li>
3438     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
3439     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
3440     *   <ul>
3441     *     <li>
3442     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
3443     *     </li>
3444     *     <li>
3445     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
3446     *     </li>
3447     *     <li>
3448     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
3449     *     </li>
3450     *     <li>
3451     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
3452     *     </li>
3453     *   </ul>
     *   </li>
     *   <li>
3455     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
3456     * Only 1 solution is returned.
3457     *   </li>
3458     * </ul>
3459     *
3460     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
3461     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
3462     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
3463     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
3464     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
3465     * @param distCoeffs Input vector of distortion coefficients
3466     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
3467     * assumed.
3468     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
3469     * the model coordinate system to the camera coordinate system.
3470     * @param tvecs Vector of output translation vectors.
3471     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
3472     * the provided rvec and tvec values as initial approximations of the rotation and translation
3473     * vectors, respectively, and further optimizes them.
3474     * @param flags Method for solving a PnP problem:
3475     * <ul>
3476     *   <li>
3477     *    REF: SOLVEPNP_ITERATIVE Iterative method is based on a Levenberg-Marquardt optimization. In
3478     * this case the function finds such a pose that minimizes reprojection error, that is the sum
3479     * of squared distances between the observed projections imagePoints and the projected (using
3480     * projectPoints ) objectPoints .
3481     *   </li>
3482     *   <li>
3483     *    REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
3484     * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
3485     * In this case the function requires exactly four object and image points.
3486     *   </li>
3487     *   <li>
3488     *    REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis
3489     * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
3490     * In this case the function requires exactly four object and image points.
3491     *   </li>
3492     *   <li>
3493     *    REF: SOLVEPNP_EPNP Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
3494     * paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (CITE: lepetit2009epnp).
3495     *   </li>
3496     *   <li>
     *    REF: SOLVEPNP_DLS <b>Broken implementation. Using this flag will fall back to EPnP.</b>
3498     * Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
3499     * "A Direct Least-Squares (DLS) Method for PnP" (CITE: hesch2011direct).
3500     *   </li>
3501     *   <li>
     *    REF: SOLVEPNP_UPNP <b>Broken implementation. Using this flag will fall back to EPnP.</b>
3503     * Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
3504     * F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
3505     * Estimation" (CITE: penate2013exhaustive). In this case the function also estimates the parameters \(f_x\) and \(f_y\)
3506     * assuming that both have the same value. Then the cameraMatrix is updated with the estimated
3507     * focal length.
3508     *   </li>
3509     *   <li>
3510     *    REF: SOLVEPNP_IPPE Method is based on the paper of T. Collins and A. Bartoli.
3511     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method requires coplanar object points.
3512     *   </li>
3513     *   <li>
3514     *    REF: SOLVEPNP_IPPE_SQUARE Method is based on the paper of Toby Collins and Adrien Bartoli.
3515     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method is suitable for marker pose estimation.
3516     * It requires 4 coplanar object points defined in the following order:
3517     *   <ul>
3518     *     <li>
3519     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
3520     *     </li>
3521     *     <li>
3522     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
3523     *     </li>
3524     *     <li>
3525     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
3526     *     </li>
3527     *     <li>
     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
     *     </li>
     *   </ul>
     *   </li>
     * </ul>
     *
     * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
     * and useExtrinsicGuess is set to true.
     *
     * The function estimates the object pose given a set of object points, their corresponding image
     * projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below
     * (more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
     * and the Z-axis forward).
3543     *
3544     * ![](pnp.jpg)
3545     *
3546     * Points expressed in the world frame \( \bf{X}_w \) are projected into the image plane \( \left[ u, v \right] \)
3547     * using the perspective projection model \( \Pi \) and the camera intrinsic parameters matrix \( \bf{A} \):
3548     *
3549     * \(
3550     *   \begin{align*}
3551     *   \begin{bmatrix}
3552     *   u \\
3553     *   v \\
3554     *   1
3555     *   \end{bmatrix} &amp;=
3556     *   \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{T}_w
3557     *   \begin{bmatrix}
3558     *   X_{w} \\
3559     *   Y_{w} \\
3560     *   Z_{w} \\
3561     *   1
3562     *   \end{bmatrix} \\
3563     *   \begin{bmatrix}
3564     *   u \\
3565     *   v \\
3566     *   1
3567     *   \end{bmatrix} &amp;=
3568     *   \begin{bmatrix}
3569     *   f_x &amp; 0 &amp; c_x \\
3570     *   0 &amp; f_y &amp; c_y \\
3571     *   0 &amp; 0 &amp; 1
3572     *   \end{bmatrix}
3573     *   \begin{bmatrix}
3574     *   1 &amp; 0 &amp; 0 &amp; 0 \\
3575     *   0 &amp; 1 &amp; 0 &amp; 0 \\
3576     *   0 &amp; 0 &amp; 1 &amp; 0
3577     *   \end{bmatrix}
3578     *   \begin{bmatrix}
3579     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
3580     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
3581     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
3582     *   0 &amp; 0 &amp; 0 &amp; 1
3583     *   \end{bmatrix}
3584     *   \begin{bmatrix}
3585     *   X_{w} \\
3586     *   Y_{w} \\
3587     *   Z_{w} \\
3588     *   1
3589     *   \end{bmatrix}
3590     *   \end{align*}
3591     * \)
3592     *
3593     * The estimated pose is thus the rotation ({@code rvec}) and the translation ({@code tvec}) vectors that allow transforming
3594     * a 3D point expressed in the world frame into the camera frame:
3595     *
3596     * \(
3597     *   \begin{align*}
3598     *   \begin{bmatrix}
3599     *   X_c \\
3600     *   Y_c \\
3601     *   Z_c \\
3602     *   1
3603     *   \end{bmatrix} &amp;=
3604     *   \hspace{0.2em} ^{c}\bf{T}_w
3605     *   \begin{bmatrix}
3606     *   X_{w} \\
3607     *   Y_{w} \\
3608     *   Z_{w} \\
3609     *   1
3610     *   \end{bmatrix} \\
3611     *   \begin{bmatrix}
3612     *   X_c \\
3613     *   Y_c \\
3614     *   Z_c \\
3615     *   1
3616     *   \end{bmatrix} &amp;=
3617     *   \begin{bmatrix}
3618     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
3619     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
3620     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
3621     *   0 &amp; 0 &amp; 0 &amp; 1
3622     *   \end{bmatrix}
3623     *   \begin{bmatrix}
3624     *   X_{w} \\
3625     *   Y_{w} \\
3626     *   Z_{w} \\
3627     *   1
3628     *   \end{bmatrix}
3629     *   \end{align*}
3630     * \)
3631     *
3632     * <b>Note:</b>
3633     * <ul>
3634     *   <li>
3635     *       An example of how to use solvePnP for planar augmented reality can be found at
3636     *         opencv_source_code/samples/python/plane_ar.py
3637     *   </li>
3638     *   <li>
3639     *       If you are using Python:
3640     *   <ul>
3641     *     <li>
3642     *          Numpy array slices won't work as input because solvePnP requires contiguous
3643     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
3644     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
3645     *     </li>
3646     *     <li>
3647     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
3648     *         to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
3649     *         which requires 2-channel information.
3650     *     </li>
3651     *     <li>
3652     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
3653     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
3654     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
3655     *     </li>
3656     *   </ul>
3657     *   <li>
3658     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
3659     *        unstable and sometimes give completely wrong results. If you pass one of these two
3660     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
3661     *   </li>
3662     *   <li>
3663     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
3664     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
3665     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
3666     *   </li>
3667     *   <li>
3668     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
3669     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
3670     *        global solution to converge.
3671     *   </li>
3672     *   <li>
3673     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
3674     *   </li>
3675     *   <li>
3676     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
3677     *        Number of input points must be 4. Object points must be defined in the following order:
3678     *   <ul>
3679     *     <li>
3680     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
3681     *     </li>
3682     *     <li>
3683     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
3684     *     </li>
3685     *     <li>
3686     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
3687     *     </li>
3688     *     <li>
3689     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
3690     *     </li>
3691     *   </ul>
3692     *   </li>
3693     * </ul>
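     *
     * A returned rotation vector can be converted to a 3x3 rotation matrix with REF: Rodrigues. A small
     * sketch, assuming {@code rvecs} and {@code tvecs} were filled by one of the calls shown above:
     * <pre>{@code
     * if (!rvecs.isEmpty()) {
     *     Mat R = new Mat();
     *     Calib3d.Rodrigues(rvecs.get(0), R); // 3x1 rotation vector -> 3x3 rotation matrix
     *     // R together with tvecs.get(0) maps world coordinates into the camera frame.
     * }
     * }</pre>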
3694     * @return automatically generated
3695     */
3696    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags, Mat rvec) {
3697        Mat rvecs_mat = new Mat();
3698        Mat tvecs_mat = new Mat();
3699        int retVal = solvePnPGeneric_2(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess, flags, rvec.nativeObj);
3700        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
3701        rvecs_mat.release();
3702        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
3703        tvecs_mat.release();
3704        return retVal;
3705    }
3706
3707    /**
3708     * Finds an object pose from 3D-2D point correspondences.
3709     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
3710     * couple), depending on the number of input points and the chosen method:
3711     * <ul>
3712     *   <li>
3713     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
3714     *   </li>
3715     *   <li>
3716     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar. Returns 2 solutions.
3717     *   </li>
3718     *   <li>
3719     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
3720     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
3721     *   <ul>
3722     *     <li>
3723     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
3724     *     </li>
3725     *     <li>
3726     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
3727     *     </li>
3728     *     <li>
3729     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
3730     *     </li>
3731     *     <li>
3732     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
3733     *     </li>
3734     *   </ul>
     *   </li>
     *   <li>
3736     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
3737     * Only 1 solution is returned.
3738     *   </li>
3739     * </ul>
3740     *
3741     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
3742     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
3743     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
3744     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
3745     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
3746     * @param distCoeffs Input vector of distortion coefficients
3747     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
3748     * assumed.
3749     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
3750     * the model coordinate system to the camera coordinate system.
3751     * @param tvecs Vector of output translation vectors.
3752     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
3753     * the provided rvec and tvec values as initial approximations of the rotation and translation
3754     * vectors, respectively, and further optimizes them.
3755     * @param flags Method for solving a PnP problem:
3756     * <ul>
3757     *   <li>
3758     *    REF: SOLVEPNP_ITERATIVE Iterative method is based on a Levenberg-Marquardt optimization. In
3759     * this case the function finds such a pose that minimizes reprojection error, that is the sum
3760     * of squared distances between the observed projections imagePoints and the projected (using
3761     * projectPoints ) objectPoints .
3762     *   </li>
3763     *   <li>
3764     *    REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
3765     * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
3766     * In this case the function requires exactly four object and image points.
3767     *   </li>
3768     *   <li>
3769     *    REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis
3770     * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
3771     * In this case the function requires exactly four object and image points.
3772     *   </li>
3773     *   <li>
3774     *    REF: SOLVEPNP_EPNP Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
3775     * paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (CITE: lepetit2009epnp).
3776     *   </li>
3777     *   <li>
     *    REF: SOLVEPNP_DLS <b>Broken implementation. Using this flag will fall back to EPnP.</b>
3779     * Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
3780     * "A Direct Least-Squares (DLS) Method for PnP" (CITE: hesch2011direct).
3781     *   </li>
3782     *   <li>
     *    REF: SOLVEPNP_UPNP <b>Broken implementation. Using this flag will fall back to EPnP.</b>
3784     * Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
3785     * F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
3786     * Estimation" (CITE: penate2013exhaustive). In this case the function also estimates the parameters \(f_x\) and \(f_y\)
3787     * assuming that both have the same value. Then the cameraMatrix is updated with the estimated
3788     * focal length.
3789     *   </li>
3790     *   <li>
3791     *    REF: SOLVEPNP_IPPE Method is based on the paper of T. Collins and A. Bartoli.
3792     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method requires coplanar object points.
3793     *   </li>
3794     *   <li>
3795     *    REF: SOLVEPNP_IPPE_SQUARE Method is based on the paper of Toby Collins and Adrien Bartoli.
3796     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method is suitable for marker pose estimation.
3797     * It requires 4 coplanar object points defined in the following order:
3798     *   <ul>
3799     *     <li>
3800     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
3801     *     </li>
3802     *     <li>
3803     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
3804     *     </li>
3805     *     <li>
3806     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
3807     *     </li>
3808     *     <li>
     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
     *     </li>
     *   </ul>
     *   </li>
     * </ul>
     *
     * The function estimates the object pose given a set of object points, their corresponding image
     * projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below
     * (more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
     * and the Z-axis forward).
3823     *
3824     * ![](pnp.jpg)
3825     *
3826     * Points expressed in the world frame \( \bf{X}_w \) are projected into the image plane \( \left[ u, v \right] \)
3827     * using the perspective projection model \( \Pi \) and the camera intrinsic parameters matrix \( \bf{A} \):
3828     *
3829     * \(
3830     *   \begin{align*}
3831     *   \begin{bmatrix}
3832     *   u \\
3833     *   v \\
3834     *   1
3835     *   \end{bmatrix} &amp;=
3836     *   \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{T}_w
3837     *   \begin{bmatrix}
3838     *   X_{w} \\
3839     *   Y_{w} \\
3840     *   Z_{w} \\
3841     *   1
3842     *   \end{bmatrix} \\
3843     *   \begin{bmatrix}
3844     *   u \\
3845     *   v \\
3846     *   1
3847     *   \end{bmatrix} &amp;=
3848     *   \begin{bmatrix}
3849     *   f_x &amp; 0 &amp; c_x \\
3850     *   0 &amp; f_y &amp; c_y \\
3851     *   0 &amp; 0 &amp; 1
3852     *   \end{bmatrix}
3853     *   \begin{bmatrix}
3854     *   1 &amp; 0 &amp; 0 &amp; 0 \\
3855     *   0 &amp; 1 &amp; 0 &amp; 0 \\
3856     *   0 &amp; 0 &amp; 1 &amp; 0
3857     *   \end{bmatrix}
3858     *   \begin{bmatrix}
3859     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
3860     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
3861     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
3862     *   0 &amp; 0 &amp; 0 &amp; 1
3863     *   \end{bmatrix}
3864     *   \begin{bmatrix}
3865     *   X_{w} \\
3866     *   Y_{w} \\
3867     *   Z_{w} \\
3868     *   1
3869     *   \end{bmatrix}
3870     *   \end{align*}
3871     * \)
3872     *
3873     * The estimated pose is thus the rotation ({@code rvec}) and the translation ({@code tvec}) vectors that allow transforming
3874     * a 3D point expressed in the world frame into the camera frame:
3875     *
3876     * \(
3877     *   \begin{align*}
3878     *   \begin{bmatrix}
3879     *   X_c \\
3880     *   Y_c \\
3881     *   Z_c \\
3882     *   1
3883     *   \end{bmatrix} &amp;=
3884     *   \hspace{0.2em} ^{c}\bf{T}_w
3885     *   \begin{bmatrix}
3886     *   X_{w} \\
3887     *   Y_{w} \\
3888     *   Z_{w} \\
3889     *   1
3890     *   \end{bmatrix} \\
3891     *   \begin{bmatrix}
3892     *   X_c \\
3893     *   Y_c \\
3894     *   Z_c \\
3895     *   1
3896     *   \end{bmatrix} &amp;=
3897     *   \begin{bmatrix}
3898     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
3899     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
3900     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
3901     *   0 &amp; 0 &amp; 0 &amp; 1
3902     *   \end{bmatrix}
3903     *   \begin{bmatrix}
3904     *   X_{w} \\
3905     *   Y_{w} \\
3906     *   Z_{w} \\
3907     *   1
3908     *   \end{bmatrix}
3909     *   \end{align*}
3910     * \)
3911     *
3912     * <b>Note:</b>
3913     * <ul>
3914     *   <li>
3915     *       An example of how to use solvePnP for planar augmented reality can be found at
3916     *         opencv_source_code/samples/python/plane_ar.py
3917     *   </li>
3918     *   <li>
3919     *       If you are using Python:
3920     *   <ul>
3921     *     <li>
3922     *          Numpy array slices won't work as input because solvePnP requires contiguous
3923     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
3924     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
3925     *     </li>
3926     *     <li>
3927     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
3928     *         to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
3929     *         which requires 2-channel information.
3930     *     </li>
3931     *     <li>
3932     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
3933     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
3934     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
3935     *     </li>
3936     *   </ul>
3937     *   <li>
3938     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
3939     *        unstable and sometimes give completely wrong results. If you pass one of these two
3940     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
3941     *   </li>
3942     *   <li>
3943     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
3944     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
3945     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
3946     *   </li>
3947     *   <li>
3948     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
3949     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
3950     *        global solution to converge.
3951     *   </li>
3952     *   <li>
3953     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
3954     *   </li>
3955     *   <li>
3956     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
3957     *        Number of input points must be 4. Object points must be defined in the following order:
3958     *   <ul>
3959     *     <li>
3960     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
3961     *     </li>
3962     *     <li>
3963     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
3964     *     </li>
3965     *     <li>
3966     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
3967     *     </li>
3968     *     <li>
3969     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
3970     *     </li>
3971     *   </ul>
3972     *   </li>
3973     * </ul>
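     *
     * A minimal sketch of the P3P case (three correspondences with placeholder coordinates; the camera
     * intrinsics {@code cameraMatrix} and {@code distCoeffs} are assumed to be already available), which
     * may return several candidate poses:
     * <pre>{@code
     * MatOfPoint3f objectPoints = new MatOfPoint3f(
     *         new Point3(0, 0, 0), new Point3(0.1, 0, 0), new Point3(0, 0.1, 0));
     * MatOfPoint2f imagePoints = new MatOfPoint2f(
     *         new Point(310, 250), new Point(390, 255), new Point(305, 330));
     * List<Mat> rvecs = new ArrayList<>();
     * List<Mat> tvecs = new ArrayList<>();
     * int nSolutions = Calib3d.solvePnPGeneric(objectPoints, imagePoints,
     *         cameraMatrix, distCoeffs, rvecs, tvecs,
     *         false, Calib3d.SOLVEPNP_AP3P);
     * // nSolutions can be 0 to 4; each (rvecs.get(i), tvecs.get(i)) is a candidate pose.
     * }</pre>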
3974     * @return automatically generated
3975     */
3976    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags) {
3977        Mat rvecs_mat = new Mat();
3978        Mat tvecs_mat = new Mat();
3979        int retVal = solvePnPGeneric_3(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess, flags);
3980        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
3981        rvecs_mat.release();
3982        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
3983        tvecs_mat.release();
3984        return retVal;
3985    }
3986
3987    /**
3988     * Finds an object pose from 3D-2D point correspondences.
3989     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
3990     * couple), depending on the number of input points and the chosen method:
3991     * <ul>
3992     *   <li>
3993     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
3994     *   </li>
3995     *   <li>
3996     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar. Returns 2 solutions.
3997     *   </li>
3998     *   <li>
3999     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
4000     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
4001     *   <ul>
4002     *     <li>
4003     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
4004     *     </li>
4005     *     <li>
4006     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
4007     *     </li>
4008     *     <li>
4009     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
4010     *     </li>
4011     *     <li>
4012     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
4013     *     </li>
4014     *   </ul>
     *   </li>
     *   <li>
4016     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
4017     * Only 1 solution is returned.
4018     *   </li>
4019     * </ul>
4020     *
4021     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
4022     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
4023     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
4024     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
4025     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
4026     * @param distCoeffs Input vector of distortion coefficients
4027     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
4028     * assumed.
4029     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
4030     * the model coordinate system to the camera coordinate system.
4031     * @param tvecs Vector of output translation vectors.
4032     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
4033     * the provided rvec and tvec values as initial approximations of the rotation and translation
4034     * vectors, respectively, and further optimizes them.
     *
     * The PnP solution methods referred to by the {@code flags} parameter of the other overloads are:
     * <ul>
4036     *   <li>
4037     *    REF: SOLVEPNP_ITERATIVE Iterative method is based on a Levenberg-Marquardt optimization. In
4038     * this case the function finds such a pose that minimizes reprojection error, that is the sum
4039     * of squared distances between the observed projections imagePoints and the projected (using
4040     * projectPoints ) objectPoints .
4041     *   </li>
4042     *   <li>
4043     *    REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
4044     * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
4045     * In this case the function requires exactly four object and image points.
4046     *   </li>
4047     *   <li>
4048     *    REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis
4049     * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
4050     * In this case the function requires exactly four object and image points.
4051     *   </li>
4052     *   <li>
4053     *    REF: SOLVEPNP_EPNP Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
4054     * paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (CITE: lepetit2009epnp).
4055     *   </li>
4056     *   <li>
     *    REF: SOLVEPNP_DLS <b>Broken implementation. Using this flag will fall back to EPnP.</b>
4058     * Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
4059     * "A Direct Least-Squares (DLS) Method for PnP" (CITE: hesch2011direct).
4060     *   </li>
4061     *   <li>
     *    REF: SOLVEPNP_UPNP <b>Broken implementation. Using this flag will fall back to EPnP.</b>
4063     * Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
4064     * F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
4065     * Estimation" (CITE: penate2013exhaustive). In this case the function also estimates the parameters \(f_x\) and \(f_y\)
4066     * assuming that both have the same value. Then the cameraMatrix is updated with the estimated
4067     * focal length.
4068     *   </li>
4069     *   <li>
4070     *    REF: SOLVEPNP_IPPE Method is based on the paper of T. Collins and A. Bartoli.
4071     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method requires coplanar object points.
4072     *   </li>
4073     *   <li>
4074     *    REF: SOLVEPNP_IPPE_SQUARE Method is based on the paper of Toby Collins and Adrien Bartoli.
4075     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method is suitable for marker pose estimation.
4076     * It requires 4 coplanar object points defined in the following order:
4077     *   <ul>
4078     *     <li>
4079     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
4080     *     </li>
4081     *     <li>
4082     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
4083     *     </li>
4084     *     <li>
4085     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
4086     *     </li>
4087     *     <li>
     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
     *     </li>
     *   </ul>
     *   </li>
     * </ul>
     *
     * The function estimates the object pose given a set of object points, their corresponding image
     * projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below
     * (more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
     * and the Z-axis forward).
4102     *
4103     * ![](pnp.jpg)
4104     *
4105     * Points expressed in the world frame \( \bf{X}_w \) are projected into the image plane \( \left[ u, v \right] \)
4106     * using the perspective projection model \( \Pi \) and the camera intrinsic parameters matrix \( \bf{A} \):
4107     *
4108     * \(
4109     *   \begin{align*}
4110     *   \begin{bmatrix}
4111     *   u \\
4112     *   v \\
4113     *   1
4114     *   \end{bmatrix} &amp;=
4115     *   \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{T}_w
4116     *   \begin{bmatrix}
4117     *   X_{w} \\
4118     *   Y_{w} \\
4119     *   Z_{w} \\
4120     *   1
4121     *   \end{bmatrix} \\
4122     *   \begin{bmatrix}
4123     *   u \\
4124     *   v \\
4125     *   1
4126     *   \end{bmatrix} &amp;=
4127     *   \begin{bmatrix}
4128     *   f_x &amp; 0 &amp; c_x \\
4129     *   0 &amp; f_y &amp; c_y \\
4130     *   0 &amp; 0 &amp; 1
4131     *   \end{bmatrix}
4132     *   \begin{bmatrix}
4133     *   1 &amp; 0 &amp; 0 &amp; 0 \\
4134     *   0 &amp; 1 &amp; 0 &amp; 0 \\
4135     *   0 &amp; 0 &amp; 1 &amp; 0
4136     *   \end{bmatrix}
4137     *   \begin{bmatrix}
4138     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
4139     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
4140     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
4141     *   0 &amp; 0 &amp; 0 &amp; 1
4142     *   \end{bmatrix}
4143     *   \begin{bmatrix}
4144     *   X_{w} \\
4145     *   Y_{w} \\
4146     *   Z_{w} \\
4147     *   1
4148     *   \end{bmatrix}
4149     *   \end{align*}
4150     * \)
4151     *
4152     * The estimated pose is thus the rotation ({@code rvec}) and the translation ({@code tvec}) vectors that allow transforming
4153     * a 3D point expressed in the world frame into the camera frame:
4154     *
4155     * \(
4156     *   \begin{align*}
4157     *   \begin{bmatrix}
4158     *   X_c \\
4159     *   Y_c \\
4160     *   Z_c \\
4161     *   1
4162     *   \end{bmatrix} &amp;=
4163     *   \hspace{0.2em} ^{c}\bf{T}_w
4164     *   \begin{bmatrix}
4165     *   X_{w} \\
4166     *   Y_{w} \\
4167     *   Z_{w} \\
4168     *   1
4169     *   \end{bmatrix} \\
4170     *   \begin{bmatrix}
4171     *   X_c \\
4172     *   Y_c \\
4173     *   Z_c \\
4174     *   1
4175     *   \end{bmatrix} &amp;=
4176     *   \begin{bmatrix}
4177     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
4178     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
4179     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
4180     *   0 &amp; 0 &amp; 0 &amp; 1
4181     *   \end{bmatrix}
4182     *   \begin{bmatrix}
4183     *   X_{w} \\
4184     *   Y_{w} \\
4185     *   Z_{w} \\
4186     *   1
4187     *   \end{bmatrix}
4188     *   \end{align*}
4189     * \)
4190     *
4191     * <b>Note:</b>
4192     * <ul>
4193     *   <li>
4194     *       An example of how to use solvePnP for planar augmented reality can be found at
4195     *         opencv_source_code/samples/python/plane_ar.py
4196     *   </li>
4197     *   <li>
4198     *       If you are using Python:
4199     *   <ul>
4200     *     <li>
4201     *          Numpy array slices won't work as input because solvePnP requires contiguous
4202     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
4203     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
4204     *     </li>
4205     *     <li>
4206     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
4207     *         to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
4208     *         which requires 2-channel information.
4209     *     </li>
4210     *     <li>
4211     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
4212     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
4213     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
4214     *     </li>
4215     *   </ul>
4216     *   <li>
4217     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
4218     *        unstable and sometimes give completely wrong results. If you pass one of these two
4219     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
4220     *   </li>
4221     *   <li>
4222     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
4223     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
4224     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
4225     *   </li>
4226     *   <li>
4227     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
4228     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
4229     *        global solution to converge.
4230     *   </li>
4231     *   <li>
4232     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
4233     *   </li>
4234     *   <li>
4235     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
4236     *        Number of input points must be 4. Object points must be defined in the following order:
4237     *   <ul>
4238     *     <li>
4239     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
4240     *     </li>
4241     *     <li>
4242     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
4243     *     </li>
4244     *     <li>
4245     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
4246     *     </li>
4247     *     <li>
4248     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
4249     *     </li>
4250     *   </ul>
4251     *   </li>
4252     * </ul>
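     *
     * A minimal Java sketch of calling this overload (the correspondences, focal length,
     * principal point and zero distortion below are placeholders, not calibrated values):
     * <code>
     *     Mat objectPoints = new MatOfPoint3f(
     *             new Point3(0, 0, 0), new Point3(1, 0, 0),
     *             new Point3(1, 1, 0), new Point3(0, 1, 0));
     *     Mat imagePoints = new MatOfPoint2f(
     *             new Point(320, 240), new Point(400, 240),
     *             new Point(400, 320), new Point(320, 320));
     *     Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
     *     cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800); // fx, fy (placeholders)
     *     cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240); // cx, cy (placeholders)
     *     Mat distCoeffs = new MatOfDouble();                       // zero distortion assumed
     *     List&lt;Mat&gt; rvecs = new ArrayList&lt;&gt;();
     *     List&lt;Mat&gt; tvecs = new ArrayList&lt;&gt;();
     *     int nSolutions = Calib3d.solvePnPGeneric(objectPoints, imagePoints, cameraMatrix,
     *             distCoeffs, rvecs, tvecs, false);
     * </code>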
4253     * @return automatically generated
4254     */
4255    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess) {
4256        Mat rvecs_mat = new Mat();
4257        Mat tvecs_mat = new Mat();
4258        int retVal = solvePnPGeneric_4(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess);
4259        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4260        rvecs_mat.release();
4261        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4262        tvecs_mat.release();
4263        return retVal;
4264    }
4265
4266    /**
4267     * Finds an object pose from 3D-2D point correspondences.
4268     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
4269     * couple), depending on the number of input points and the chosen method:
4270     * <ul>
4271     *   <li>
4272     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
4273     *   </li>
4274     *   <li>
4275     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar. Returns 2 solutions.
4276     *   </li>
4277     *   <li>
4278     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
4279     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
4280     *   <ul>
4281     *     <li>
4282     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
4283     *     </li>
4284     *     <li>
4285     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
4286     *     </li>
4287     *     <li>
4288     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
4289     *     </li>
4290     *     <li>
4291     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
4292     *     </li>
4293     *   </ul>
4294     *   <li>
4295     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
4296     * Only 1 solution is returned.
4297     *   </li>
4298     * </ul>
4299     *
4300     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
4301     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
4302     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
4303     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
4304     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
4305     * @param distCoeffs Input vector of distortion coefficients
4306     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
4307     * assumed.
4308     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
4309     * the model coordinate system to the camera coordinate system.
4310     * @param tvecs Vector of output translation vectors.
4313     * <ul>
4314     *   <li>
4315     *    REF: SOLVEPNP_ITERATIVE Iterative method is based on a Levenberg-Marquardt optimization. In
4316     * this case the function finds such a pose that minimizes reprojection error, that is the sum
4317     * of squared distances between the observed projections imagePoints and the projected (using
4318     * projectPoints ) objectPoints .
4319     *   </li>
4320     *   <li>
4321     *    REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
4322     * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
4323     * In this case the function requires exactly four object and image points.
4324     *   </li>
4325     *   <li>
4326     *    REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis
4327     * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
4328     * In this case the function requires exactly four object and image points.
4329     *   </li>
4330     *   <li>
4331     *    REF: SOLVEPNP_EPNP Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
4332     * paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (CITE: lepetit2009epnp).
4333     *   </li>
4334     *   <li>
4335     *    REF: SOLVEPNP_DLS <b>Broken implementation. Using this flag will fallback to EPnP.</b> \n
4336     * Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
4337     * "A Direct Least-Squares (DLS) Method for PnP" (CITE: hesch2011direct).
4338     *   </li>
4339     *   <li>
4340     *    REF: SOLVEPNP_UPNP <b>Broken implementation. Using this flag will fallback to EPnP.</b> \n
4341     * Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
4342     * F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
4343     * Estimation" (CITE: penate2013exhaustive). In this case the function also estimates the parameters \(f_x\) and \(f_y\)
4344     * assuming that both have the same value. Then the cameraMatrix is updated with the estimated
4345     * focal length.
4346     *   </li>
4347     *   <li>
4348     *    REF: SOLVEPNP_IPPE Method is based on the paper of T. Collins and A. Bartoli.
4349     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method requires coplanar object points.
4350     *   </li>
4351     *   <li>
4352     *    REF: SOLVEPNP_IPPE_SQUARE Method is based on the paper of Toby Collins and Adrien Bartoli.
4353     * "Infinitesimal Plane-Based Pose Estimation" (CITE: Collins14). This method is suitable for marker pose estimation.
4354     * It requires 4 coplanar object points defined in the following order:
4355     *   <ul>
4356     *     <li>
4357     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
4358     *     </li>
4359     *     <li>
4360     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
4361     *     </li>
4362     *     <li>
4363     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
4364     *     </li>
4365     *     <li>
4366     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
4371     *     </li>
4372     *   </ul>
4373     *
4374     * The function estimates the object pose given a set of object points, their corresponding image
4375     * projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below
4376     * (more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
4377     * and the Z-axis forward).
4378     *   </li>
4379     * </ul>
4380     *
4381     * ![](pnp.jpg)
4382     *
4383     * Points expressed in the world frame \( \bf{X}_w \) are projected into the image plane \( \left[ u, v \right] \)
4384     * using the perspective projection model \( \Pi \) and the camera intrinsic parameters matrix \( \bf{A} \):
4385     *
4386     * \(
4387     *   \begin{align*}
4388     *   \begin{bmatrix}
4389     *   u \\
4390     *   v \\
4391     *   1
4392     *   \end{bmatrix} &amp;=
4393     *   \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{T}_w
4394     *   \begin{bmatrix}
4395     *   X_{w} \\
4396     *   Y_{w} \\
4397     *   Z_{w} \\
4398     *   1
4399     *   \end{bmatrix} \\
4400     *   \begin{bmatrix}
4401     *   u \\
4402     *   v \\
4403     *   1
4404     *   \end{bmatrix} &amp;=
4405     *   \begin{bmatrix}
4406     *   f_x &amp; 0 &amp; c_x \\
4407     *   0 &amp; f_y &amp; c_y \\
4408     *   0 &amp; 0 &amp; 1
4409     *   \end{bmatrix}
4410     *   \begin{bmatrix}
4411     *   1 &amp; 0 &amp; 0 &amp; 0 \\
4412     *   0 &amp; 1 &amp; 0 &amp; 0 \\
4413     *   0 &amp; 0 &amp; 1 &amp; 0
4414     *   \end{bmatrix}
4415     *   \begin{bmatrix}
4416     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
4417     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
4418     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
4419     *   0 &amp; 0 &amp; 0 &amp; 1
4420     *   \end{bmatrix}
4421     *   \begin{bmatrix}
4422     *   X_{w} \\
4423     *   Y_{w} \\
4424     *   Z_{w} \\
4425     *   1
4426     *   \end{bmatrix}
4427     *   \end{align*}
4428     * \)
4429     *
4430     * The estimated pose is thus the rotation ({@code rvec}) and the translation ({@code tvec}) vectors that allow transforming
4431     * a 3D point expressed in the world frame into the camera frame:
4432     *
4433     * \(
4434     *   \begin{align*}
4435     *   \begin{bmatrix}
4436     *   X_c \\
4437     *   Y_c \\
4438     *   Z_c \\
4439     *   1
4440     *   \end{bmatrix} &amp;=
4441     *   \hspace{0.2em} ^{c}\bf{T}_w
4442     *   \begin{bmatrix}
4443     *   X_{w} \\
4444     *   Y_{w} \\
4445     *   Z_{w} \\
4446     *   1
4447     *   \end{bmatrix} \\
4448     *   \begin{bmatrix}
4449     *   X_c \\
4450     *   Y_c \\
4451     *   Z_c \\
4452     *   1
4453     *   \end{bmatrix} &amp;=
4454     *   \begin{bmatrix}
4455     *   r_{11} &amp; r_{12} &amp; r_{13} &amp; t_x \\
4456     *   r_{21} &amp; r_{22} &amp; r_{23} &amp; t_y \\
4457     *   r_{31} &amp; r_{32} &amp; r_{33} &amp; t_z \\
4458     *   0 &amp; 0 &amp; 0 &amp; 1
4459     *   \end{bmatrix}
4460     *   \begin{bmatrix}
4461     *   X_{w} \\
4462     *   Y_{w} \\
4463     *   Z_{w} \\
4464     *   1
4465     *   \end{bmatrix}
4466     *   \end{align*}
4467     * \)
4468     *
4469     * <b>Note:</b>
4470     * <ul>
4471     *   <li>
4472     *       An example of how to use solvePnP for planar augmented reality can be found at
4473     *         opencv_source_code/samples/python/plane_ar.py
4474     *   </li>
4475     *   <li>
4476     *       If you are using Python:
4477     *   <ul>
4478     *     <li>
4479     *          Numpy array slices won't work as input because solvePnP requires contiguous
4480     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
4481     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
4482     *     </li>
4483     *     <li>
4484     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
4485     *         to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
4486     *         which requires 2-channel information.
4487     *     </li>
4488     *     <li>
4489     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
4490     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
4491     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
4492     *     </li>
4493     *   </ul>
4494     *   <li>
4495     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
4496     *        unstable and sometimes give completely wrong results. If you pass one of these two
4497     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
4498     *   </li>
4499     *   <li>
4500     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
4501     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
4502     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
4503     *   </li>
4504     *   <li>
4505     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
4506     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
4507     *        global solution to converge.
4508     *   </li>
4509     *   <li>
4510     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
4511     *   </li>
4512     *   <li>
4513     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
4514     *        Number of input points must be 4. Object points must be defined in the following order:
4515     *   <ul>
4516     *     <li>
4517     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
4518     *     </li>
4519     *     <li>
4520     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
4521     *     </li>
4522     *     <li>
4523     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
4524     *     </li>
4525     *     <li>
4526     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
4527     *     </li>
4528     *   </ul>
4529     *   </li>
4530     * </ul>
4531     * @return automatically generated
4532     */
4533    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs) {
4534        Mat rvecs_mat = new Mat();
4535        Mat tvecs_mat = new Mat();
4536        int retVal = solvePnPGeneric_5(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
4537        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4538        rvecs_mat.release();
4539        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4540        tvecs_mat.release();
4541        return retVal;
4542    }
4543
4544
4545    //
4546    // C++:  Mat cv::initCameraMatrix2D(vector_vector_Point3f objectPoints, vector_vector_Point2f imagePoints, Size imageSize, double aspectRatio = 1.0)
4547    //
4548
4549    /**
4550     * Finds an initial camera intrinsic matrix from 3D-2D point correspondences.
4551     *
4552     * @param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
4553     * coordinate space. In the old interface all the per-view vectors are concatenated. See
4554     * calibrateCamera for details.
4555     * @param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
4556     * old interface all the per-view vectors are concatenated.
4557     * @param imageSize Image size in pixels used to initialize the principal point.
4558     * @param aspectRatio If it is zero or negative, both \(f_x\) and \(f_y\) are estimated independently.
4559     * Otherwise, \(f_x = f_y * \texttt{aspectRatio}\) .
4560     *
4561     * The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
4562     * Currently, the function only supports planar calibration patterns, which are patterns where each
4563     * object point has z-coordinate =0.
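     *
     * A minimal Java sketch, assuming the planar pattern points for one view were already
     * collected into a MatOfPoint3f named objPts (all z = 0) and the detected projections into
     * a MatOfPoint2f named corners:
     * <code>
     *     List&lt;MatOfPoint3f&gt; objectPoints = new ArrayList&lt;&gt;();
     *     List&lt;MatOfPoint2f&gt; imagePoints = new ArrayList&lt;&gt;();
     *     objectPoints.add(objPts);
     *     imagePoints.add(corners);
     *     Mat cameraMatrix = Calib3d.initCameraMatrix2D(objectPoints, imagePoints,
     *             new Size(640, 480), 1.0);
     * </code>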
4564     * @return automatically generated
4565     */
4566    public static Mat initCameraMatrix2D(List<MatOfPoint3f> objectPoints, List<MatOfPoint2f> imagePoints, Size imageSize, double aspectRatio) {
4567        List<Mat> objectPoints_tmplm = new ArrayList<Mat>((objectPoints != null) ? objectPoints.size() : 0);
4568        Mat objectPoints_mat = Converters.vector_vector_Point3f_to_Mat(objectPoints, objectPoints_tmplm);
4569        List<Mat> imagePoints_tmplm = new ArrayList<Mat>((imagePoints != null) ? imagePoints.size() : 0);
4570        Mat imagePoints_mat = Converters.vector_vector_Point2f_to_Mat(imagePoints, imagePoints_tmplm);
4571        return new Mat(initCameraMatrix2D_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, aspectRatio));
4572    }
4573
4574    /**
4575     * Finds an initial camera intrinsic matrix from 3D-2D point correspondences.
4576     *
4577     * @param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
4578     * coordinate space. In the old interface all the per-view vectors are concatenated. See
4579     * calibrateCamera for details.
4580     * @param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
4581     * old interface all the per-view vectors are concatenated.
4582     * @param imageSize Image size in pixels used to initialize the principal point.
4584     *
4585     * The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
4586     * Currently, the function only supports planar calibration patterns, which are patterns where each
4587     * object point has z-coordinate =0.
4588     * @return automatically generated
4589     */
4590    public static Mat initCameraMatrix2D(List<MatOfPoint3f> objectPoints, List<MatOfPoint2f> imagePoints, Size imageSize) {
4591        List<Mat> objectPoints_tmplm = new ArrayList<Mat>((objectPoints != null) ? objectPoints.size() : 0);
4592        Mat objectPoints_mat = Converters.vector_vector_Point3f_to_Mat(objectPoints, objectPoints_tmplm);
4593        List<Mat> imagePoints_tmplm = new ArrayList<Mat>((imagePoints != null) ? imagePoints.size() : 0);
4594        Mat imagePoints_mat = Converters.vector_vector_Point2f_to_Mat(imagePoints, imagePoints_tmplm);
4595        return new Mat(initCameraMatrix2D_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height));
4596    }
4597
4598
4599    //
4600    // C++:  bool cv::findChessboardCorners(Mat image, Size patternSize, vector_Point2f& corners, int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE)
4601    //
4602
4603    /**
4604     * Finds the positions of internal corners of the chessboard.
4605     *
4606     * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
4607     * @param patternSize Number of inner corners per a chessboard row and column
4608     * ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
4609     * @param corners Output array of detected corners.
4610     * @param flags Various operation flags that can be zero or a combination of the following values:
4611     * <ul>
4612     *   <li>
4613     *    REF: CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black
4614     * and white, rather than a fixed threshold level (computed from the average image brightness).
4615     *   </li>
4616     *   <li>
4617     *    REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before
4618     * applying fixed or adaptive thresholding.
4619     *   </li>
4620     *   <li>
4621     *    REF: CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter,
4622     * square-like shape) to filter out false quads extracted at the contour retrieval stage.
4623     *   </li>
4624     *   <li>
4625     *    REF: CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners,
4626     * and shortcut the call if none is found. This can drastically speed up the call in the
4627     * degenerate condition when no chessboard is observed.
4628     *   </li>
4629     * </ul>
4630     *
4631     * The function attempts to determine whether the input image is a view of the chessboard pattern and
4632     * locate the internal chessboard corners. The function returns a non-zero value if all of the corners
4633     * are found and they are placed in a certain order (row by row, left to right in every row).
4634     * Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
4635     * a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
4636     * squares touch each other. The detected coordinates are approximate, and to determine their positions
4637     * more accurately, the function calls cornerSubPix. You may also use the function cornerSubPix with
4638     * different parameters if the returned coordinates are not accurate enough.
4639     *
4640     * Sample usage of detecting and drawing chessboard corners:
4641     * <code>
4642     *     Size patternsize(8,6); //interior number of corners
4643     *     Mat gray = ....; //source image
4644     *     vector&lt;Point2f&gt; corners; //this will be filled by the detected corners
4645     *
4646     *     //CALIB_CB_FAST_CHECK saves a lot of time on images
4647     *     //that do not contain any chessboard corners
4648     *     bool patternfound = findChessboardCorners(gray, patternsize, corners,
4649     *             CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
4650     *             + CALIB_CB_FAST_CHECK);
4651     *
4652     *     if(patternfound)
4653     *       cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
4654     *         TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
4655     *
4656     *     drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
4657     * </code>
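     *
     * An equivalent sketch using this Java binding (assuming gray is an 8-bit grayscale Mat that
     * has already been loaded and img is a color copy used for drawing; Imgproc and TermCriteria
     * come from org.opencv.imgproc and org.opencv.core):
     * <code>
     *     Size patternSize = new Size(8, 6); // interior number of corners
     *     MatOfPoint2f corners = new MatOfPoint2f();
     *     boolean patternFound = Calib3d.findChessboardCorners(gray, patternSize, corners,
     *             Calib3d.CALIB_CB_ADAPTIVE_THRESH + Calib3d.CALIB_CB_NORMALIZE_IMAGE
     *             + Calib3d.CALIB_CB_FAST_CHECK);
     *     if (patternFound) {
     *         Imgproc.cornerSubPix(gray, corners, new Size(11, 11), new Size(-1, -1),
     *                 new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 30, 0.1));
     *     }
     *     Calib3d.drawChessboardCorners(img, patternSize, corners, patternFound);
     * </code>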
4658     * <b>Note:</b> The function requires white space (like a square-thick border, the wider the better) around
4659     * the board to make the detection more robust in various environments. Otherwise, if there is no
4660     * border and the background is dark, the outer black squares cannot be segmented properly and so the
4661     * square grouping and ordering algorithm fails.
4662     * @return automatically generated
4663     */
4664    public static boolean findChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, int flags) {
4665        Mat corners_mat = corners;
4666        return findChessboardCorners_0(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj, flags);
4667    }
4668
4669    /**
4670     * Finds the positions of internal corners of the chessboard.
4671     *
4672     * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
4673     * @param patternSize Number of inner corners per a chessboard row and column
4674     * ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
4675     * @param corners Output array of detected corners.
4676     * <ul>
4677     *   <li>
4678     *    REF: CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black
4679     * and white, rather than a fixed threshold level (computed from the average image brightness).
4680     *   </li>
4681     *   <li>
4682     *    REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before
4683     * applying fixed or adaptive thresholding.
4684     *   </li>
4685     *   <li>
4686     *    REF: CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter,
4687     * square-like shape) to filter out false quads extracted at the contour retrieval stage.
4688     *   </li>
4689     *   <li>
4690     *    REF: CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners,
4691     * and shortcut the call if none is found. This can drastically speed up the call in the
4692     * degenerate condition when no chessboard is observed.
4693     *   </li>
4694     * </ul>
4695     *
4696     * The function attempts to determine whether the input image is a view of the chessboard pattern and
4697     * locate the internal chessboard corners. The function returns a non-zero value if all of the corners
4698     * are found and they are placed in a certain order (row by row, left to right in every row).
4699     * Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
4700     * a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
4701     * squares touch each other. The detected coordinates are approximate, and to determine their positions
4702     * more accurately, the function calls cornerSubPix. You may also use the function cornerSubPix with
4703     * different parameters if the returned coordinates are not accurate enough.
4704     *
4705     * Sample usage of detecting and drawing chessboard corners:
4706     * <code>
4707     *     Size patternsize(8,6); //interior number of corners
4708     *     Mat gray = ....; //source image
4709     *     vector&lt;Point2f&gt; corners; //this will be filled by the detected corners
4710     *
4711     *     //CALIB_CB_FAST_CHECK saves a lot of time on images
4712     *     //that do not contain any chessboard corners
4713     *     bool patternfound = findChessboardCorners(gray, patternsize, corners,
4714     *             CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
4715     *             + CALIB_CB_FAST_CHECK);
4716     *
4717     *     if(patternfound)
4718     *       cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
4719     *         TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
4720     *
4721     *     drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
4722     * </code>
4723     * <b>Note:</b> The function requires white space (like a square-thick border, the wider the better) around
4724     * the board to make the detection more robust in various environments. Otherwise, if there is no
4725     * border and the background is dark, the outer black squares cannot be segmented properly and so the
4726     * square grouping and ordering algorithm fails.
4727     * @return automatically generated
4728     */
4729    public static boolean findChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners) {
4730        Mat corners_mat = corners;
4731        return findChessboardCorners_1(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj);
4732    }
4733
4734
4735    //
4736    // C++:  bool cv::checkChessboard(Mat img, Size size)
4737    //
4738
4739    public static boolean checkChessboard(Mat img, Size size) {
4740        return checkChessboard_0(img.nativeObj, size.width, size.height);
4741    }
4742
4743
4744    //
4745    // C++:  bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags, Mat& meta)
4746    //
4747
4748    /**
4749     * Finds the positions of internal corners of the chessboard using a sector based approach.
4750     *
4751     * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
4752     * @param patternSize Number of inner corners per a chessboard row and column
4753     * ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
4754     * @param corners Output array of detected corners.
4755     * @param flags Various operation flags that can be zero or a combination of the following values:
4756     * <ul>
4757     *   <li>
4758     *    REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before detection.
4759     *   </li>
4760     *   <li>
4761     *    REF: CALIB_CB_EXHAUSTIVE Run an exhaustive search to improve detection rate.
4762     *   </li>
4763     *   <li>
4764     *    REF: CALIB_CB_ACCURACY Upsample the input image to improve sub-pixel accuracy due to aliasing effects.
4765     *   </li>
4766     *   <li>
4767     *    REF: CALIB_CB_LARGER The detected pattern is allowed to be larger than patternSize (see description).
4768     *   </li>
4769     *   <li>
4770     *    REF: CALIB_CB_MARKER The detected pattern must have a marker (see description).
4771     * This should be used if an accurate camera calibration is required.
4772     * @param meta Optional output array of detected corners (CV_8UC1 and size = cv::Size(columns,rows)).
4773     * Each entry stands for one corner of the pattern and can have one of the following values:
4774     *   </li>
4775     *   <li>
4776     *    0 = no meta data attached
4777     *   </li>
4778     *   <li>
4779     *    1 = left-top corner of a black cell
4780     *   </li>
4781     *   <li>
4782     *    2 = left-top corner of a white cell
4783     *   </li>
4784     *   <li>
4785     *    3 = left-top corner of a black cell with a white marker dot
4786     *   </li>
4787     *   <li>
4788     *    4 = left-top corner of a white cell with a black marker dot (pattern origin in case of markers; otherwise, the first corner)
4789     *   </li>
4790     * </ul>
4791     *
4792     * The function is analogous to findChessboardCorners but uses a localized Radon
4793     * transformation approximated by box filters, which makes it more robust to all sorts of
4794     * noise, faster on larger images, and able to directly return the sub-pixel
4795     * position of the internal chessboard corners. The method is based on the paper
4796     * CITE: duda2018 "Accurate Detection and Localization of Checkerboard Corners for
4797     * Calibration", demonstrating that the returned sub-pixel positions are more
4798     * accurate than the ones returned by cornerSubPix, allowing precise camera
4799     * calibration for demanding applications.
4800     *
4801     * If the flags REF: CALIB_CB_LARGER or REF: CALIB_CB_MARKER are given,
4802     * the result can be recovered from the optional meta array. Both flags are
4803     * helpful when using calibration patterns that exceed the field of view of the camera.
4804     * Such oversized patterns allow more accurate calibrations because corners that are
4805     * as close as possible to the image borders can be utilized. For a
4806     * consistent coordinate system across all images, the optional marker (see image
4807     * below) can be used to move the origin of the board to the location where the
4808     * black circle is located.
4809     *
4810     * <b>Note:</b> The function requires a white border with roughly the same width as one
4811     * of the checkerboard fields around the whole board to improve the detection in
4812     * various environments. In addition, because of the localized Radon
4813     * transformation, it is beneficial to use round corners for the field corners
4814     * that are located on the outside of the board. The following figure illustrates
4815     * a sample checkerboard optimized for detection. However, any other checkerboard
4816     * can be used as well.
4817     * ![Checkerboard](pics/checkerboard_radon.png)
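     *
     * A minimal Java sketch, assuming gray is an 8-bit grayscale Mat and the pattern size below
     * is only a placeholder for the actual board dimensions:
     * <code>
     *     Mat corners = new Mat();
     *     Mat meta = new Mat();
     *     boolean found = Calib3d.findChessboardCornersSBWithMeta(gray, new Size(9, 6), corners,
     *             Calib3d.CALIB_CB_NORMALIZE_IMAGE + Calib3d.CALIB_CB_EXHAUSTIVE
     *             + Calib3d.CALIB_CB_MARKER, meta);
     * </code>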
4818     * @return automatically generated
4819     */
4820    public static boolean findChessboardCornersSBWithMeta(Mat image, Size patternSize, Mat corners, int flags, Mat meta) {
4821        return findChessboardCornersSBWithMeta_0(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, flags, meta.nativeObj);
4822    }
4823
4824
4825    //
4826    // C++:  bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags = 0)
4827    //
4828
4829    public static boolean findChessboardCornersSB(Mat image, Size patternSize, Mat corners, int flags) {
4830        return findChessboardCornersSB_0(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, flags);
4831    }
4832
4833    public static boolean findChessboardCornersSB(Mat image, Size patternSize, Mat corners) {
4834        return findChessboardCornersSB_1(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj);
4835    }
4836
4837
4838    //
4839    // C++:  Scalar cv::estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance = 0.8F, bool vertical = false, Mat& sharpness = Mat())
4840    //
4841
4842    /**
4843     * Estimates the sharpness of a detected chessboard.
4844     *
4845     * Image sharpness, as well as brightness, is a critical parameter for accurate
4846     * camera calibration. To access these parameters for filtering out
4847     * problematic calibration images, this method calculates edge profiles by traveling from
4848     * black to white chessboard cell centers. Based on this, the number of pixels
4849     * required to transition from black to white is calculated. This width of the
4850     * transition area is a good indication of how sharply the chessboard is imaged
4851     * and should be below ~3.0 pixels.
4852     *
4853     * @param image Gray image used to find chessboard corners
4854     * @param patternSize Size of a found chessboard pattern
4855     * @param corners Corners found by findChessboardCorners(SB)
4856     * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
4857     * @param vertical By default edge responses for horizontal lines are calculated
4858     * @param sharpness Optional output array with a sharpness value for calculated edge responses (see description)
4859     *
4860     * The optional sharpness array is of type CV_32FC1 and has for each calculated
4861     * profile one row with the following five entries:
4862     * 0 = x coordinate of the underlying edge in the image
4863     * 1 = y coordinate of the underlying edge in the image
4864     * 2 = width of the transition area (sharpness)
4865     * 3 = signal strength in the black cell (min brightness)
4866     * 4 = signal strength in the white cell (max brightness)
4867     *
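     * A minimal Java sketch, assuming gray, patternSize and corners come from a prior
     * findChessboardCornersSB call:
     * <code>
     *     Mat sharpness = new Mat();
     *     Scalar result = Calib3d.estimateChessboardSharpness(gray, patternSize, corners,
     *             0.8f, false, sharpness);
     *     double avgSharpness = result.val[0]; // e.g. reject the view if this exceeds ~3.0 pixels
     * </code>
     *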
4868     * @return Scalar(average sharpness, average min brightness, average max brightness,0)
4869     */
4870    public static Scalar estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance, boolean vertical, Mat sharpness) {
4871        return new Scalar(estimateChessboardSharpness_0(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, rise_distance, vertical, sharpness.nativeObj));
4872    }
4873
4874    /**
4875     * Estimates the sharpness of a detected chessboard.
4876     *
4877     * Image sharpness, as well as brightness, is a critical parameter for accurate
4878     * camera calibration. To access these parameters for filtering out
4879     * problematic calibration images, this method calculates edge profiles by traveling from
4880     * black to white chessboard cell centers. Based on this, the number of pixels
4881     * required to transition from black to white is calculated. This width of the
4882     * transition area is a good indication of how sharply the chessboard is imaged
4883     * and should be below ~3.0 pixels.
4884     *
4885     * @param image Gray image used to find chessboard corners
4886     * @param patternSize Size of a found chessboard pattern
4887     * @param corners Corners found by findChessboardCorners(SB)
4888     * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
4889     * @param vertical By default edge responses for horizontal lines are calculated
4890     *
4891     * The optional sharpness array is of type CV_32FC1 and has for each calculated
4892     * profile one row with the following five entries:
4893     * 0 = x coordinate of the underlying edge in the image
4894     * 1 = y coordinate of the underlying edge in the image
4895     * 2 = width of the transition area (sharpness)
4896     * 3 = signal strength in the black cell (min brightness)
4897     * 4 = signal strength in the white cell (max brightness)
4898     *
4899     * @return Scalar(average sharpness, average min brightness, average max brightness,0)
4900     */
4901    public static Scalar estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance, boolean vertical) {
4902        return new Scalar(estimateChessboardSharpness_1(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, rise_distance, vertical));
4903    }
4904
4905    /**
4906     * Estimates the sharpness of a detected chessboard.
4907     *
4908     * Image sharpness, as well as brightness, is a critical parameter for accurate
4909     * camera calibration. To access these parameters for filtering out
4910     * problematic calibration images, this method calculates edge profiles by traveling from
4911     * black to white chessboard cell centers. Based on this, the number of pixels
4912     * required to transition from black to white is calculated. This width of the
4913     * transition area is a good indication of how sharply the chessboard is imaged
4914     * and should be below ~3.0 pixels.
4915     *
4916     * @param image Gray image used to find chessboard corners
4917     * @param patternSize Size of a found chessboard pattern
4918     * @param corners Corners found by findChessboardCorners(SB)
4919     * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
4920     *
4921     * The optional sharpness array is of type CV_32FC1 and has for each calculated
4922     * profile one row with the following five entries:
4923     * 0 = x coordinate of the underlying edge in the image
4924     * 1 = y coordinate of the underlying edge in the image
4925     * 2 = width of the transition area (sharpness)
4926     * 3 = signal strength in the black cell (min brightness)
4927     * 4 = signal strength in the white cell (max brightness)
4928     *
4929     * @return Scalar(average sharpness, average min brightness, average max brightness,0)
4930     */
4931    public static Scalar estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance) {
4932        return new Scalar(estimateChessboardSharpness_2(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, rise_distance));
4933    }
4934
4935    /**
4936     * Estimates the sharpness of a detected chessboard.
4937     *
4938     * Image sharpness, as well as brightness, is a critical parameter for accurate
4939     * camera calibration. To access these parameters for filtering out
4940     * problematic calibration images, this method calculates edge profiles by traveling from
4941     * black to white chessboard cell centers. Based on this, the number of pixels
4942     * required to transition from black to white is calculated. This width of the
4943     * transition area is a good indication of how sharply the chessboard is imaged
4944     * and should be below ~3.0 pixels.
4945     *
4946     * @param image Gray image used to find chessboard corners
4947     * @param patternSize Size of a found chessboard pattern
4948     * @param corners Corners found by findChessboardCorners(SB)
4949     *
4950     * The optional sharpness array is of type CV_32FC1 and has for each calculated
4951     * profile one row with the following five entries:
4952     * 0 = x coordinate of the underlying edge in the image
4953     * 1 = y coordinate of the underlying edge in the image
4954     * 2 = width of the transition area (sharpness)
4955     * 3 = signal strength in the black cell (min brightness)
4956     * 4 = signal strength in the white cell (max brightness)
4957     *
4958     * @return Scalar(average sharpness, average min brightness, average max brightness,0)
4959     */
4960    public static Scalar estimateChessboardSharpness(Mat image, Size patternSize, Mat corners) {
4961        return new Scalar(estimateChessboardSharpness_3(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj));
4962    }
4963
4964
4965    //
4966    // C++:  bool cv::find4QuadCornerSubpix(Mat img, Mat& corners, Size region_size)
4967    //
4968
4969    public static boolean find4QuadCornerSubpix(Mat img, Mat corners, Size region_size) {
4970        return find4QuadCornerSubpix_0(img.nativeObj, corners.nativeObj, region_size.width, region_size.height);
4971    }
4972
4973
4974    //
4975    // C++:  void cv::drawChessboardCorners(Mat& image, Size patternSize, vector_Point2f corners, bool patternWasFound)
4976    //
4977
4978    /**
4979     * Renders the detected chessboard corners.
4980     *
4981     * @param image Destination image. It must be an 8-bit color image.
4982     * @param patternSize Number of inner corners per a chessboard row and column
4983     * (patternSize = cv::Size(points_per_row,points_per_column)).
4984     * @param corners Array of detected corners, the output of findChessboardCorners.
4985     * @param patternWasFound Parameter indicating whether the complete board was found or not. The
4986     * return value of findChessboardCorners should be passed here.
4987     *
4988     * The function draws individual chessboard corners detected either as red circles if the board was not
4989     * found, or as colored corners connected with lines if the board was found.
4990     */
4991    public static void drawChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, boolean patternWasFound) {
4992        Mat corners_mat = corners;
4993        drawChessboardCorners_0(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj, patternWasFound);
4994    }
4995
4996
4997    //
4998    // C++:  void cv::drawFrameAxes(Mat& image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length, int thickness = 3)
4999    //
5000
5001    /**
5002     * Draw axes of the world/object coordinate system from pose estimation. SEE: solvePnP
5003     *
5004     * @param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
5005     * @param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters.
5006     * \(\cameramatrix{A}\)
5007     * @param distCoeffs Input vector of distortion coefficients
5008     * \(\distcoeffs\). If the vector is empty, the zero distortion coefficients are assumed.
5009     * @param rvec Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
5010     * the model coordinate system to the camera coordinate system.
5011     * @param tvec Translation vector.
5012     * @param length Length of the painted axes in the same unit as tvec (usually in meters).
5013     * @param thickness Line thickness of the painted axes.
5014     *
5015     * This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
5016     * OX is drawn in red, OY in green and OZ in blue.
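     *
     * A minimal Java sketch, assuming rvec and tvec come from a prior solvePnP/solvePnPGeneric
     * call and the object points are expressed in meters, so an axis length of 0.1 is 10 cm:
     * <code>
     *     Calib3d.drawFrameAxes(image, cameraMatrix, distCoeffs, rvec, tvec, 0.1f, 3);
     * </code>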
5017     */
5018    public static void drawFrameAxes(Mat image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length, int thickness) {
5019        drawFrameAxes_0(image.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, length, thickness);
5020    }
5021
5022    /**
5023     * Draw axes of the world/object coordinate system from pose estimation. SEE: solvePnP
5024     *
5025     * @param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
5026     * @param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters.
5027     * \(\cameramatrix{A}\)
5028     * @param distCoeffs Input vector of distortion coefficients
5029     * \(\distcoeffs\). If the vector is empty, the zero distortion coefficients are assumed.
5030     * @param rvec Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
5031     * the model coordinate system to the camera coordinate system.
5032     * @param tvec Translation vector.
5033     * @param length Length of the painted axes in the same unit as tvec (usually in meters).
5034     *
5035     * This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
5036     * OX is drawn in red, OY in green and OZ in blue.
5037     */
5038    public static void drawFrameAxes(Mat image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length) {
5039        drawFrameAxes_1(image.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, length);
5040    }
5041
5042
5043    //
5044    // C++:  bool cv::findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags, Ptr_FeatureDetector blobDetector, CirclesGridFinderParameters parameters)
5045    //
5046
5047    // Unknown type 'Ptr_FeatureDetector' (I), skipping the function
5048
5049
5050    //
5051    // C++:  bool cv::findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID, Ptr_FeatureDetector blobDetector = SimpleBlobDetector::create())
5052    //
5053
5054    public static boolean findCirclesGrid(Mat image, Size patternSize, Mat centers, int flags) {
5055        return findCirclesGrid_0(image.nativeObj, patternSize.width, patternSize.height, centers.nativeObj, flags);
5056    }
5057
5058    public static boolean findCirclesGrid(Mat image, Size patternSize, Mat centers) {
5059        return findCirclesGrid_2(image.nativeObj, patternSize.width, patternSize.height, centers.nativeObj);
5060    }
5061
5062
5063    //
5064    // C++:  double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
5065    //
5066
5067    /**
5068     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
5069     * pattern.
5070     *
5071     * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
5072     * the calibration pattern coordinate space (e.g. std::vector&lt;std::vector&lt;cv::Vec3f&gt;&gt;). The outer
5073     * vector contains as many elements as the number of pattern views. If the same calibration pattern
5074     * is shown in each view and it is fully visible, all the vectors will be the same. However, it is
5075     * possible to use partially occluded patterns or even different patterns in different views. Then,
5076     * the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
5077     * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
5078     * In the old interface all the vectors of object points from different views are concatenated
5079     * together.
5080     * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
5081     * pattern points (e.g. std::vector&lt;std::vector&lt;cv::Vec2f&gt;&gt;). imagePoints.size() and
5082     * objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
5083     * respectively. In the old interface all the vectors of object points from different views are
5084     * concatenated together.
5085     * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
5086     * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
5087     * \(\cameramatrix{A}\) . If REF: CALIB_USE_INTRINSIC_GUESS
5088     * and/or REF: CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
5089     * initialized before calling the function.
5090     * @param distCoeffs Input/output vector of distortion coefficients
5091     * \(\distcoeffs\).
5092     * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
5093     * (e.g. std::vector&lt;cv::Mat&gt;). That is, each i-th rotation vector together with the corresponding
5094     * i-th translation vector (see the next output parameter description) brings the calibration pattern
5095     * from the object coordinate space (in which object points are specified) to the camera coordinate
5096     * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
5097     * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
5098     * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
5099     * space.
5100     * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
5101     * description above.
5102     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
5103     * parameters. Order of deviations values:
5104     * \((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
5105     *  s_4, \tau_x, \tau_y)\). If one of the parameters is not estimated, its deviation is equal to zero.
5106     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
5107     * parameters. Order of deviations values: \((R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\) where M is
5108     * the number of pattern views. \(R_i, T_i\) are concatenated 1x3 vectors.
5109     *  @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
5110     * @param flags Different flags that may be zero or a combination of the following values:
5111     * <ul>
5112     *   <li>
5113     *    REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
5114     * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
5115     * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
5116     * Note, that if intrinsic parameters are known, there is no need to use this function just to
5117     * estimate extrinsic parameters. Use solvePnP instead.
5118     *   </li>
5119     *   <li>
5120     *    REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
5121     * optimization. It stays at the center or at a different location specified when
5122     *  REF: CALIB_USE_INTRINSIC_GUESS is set too.
5123     *   </li>
5124     *   <li>
5125     *    REF: CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
5126     * ratio fx/fy stays the same as in the input cameraMatrix . When
5127     *  REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
5128     * ignored, only their ratio is computed and used further.
5129     *   </li>
5130     *   <li>
5131     *    REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \((p_1, p_2)\) are set
5132     * to zeros and stay zero.
5133     *   </li>
5134     *   <li>
5135     *    REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
5136     * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
5137     * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5138     *   </li>
5139     *   <li>
     *    REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide
5141     * backward compatibility, this extra flag should be explicitly specified to make the
5142     * calibration function use the rational model and return 8 coefficients. If the flag is not
5143     * set, the function computes and returns only 5 distortion coefficients.
5144     *   </li>
5145     *   <li>
     *    REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide
5147     * backward compatibility, this extra flag should be explicitly specified to make the
5148     * calibration function use the thin prism model and return 12 coefficients. If the flag is not
5149     * set, the function computes and returns only 5 distortion coefficients.
5150     *   </li>
5151     *   <li>
5152     *    REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
5153     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
5154     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5155     *   </li>
5156     *   <li>
     *    REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide
5158     * backward compatibility, this extra flag should be explicitly specified to make the
5159     * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
5160     * set, the function computes and returns only 5 distortion coefficients.
5161     *   </li>
5162     *   <li>
5163     *    REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
5164     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
5165     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
     *   </li>
     * </ul>
     * @param criteria Termination criteria for the iterative optimization algorithm.
5169     *
5170     * @return the overall RMS re-projection error.
5171     *
5172     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
5173     * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
5174     * points and their corresponding 2D projections in each view must be specified. That may be achieved
5175     * by using an object with known geometry and easily detectable feature points. Such an object is
5176     * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
5177     * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
5178     * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
5179     * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
5180     * be used as long as initial cameraMatrix is provided.
5181     *
5182     * The algorithm performs the following steps:
5183     *
5184     * <ul>
5185     *   <li>
5186     *    Compute the initial intrinsic parameters (the option only available for planar calibration
5187     *     patterns) or read them from the input parameters. The distortion coefficients are all set to
5188     *     zeros initially unless some of CALIB_FIX_K? are specified.
5189     *   </li>
5190     * </ul>
5191     *
5192     * <ul>
5193     *   <li>
5194     *    Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
5195     *     done using solvePnP .
5196     *   </li>
5197     * </ul>
5198     *
5199     * <ul>
5200     *   <li>
5201     *    Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
5202     *     that is, the total sum of squared distances between the observed feature points imagePoints and
5203     *     the projected (using the current estimates for camera parameters and the poses) object points
5204     *     objectPoints. See projectPoints for details.
5205     *   </li>
5206     * </ul>
5207     *
5208     * <b>Note:</b>
5209     *     If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
5210     *     and REF: calibrateCamera returns bad values (zero distortion coefficients, \(c_x\) and
5211     *     \(c_y\) very far from the image center, and/or large differences between \(f_x\) and
5212     *     \(f_y\) (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
5213     *     instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
5214     *
5215     * SEE:
5216     *    calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
5217     *    undistort
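     *
     * A minimal usage sketch (illustrative only; it assumes {@code objectPoints} and {@code imagePoints}
     * were filled beforehand, one view per element, e.g. with findChessboardCorners and cornerSubPix,
     * and that 640x480 is a representative image size):
     * <pre>{@code
     * List<Mat> objectPoints = new ArrayList<>(); // fill with one MatOfPoint3f of board corners per view
     * List<Mat> imagePoints  = new ArrayList<>(); // fill with one MatOfPoint2f of detected corners per view
     * Mat cameraMatrix = new Mat(), distCoeffs = new Mat();
     * List<Mat> rvecs = new ArrayList<>(), tvecs = new ArrayList<>();
     * Mat stdDevIntrinsics = new Mat(), stdDevExtrinsics = new Mat(), perViewErrors = new Mat();
     * double rms = Calib3d.calibrateCameraExtended(objectPoints, imagePoints, new Size(640, 480),
     *         cameraMatrix, distCoeffs, rvecs, tvecs,
     *         stdDevIntrinsics, stdDevExtrinsics, perViewErrors,
     *         Calib3d.CALIB_RATIONAL_MODEL,
     *         new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 30, 1e-9));
     * }</pre>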
5218     */
5219    public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors, int flags, TermCriteria criteria) {
5220        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5221        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
5222        Mat rvecs_mat = new Mat();
5223        Mat tvecs_mat = new Mat();
5224        double retVal = calibrateCameraExtended_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
5225        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
5226        rvecs_mat.release();
5227        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
5228        tvecs_mat.release();
5229        return retVal;
5230    }
5231
5232    /**
5233     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
5234     * pattern.
5235     *
5236     * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
5237     * the calibration pattern coordinate space (e.g. std::vector&lt;std::vector&lt;cv::Vec3f&gt;&gt;). The outer
5238     * vector contains as many elements as the number of pattern views. If the same calibration pattern
     * is shown in each view and it is fully visible, all the vectors will be the same. It is, however,
     * possible to use partially occluded patterns or even different patterns in different views; then,
     * the vectors will differ. Although the points are 3D, they all lie in the calibration pattern's
5242     * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
5243     * In the old interface all the vectors of object points from different views are concatenated
5244     * together.
5245     * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
5246     * pattern points (e.g. std::vector&lt;std::vector&lt;cv::Vec2f&gt;&gt;). imagePoints.size() and
5247     * objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
5248     * respectively. In the old interface all the vectors of object points from different views are
5249     * concatenated together.
5250     * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
5251     * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
5252     * \(\cameramatrix{A}\) . If REF: CALIB_USE_INTRINSIC_GUESS
5253     * and/or REF: CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
5254     * initialized before calling the function.
5255     * @param distCoeffs Input/output vector of distortion coefficients
5256     * \(\distcoeffs\).
5257     * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
     * (e.g. std::vector&lt;cv::Mat&gt;). That is, each i-th rotation vector together with the corresponding
5259     * i-th translation vector (see the next output parameter description) brings the calibration pattern
5260     * from the object coordinate space (in which object points are specified) to the camera coordinate
5261     * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
5262     * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
5263     * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
5264     * space.
5265     * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
     * description above.
5267     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
5268     * parameters. Order of deviations values:
5269     * \((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
     *  s_4, \tau_x, \tau_y)\). If one of the parameters is not estimated, its deviation is equal to zero.
5271     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
5272     * parameters. Order of deviations values: \((R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\) where M is
5273     * the number of pattern views. \(R_i, T_i\) are concatenated 1x3 vectors.
5274     *  @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
5275     * @param flags Different flags that may be zero or a combination of the following values:
5276     * <ul>
5277     *   <li>
5278     *    REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
5279     * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
5280     * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
5281     * Note, that if intrinsic parameters are known, there is no need to use this function just to
5282     * estimate extrinsic parameters. Use solvePnP instead.
5283     *   </li>
5284     *   <li>
5285     *    REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
5286     * optimization. It stays at the center or at a different location specified when
5287     *  REF: CALIB_USE_INTRINSIC_GUESS is set too.
5288     *   </li>
5289     *   <li>
5290     *    REF: CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
5291     * ratio fx/fy stays the same as in the input cameraMatrix . When
5292     *  REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
5293     * ignored, only their ratio is computed and used further.
5294     *   </li>
5295     *   <li>
5296     *    REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \((p_1, p_2)\) are set
5297     * to zeros and stay zero.
5298     *   </li>
5299     *   <li>
5300     *    REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
5301     * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
5302     * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5303     *   </li>
5304     *   <li>
     *    REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide
5306     * backward compatibility, this extra flag should be explicitly specified to make the
5307     * calibration function use the rational model and return 8 coefficients. If the flag is not
5308     * set, the function computes and returns only 5 distortion coefficients.
5309     *   </li>
5310     *   <li>
     *    REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide
5312     * backward compatibility, this extra flag should be explicitly specified to make the
5313     * calibration function use the thin prism model and return 12 coefficients. If the flag is not
5314     * set, the function computes and returns only 5 distortion coefficients.
5315     *   </li>
5316     *   <li>
5317     *    REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
5318     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
5319     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5320     *   </li>
5321     *   <li>
     *    REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide
5323     * backward compatibility, this extra flag should be explicitly specified to make the
5324     * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
5325     * set, the function computes and returns only 5 distortion coefficients.
5326     *   </li>
5327     *   <li>
5328     *    REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
5329     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
5330     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5331     *   </li>
5332     * </ul>
5333     *
5334     * @return the overall RMS re-projection error.
5335     *
5336     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
5337     * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
5338     * points and their corresponding 2D projections in each view must be specified. That may be achieved
5339     * by using an object with known geometry and easily detectable feature points. Such an object is
5340     * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
5341     * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
5342     * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
5343     * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
5344     * be used as long as initial cameraMatrix is provided.
5345     *
5346     * The algorithm performs the following steps:
5347     *
5348     * <ul>
5349     *   <li>
5350     *    Compute the initial intrinsic parameters (the option only available for planar calibration
5351     *     patterns) or read them from the input parameters. The distortion coefficients are all set to
5352     *     zeros initially unless some of CALIB_FIX_K? are specified.
5353     *   </li>
5354     * </ul>
5355     *
5356     * <ul>
5357     *   <li>
5358     *    Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
5359     *     done using solvePnP .
5360     *   </li>
5361     * </ul>
5362     *
5363     * <ul>
5364     *   <li>
5365     *    Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
5366     *     that is, the total sum of squared distances between the observed feature points imagePoints and
5367     *     the projected (using the current estimates for camera parameters and the poses) object points
5368     *     objectPoints. See projectPoints for details.
5369     *   </li>
5370     * </ul>
5371     *
5372     * <b>Note:</b>
5373     *     If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
5374     *     and REF: calibrateCamera returns bad values (zero distortion coefficients, \(c_x\) and
5375     *     \(c_y\) very far from the image center, and/or large differences between \(f_x\) and
5376     *     \(f_y\) (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
5377     *     instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
5378     *
5379     * SEE:
5380     *    calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
5381     *    undistort
5382     */
5383    public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors, int flags) {
5384        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5385        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
5386        Mat rvecs_mat = new Mat();
5387        Mat tvecs_mat = new Mat();
5388        double retVal = calibrateCameraExtended_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj, flags);
5389        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
5390        rvecs_mat.release();
5391        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
5392        tvecs_mat.release();
5393        return retVal;
5394    }
5395
5396    /**
5397     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
5398     * pattern.
5399     *
5400     * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
5401     * the calibration pattern coordinate space (e.g. std::vector&lt;std::vector&lt;cv::Vec3f&gt;&gt;). The outer
5402     * vector contains as many elements as the number of pattern views. If the same calibration pattern
     * is shown in each view and it is fully visible, all the vectors will be the same. It is, however,
     * possible to use partially occluded patterns or even different patterns in different views; then,
     * the vectors will differ. Although the points are 3D, they all lie in the calibration pattern's
5406     * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
5407     * In the old interface all the vectors of object points from different views are concatenated
5408     * together.
5409     * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
5410     * pattern points (e.g. std::vector&lt;std::vector&lt;cv::Vec2f&gt;&gt;). imagePoints.size() and
5411     * objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
5412     * respectively. In the old interface all the vectors of object points from different views are
5413     * concatenated together.
5414     * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
5415     * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
5416     * \(\cameramatrix{A}\) . If REF: CALIB_USE_INTRINSIC_GUESS
5417     * and/or REF: CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
5418     * initialized before calling the function.
5419     * @param distCoeffs Input/output vector of distortion coefficients
5420     * \(\distcoeffs\).
5421     * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
     * (e.g. std::vector&lt;cv::Mat&gt;). That is, each i-th rotation vector together with the corresponding
5423     * i-th translation vector (see the next output parameter description) brings the calibration pattern
5424     * from the object coordinate space (in which object points are specified) to the camera coordinate
5425     * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
5426     * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
5427     * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
5428     * space.
5429     * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
     * description above.
5431     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
5432     * parameters. Order of deviations values:
5433     * \((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
     *  s_4, \tau_x, \tau_y)\). If one of the parameters is not estimated, its deviation is equal to zero.
5435     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
5436     * parameters. Order of deviations values: \((R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\) where M is
5437     * the number of pattern views. \(R_i, T_i\) are concatenated 1x3 vectors.
5438     *  @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
5439     * <ul>
5440     *   <li>
5441     *    REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
5442     * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
5443     * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
5444     * Note, that if intrinsic parameters are known, there is no need to use this function just to
5445     * estimate extrinsic parameters. Use solvePnP instead.
5446     *   </li>
5447     *   <li>
5448     *    REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
5449     * optimization. It stays at the center or at a different location specified when
5450     *  REF: CALIB_USE_INTRINSIC_GUESS is set too.
5451     *   </li>
5452     *   <li>
5453     *    REF: CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
5454     * ratio fx/fy stays the same as in the input cameraMatrix . When
5455     *  REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
5456     * ignored, only their ratio is computed and used further.
5457     *   </li>
5458     *   <li>
5459     *    REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \((p_1, p_2)\) are set
5460     * to zeros and stay zero.
5461     *   </li>
5462     *   <li>
5463     *    REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
5464     * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
5465     * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5466     *   </li>
5467     *   <li>
     *    REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide
5469     * backward compatibility, this extra flag should be explicitly specified to make the
5470     * calibration function use the rational model and return 8 coefficients. If the flag is not
5471     * set, the function computes and returns only 5 distortion coefficients.
5472     *   </li>
5473     *   <li>
     *    REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide
5475     * backward compatibility, this extra flag should be explicitly specified to make the
5476     * calibration function use the thin prism model and return 12 coefficients. If the flag is not
5477     * set, the function computes and returns only 5 distortion coefficients.
5478     *   </li>
5479     *   <li>
5480     *    REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
5481     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
5482     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5483     *   </li>
5484     *   <li>
     *    REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide
5486     * backward compatibility, this extra flag should be explicitly specified to make the
5487     * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
5488     * set, the function computes and returns only 5 distortion coefficients.
5489     *   </li>
5490     *   <li>
5491     *    REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
5492     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
5493     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5494     *   </li>
5495     * </ul>
5496     *
5497     * @return the overall RMS re-projection error.
5498     *
5499     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
5500     * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
5501     * points and their corresponding 2D projections in each view must be specified. That may be achieved
5502     * by using an object with known geometry and easily detectable feature points. Such an object is
5503     * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
5504     * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
5505     * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
5506     * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
5507     * be used as long as initial cameraMatrix is provided.
5508     *
5509     * The algorithm performs the following steps:
5510     *
5511     * <ul>
5512     *   <li>
5513     *    Compute the initial intrinsic parameters (the option only available for planar calibration
5514     *     patterns) or read them from the input parameters. The distortion coefficients are all set to
5515     *     zeros initially unless some of CALIB_FIX_K? are specified.
5516     *   </li>
5517     * </ul>
5518     *
5519     * <ul>
5520     *   <li>
5521     *    Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
5522     *     done using solvePnP .
5523     *   </li>
5524     * </ul>
5525     *
5526     * <ul>
5527     *   <li>
5528     *    Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
5529     *     that is, the total sum of squared distances between the observed feature points imagePoints and
5530     *     the projected (using the current estimates for camera parameters and the poses) object points
5531     *     objectPoints. See projectPoints for details.
5532     *   </li>
5533     * </ul>
5534     *
5535     * <b>Note:</b>
5536     *     If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
5537     *     and REF: calibrateCamera returns bad values (zero distortion coefficients, \(c_x\) and
5538     *     \(c_y\) very far from the image center, and/or large differences between \(f_x\) and
5539     *     \(f_y\) (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
5540     *     instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
5541     *
5542     * SEE:
5543     *    calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
5544     *    undistort
5545     */
5546    public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors) {
5547        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5548        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
5549        Mat rvecs_mat = new Mat();
5550        Mat tvecs_mat = new Mat();
5551        double retVal = calibrateCameraExtended_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj);
5552        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
5553        rvecs_mat.release();
5554        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
5555        tvecs_mat.release();
5556        return retVal;
5557    }
5558
5559
5560    //
5561    // C++:  double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
5562    //
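
    // A minimal usage sketch for the overloads below (illustrative; it assumes objectPoints and
    // imagePoints were collected beforehand, e.g. with findChessboardCorners, and that 640x480 is a
    // representative image size):
    //
    //     Mat cameraMatrix = new Mat(), distCoeffs = new Mat();
    //     List<Mat> rvecs = new ArrayList<>(), tvecs = new ArrayList<>();
    //     double rms = Calib3d.calibrateCamera(objectPoints, imagePoints, new Size(640, 480),
    //             cameraMatrix, distCoeffs, rvecs, tvecs);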
5563
5564    public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags, TermCriteria criteria) {
5565        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5566        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
5567        Mat rvecs_mat = new Mat();
5568        Mat tvecs_mat = new Mat();
5569        double retVal = calibrateCamera_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
5570        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
5571        rvecs_mat.release();
5572        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
5573        tvecs_mat.release();
5574        return retVal;
5575    }
5576
5577    public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags) {
5578        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5579        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
5580        Mat rvecs_mat = new Mat();
5581        Mat tvecs_mat = new Mat();
5582        double retVal = calibrateCamera_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags);
5583        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
5584        rvecs_mat.release();
5585        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
5586        tvecs_mat.release();
5587        return retVal;
5588    }
5589
5590    public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs) {
5591        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5592        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
5593        Mat rvecs_mat = new Mat();
5594        Mat tvecs_mat = new Mat();
5595        double retVal = calibrateCamera_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
5596        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
5597        rvecs_mat.release();
5598        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
5599        tvecs_mat.release();
5600        return retVal;
5601    }
5602
5603
5604    //
5605    // C++:  double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& stdDeviationsObjPoints, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
5606    //
5607
5608    /**
5609     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
5610     *
     * This function is an extension of calibrateCamera() with the object-releasing method that was
5612     * proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
5613     * targets (calibration plates), this method can dramatically improve the precision of the estimated
5614     * camera parameters. Both the object-releasing method and standard method are supported by this
5615     * function. Use the parameter <b>iFixedPoint</b> for method selection. In the internal implementation,
5616     * calibrateCamera() is a wrapper for this function.
5617     *
5618     * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
     * coordinate space. See calibrateCamera() for details. If the object-releasing method is to be used,
     * the identical calibration board must be used in each view, it must be fully visible, and all
     * objectPoints[i] must be the same, and all points should lie roughly in a plane. <b>The calibration
5622     * target has to be rigid, or at least static if the camera (rather than the calibration target) is
5623     * shifted for grabbing images.</b>
5624     * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
5625     * calibrateCamera() for details.
5626     * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
5627     * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
     * a switch for calibration method selection. If the object-releasing method is to be used, pass a
     * value in the range [1, objectPoints[0].size()-2]; a value outside this range selects the standard
     * calibration method. Usually the top-right corner point of the calibration
     * board grid is recommended to be fixed when the object-releasing method is used. According to
     * CITE: strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
     * and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
     * newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
5635     * @param cameraMatrix Output 3x3 floating-point camera matrix. See calibrateCamera() for details.
5636     * @param distCoeffs Output vector of distortion coefficients. See calibrateCamera() for details.
5637     * @param rvecs Output vector of rotation vectors estimated for each pattern view. See calibrateCamera()
5638     * for details.
5639     * @param tvecs Output vector of translation vectors estimated for each pattern view.
5640     * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
5641     * be scaled based on three fixed points. The returned coordinates are accurate only if the above
5642     * mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter
5643     * is ignored with standard calibration method.
5644     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
5645     * See calibrateCamera() for details.
5646     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
5647     * See calibrateCamera() for details.
5648     * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
5649     * of calibration pattern points. It has the same size and order as objectPoints[0] vector. This
5650     * parameter is ignored with standard calibration method.
5651     *  @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
5652     * @param flags Different flags that may be zero or a combination of some predefined values. See
     * calibrateCamera() for details. If the object-releasing method is used, the calibration time may
     * be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration, at the cost of
     * potentially less precise and less stable results in some rare cases.
5656     * @param criteria Termination criteria for the iterative optimization algorithm.
5657     *
5658     * @return the overall RMS re-projection error.
5659     *
5660     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
5661     * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
5662     * calibrateCamera() for other detailed explanations.
5663     * SEE:
5664     *    calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
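     *
     * A minimal usage sketch (illustrative only; it assumes a 9x6 planar board whose corners were
     * already collected into {@code objectPoints} and {@code imagePoints}, an {@code imageSize} matching
     * the calibration images, and object points ordered row by row so that index {@code boardCols - 1}
     * is the top-right corner of the grid):
     * <pre>{@code
     * int boardCols = 9;                        // assumed grid width
     * int iFixedPoint = boardCols - 1;          // fix the top-right corner: object-releasing method
     * Mat cameraMatrix = new Mat(), distCoeffs = new Mat(), newObjPoints = new Mat();
     * Mat stdDevIntrinsics = new Mat(), stdDevExtrinsics = new Mat();
     * Mat stdDevObjPoints = new Mat(), perViewErrors = new Mat();
     * List<Mat> rvecs = new ArrayList<>(), tvecs = new ArrayList<>();
     * double rms = Calib3d.calibrateCameraROExtended(objectPoints, imagePoints, imageSize, iFixedPoint,
     *         cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints,
     *         stdDevIntrinsics, stdDevExtrinsics, stdDevObjPoints, perViewErrors,
     *         0, new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 100, 1e-9));
     * }</pre>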
5665     */
5666    public static double calibrateCameraROExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat stdDeviationsObjPoints, Mat perViewErrors, int flags, TermCriteria criteria) {
5667        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5668        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
5669        Mat rvecs_mat = new Mat();
5670        Mat tvecs_mat = new Mat();
5671        double retVal = calibrateCameraROExtended_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, stdDeviationsObjPoints.nativeObj, perViewErrors.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
5672        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
5673        rvecs_mat.release();
5674        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
5675        tvecs_mat.release();
5676        return retVal;
5677    }
5678
5679    /**
5680     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
5681     *
     * This function is an extension of calibrateCamera() with the object-releasing method that was
5683     * proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
5684     * targets (calibration plates), this method can dramatically improve the precision of the estimated
5685     * camera parameters. Both the object-releasing method and standard method are supported by this
5686     * function. Use the parameter <b>iFixedPoint</b> for method selection. In the internal implementation,
5687     * calibrateCamera() is a wrapper for this function.
5688     *
5689     * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
     * coordinate space. See calibrateCamera() for details. If the object-releasing method is to be used,
     * the identical calibration board must be used in each view, it must be fully visible, and all
     * objectPoints[i] must be the same, and all points should lie roughly in a plane. <b>The calibration
5693     * target has to be rigid, or at least static if the camera (rather than the calibration target) is
5694     * shifted for grabbing images.</b>
5695     * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
5696     * calibrateCamera() for details.
5697     * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
5698     * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
     * a switch for calibration method selection. If the object-releasing method is to be used, pass a
     * value in the range [1, objectPoints[0].size()-2]; a value outside this range selects the standard
     * calibration method. Usually the top-right corner point of the calibration
     * board grid is recommended to be fixed when the object-releasing method is used. According to
     * CITE: strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
     * and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
     * newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
5706     * @param cameraMatrix Output 3x3 floating-point camera matrix. See calibrateCamera() for details.
5707     * @param distCoeffs Output vector of distortion coefficients. See calibrateCamera() for details.
5708     * @param rvecs Output vector of rotation vectors estimated for each pattern view. See calibrateCamera()
5709     * for details.
5710     * @param tvecs Output vector of translation vectors estimated for each pattern view.
5711     * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
5712     * be scaled based on three fixed points. The returned coordinates are accurate only if the above
5713     * mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter
5714     * is ignored with standard calibration method.
5715     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
5716     * See calibrateCamera() for details.
5717     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
5718     * See calibrateCamera() for details.
5719     * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
5720     * of calibration pattern points. It has the same size and order as objectPoints[0] vector. This
5721     * parameter is ignored with standard calibration method.
5722     *  @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
5723     * @param flags Different flags that may be zero or a combination of some predefined values. See
     * calibrateCamera() for details. If the object-releasing method is used, the calibration time may
     * be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration, at the cost of
     * potentially less precise and less stable results in some rare cases.
5727     *
5728     * @return the overall RMS re-projection error.
5729     *
5730     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
5731     * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
5732     * calibrateCamera() for other detailed explanations.
5733     * SEE:
5734     *    calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
5735     */
5736    public static double calibrateCameraROExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat stdDeviationsObjPoints, Mat perViewErrors, int flags) {
5737        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5738        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
5739        Mat rvecs_mat = new Mat();
5740        Mat tvecs_mat = new Mat();
5741        double retVal = calibrateCameraROExtended_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, stdDeviationsObjPoints.nativeObj, perViewErrors.nativeObj, flags);
5742        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
5743        rvecs_mat.release();
5744        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
5745        tvecs_mat.release();
5746        return retVal;
5747    }
5748
5749    /**
5750     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
5751     *
     * This function is an extension of calibrateCamera() with the object-releasing method that was
5753     * proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
5754     * targets (calibration plates), this method can dramatically improve the precision of the estimated
5755     * camera parameters. Both the object-releasing method and standard method are supported by this
5756     * function. Use the parameter <b>iFixedPoint</b> for method selection. In the internal implementation,
5757     * calibrateCamera() is a wrapper for this function.
5758     *
5759     * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
     * coordinate space. See calibrateCamera() for details. If the object-releasing method is to be used,
     * the identical calibration board must be used in each view, it must be fully visible, and all
     * objectPoints[i] must be the same, and all points should lie roughly in a plane. <b>The calibration
5763     * target has to be rigid, or at least static if the camera (rather than the calibration target) is
5764     * shifted for grabbing images.</b>
5765     * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
5766     * calibrateCamera() for details.
5767     * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
5768     * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
     * a switch for calibration method selection. If the object-releasing method is to be used, pass a
     * value in the range [1, objectPoints[0].size()-2]; a value outside this range selects the standard
     * calibration method. Usually the top-right corner point of the calibration
     * board grid is recommended to be fixed when the object-releasing method is used. According to
     * CITE: strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
     * and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
     * newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
5776     * @param cameraMatrix Output 3x3 floating-point camera matrix. See calibrateCamera() for details.
5777     * @param distCoeffs Output vector of distortion coefficients. See calibrateCamera() for details.
5778     * @param rvecs Output vector of rotation vectors estimated for each pattern view. See calibrateCamera()
5779     * for details.
5780     * @param tvecs Output vector of translation vectors estimated for each pattern view.
5781     * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
5782     * be scaled based on three fixed points. The returned coordinates are accurate only if the above
5783     * mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter
5784     * is ignored with standard calibration method.
5785     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
5786     * See calibrateCamera() for details.
5787     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
5788     * See calibrateCamera() for details.
5789     * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
5790     * of calibration pattern points. It has the same size and order as objectPoints[0] vector. This
5791     * parameter is ignored with standard calibration method.
5792     *  @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
     * If the object-releasing method is used, the calibration time may be much longer.
5796     *
5797     * @return the overall RMS re-projection error.
5798     *
5799     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
5800     * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
5801     * calibrateCamera() for other detailed explanations.
5802     * SEE:
5803     *    calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
5804     */
5805    public static double calibrateCameraROExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat stdDeviationsObjPoints, Mat perViewErrors) {
5806        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5807        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
5808        Mat rvecs_mat = new Mat();
5809        Mat tvecs_mat = new Mat();
5810        double retVal = calibrateCameraROExtended_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, stdDeviationsObjPoints.nativeObj, perViewErrors.nativeObj);
5811        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
5812        rvecs_mat.release();
5813        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
5814        tvecs_mat.release();
5815        return retVal;
5816    }
5817
5818
5819    //
5820    // C++:  double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
5821    //
5822
5823    public static double calibrateCameraRO(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, int flags, TermCriteria criteria) {
5824        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5825        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
5826        Mat rvecs_mat = new Mat();
5827        Mat tvecs_mat = new Mat();
5828        double retVal = calibrateCameraRO_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
5829        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
5830        rvecs_mat.release();
5831        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
5832        tvecs_mat.release();
5833        return retVal;
5834    }
5835
5836    public static double calibrateCameraRO(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, int flags) {
5837        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5838        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
5839        Mat rvecs_mat = new Mat();
5840        Mat tvecs_mat = new Mat();
5841        double retVal = calibrateCameraRO_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, flags);
5842        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
5843        rvecs_mat.release();
5844        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
5845        tvecs_mat.release();
5846        return retVal;
5847    }
5848
5849    public static double calibrateCameraRO(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints) {
5850        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5851        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
5852        Mat rvecs_mat = new Mat();
5853        Mat tvecs_mat = new Mat();
5854        double retVal = calibrateCameraRO_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj);
5855        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
5856        rvecs_mat.release();
5857        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
5858        tvecs_mat.release();
5859        return retVal;
5860    }
5861
5862
5863    //
5864    // C++:  void cv::calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength, Point2d& principalPoint, double& aspectRatio)
5865    //
5866
5867    /**
5868     * Computes useful camera characteristics from the camera intrinsic matrix.
5869     *
5870     * @param cameraMatrix Input camera intrinsic matrix that can be estimated by calibrateCamera or
5871     * stereoCalibrate .
5872     * @param imageSize Input image size in pixels.
5873     * @param apertureWidth Physical width in mm of the sensor.
5874     * @param apertureHeight Physical height in mm of the sensor.
5875     * @param fovx Output field of view in degrees along the horizontal sensor axis.
5876     * @param fovy Output field of view in degrees along the vertical sensor axis.
5877     * @param focalLength Focal length of the lens in mm.
5878     * @param principalPoint Principal point in mm.
5879     * @param aspectRatio \(f_y/f_x\)
5880     *
5881     * The function computes various useful camera characteristics from the previously estimated camera
5882     * matrix.
5883     *
5884     * <b>Note:</b>
5885     *    Do keep in mind that the unity measure 'mm' stands for whatever unit of measure one chooses for
5886     *     the chessboard pitch (it can thus be any value).
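     *
     * A minimal usage sketch (illustrative only; the 6.17 x 4.55 mm sensor size and the 640x480 image
     * size are assumed example values, and {@code cameraMatrix} comes from a previous calibration):
     * <pre>{@code
     * double[] fovx = new double[1], fovy = new double[1];
     * double[] focalLength = new double[1], aspectRatio = new double[1];
     * Point principalPoint = new Point();
     * Calib3d.calibrationMatrixValues(cameraMatrix, new Size(640, 480), 6.17, 4.55,
     *         fovx, fovy, focalLength, principalPoint, aspectRatio);
     * System.out.println("fovx = " + fovx[0] + " deg, focal length = " + focalLength[0] + " mm");
     * }</pre>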
5887     */
5888    public static void calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double[] fovx, double[] fovy, double[] focalLength, Point principalPoint, double[] aspectRatio) {
5889        double[] fovx_out = new double[1];
5890        double[] fovy_out = new double[1];
5891        double[] focalLength_out = new double[1];
5892        double[] principalPoint_out = new double[2];
5893        double[] aspectRatio_out = new double[1];
5894        calibrationMatrixValues_0(cameraMatrix.nativeObj, imageSize.width, imageSize.height, apertureWidth, apertureHeight, fovx_out, fovy_out, focalLength_out, principalPoint_out, aspectRatio_out);
5895        if(fovx!=null) fovx[0] = (double)fovx_out[0];
5896        if(fovy!=null) fovy[0] = (double)fovy_out[0];
5897        if(focalLength!=null) focalLength[0] = (double)focalLength_out[0];
5898        if(principalPoint!=null){ principalPoint.x = principalPoint_out[0]; principalPoint.y = principalPoint_out[1]; } 
5899        if(aspectRatio!=null) aspectRatio[0] = (double)aspectRatio_out[0];
5900    }
5901
5902
5903    //
5904    // C++:  double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, Mat& perViewErrors, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
5905    //
5906
5907    /**
     * Calibrates a stereo camera setup. This function finds the intrinsic parameters
5909     * for each of the two cameras and the extrinsic parameters between the two cameras.
5910     *
5911     * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
5912     * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
5913     * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
5914     * equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
5915     * be equal for each i.
5916     * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
5917     * observed by the first camera. The same structure as in REF: calibrateCamera.
5918     * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
5919     * observed by the second camera. The same structure as in REF: calibrateCamera.
5920     * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
5921     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
5922     * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
5923     * REF: calibrateCamera.
     * @param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description for
     * cameraMatrix1.
5926     * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
5927     * description for distCoeffs1.
5928     * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
5929     * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
5930     * points given in the first camera's coordinate system to points in the second camera's
5931     * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
5932     * from the first camera's coordinate system to the second camera's coordinate system. Due to its
5933     * duality, this tuple is equivalent to the position of the first camera with respect to the
5934     * second camera coordinate system.
5935     * @param T Output translation vector, see description above.
5936     * @param E Output essential matrix.
5937     * @param F Output fundamental matrix.
5938     * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
5939     * @param flags Different flags that may be zero or a combination of the following values:
5940     * <ul>
5941     *   <li>
5942     *    REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
5943     * matrices are estimated.
5944     *   </li>
5945     *   <li>
5946     *    REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
5947     * according to the specified flags. Initial values are provided by the user.
5948     *   </li>
5949     *   <li>
5950     *    REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
5951     * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
5952     *   </li>
5953     *   <li>
5954     *    REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
5955     *   </li>
5956     *   <li>
5957     *    REF: CALIB_FIX_FOCAL_LENGTH Fix \(f^{(j)}_x\) and \(f^{(j)}_y\) .
5958     *   </li>
5959     *   <li>
5960     *    REF: CALIB_FIX_ASPECT_RATIO Optimize \(f^{(j)}_y\) . Fix the ratio \(f^{(j)}_x/f^{(j)}_y\)
5961     * .
5962     *   </li>
5963     *   <li>
5964     *    REF: CALIB_SAME_FOCAL_LENGTH Enforce \(f^{(0)}_x=f^{(1)}_x\) and \(f^{(0)}_y=f^{(1)}_y\) .
5965     *   </li>
5966     *   <li>
5967     *    REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
5968     * zeros and fix there.
5969     *   </li>
5970     *   <li>
5971     *    REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
5972     * distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
5973     * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5974     *   </li>
5975     *   <li>
5976     *    REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward
5977     * compatibility, this extra flag should be explicitly specified to make the calibration
5978     * function use the rational model and return 8 coefficients. If the flag is not set, the
5979     * function computes and returns only 5 distortion coefficients.
5980     *   </li>
5981     *   <li>
5982     *    REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
5983     * backward compatibility, this extra flag should be explicitly specified to make the
5984     * calibration function use the thin prism model and return 12 coefficients. If the flag is not
5985     * set, the function computes and returns only 5 distortion coefficients.
5986     *   </li>
5987     *   <li>
5988     *    REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
5989     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
5990     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5991     *   </li>
5992     *   <li>
5993     *    REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
5994     * backward compatibility, this extra flag should be explicitly specified to make the
5995     * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
5996     * set, the function computes and returns only 5 distortion coefficients.
5997     *   </li>
5998     *   <li>
5999     *    REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
6000     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
6001     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
     *   </li>
     * </ul>
     * @param criteria Termination criteria for the iterative optimization algorithm.
6005     *
6006     * The function estimates the transformation between two cameras making a stereo pair. If one computes
6007     * the poses of an object relative to the first camera and to the second camera,
6008     * ( \(R_1\),\(T_1\) ) and (\(R_2\),\(T_2\)), respectively, for a stereo camera where the
6009     * relative position and orientation between the two cameras are fixed, then those poses definitely
6010     * relate to each other. This means, if the relative position and orientation (\(R\),\(T\)) of the
6011     * two cameras is known, it is possible to compute (\(R_2\),\(T_2\)) when (\(R_1\),\(T_1\)) is
6012     * given. This is what the described function does. It computes (\(R\),\(T\)) such that:
6013     *
6014     * \(R_2=R R_1\)
6015     * \(T_2=R T_1 + T.\)
6016     *
6017     * Therefore, one can compute the coordinate representation of a 3D point for the second camera's
6018     * coordinate system when given the point's coordinate representation in the first camera's coordinate
6019     * system:
6020     *
6021     * \(\begin{bmatrix}
6022     * X_2 \\
6023     * Y_2 \\
6024     * Z_2 \\
6025     * 1
6026     * \end{bmatrix} = \begin{bmatrix}
6027     * R &amp; T \\
6028     * 0 &amp; 1
6029     * \end{bmatrix} \begin{bmatrix}
6030     * X_1 \\
6031     * Y_1 \\
6032     * Z_1 \\
6033     * 1
6034     * \end{bmatrix}.\)
6035     *
6036     *
6037     * Optionally, it computes the essential matrix E:
6038     *
6039     * \(E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R\)
6040     *
6041     * where \(T_i\) are components of the translation vector \(T\) : \(T=[T_0, T_1, T_2]^T\) .
6042     * And the function can also compute the fundamental matrix F:
6043     *
6044     * \(F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\)
6045     *
6046     * Besides the stereo-related information, the function can also perform a full calibration of each of
6047     * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
6048     * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
6049     * estimated with high accuracy for each of the cameras individually (for example, using
6050     * calibrateCamera ), you are recommended to do so and then pass REF: CALIB_FIX_INTRINSIC flag to the
6051     * function along with the computed intrinsic parameters. Otherwise, if all the parameters are
6052     * estimated at once, it makes sense to restrict some parameters, for example, pass
6053     *  REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
6054     * reasonable assumption.
6055     *
6056     * Similarly to calibrateCamera, the function minimizes the total re-projection error for all the
6057     * points in all the available views from both cameras. The function returns the final value of the
6058     * re-projection error.
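     *
     * <p>A minimal calling sketch, assuming the per-view point lists were collected beforehand
     * (e.g. with findChessboardCorners) and that both intrinsic matrices were pre-calibrated;
     * all variable names are illustrative:
     * <pre>{@code
     * Mat R = new Mat(), T = new Mat(), E = new Mat(), F = new Mat(), perViewErrors = new Mat();
     * double rms = Calib3d.stereoCalibrateExtended(objectPoints, imagePoints1, imagePoints2,
     *         cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize,
     *         R, T, E, F, perViewErrors, Calib3d.CALIB_FIX_INTRINSIC,
     *         new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 30, 1e-6));
     * }</pre>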
     * @return the final value of the re-projection error.
6060     */
6061    public static double stereoCalibrateExtended(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, Mat perViewErrors, int flags, TermCriteria criteria) {
6062        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
6063        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
6064        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
6065        return stereoCalibrateExtended_0(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, perViewErrors.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
6066    }
6067
6068    /**
     * Calibrates a stereo camera setup. This function finds the intrinsic parameters
6070     * for each of the two cameras and the extrinsic parameters between the two cameras.
6071     *
6072     * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
6073     * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
6074     * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
6075     * equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
6076     * be equal for each i.
6077     * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
6078     * observed by the first camera. The same structure as in REF: calibrateCamera.
6079     * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
6080     * observed by the second camera. The same structure as in REF: calibrateCamera.
6081     * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
6082     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
6083     * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
6084     * REF: calibrateCamera.
     * @param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description for
     * cameraMatrix1.
6087     * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
6088     * description for distCoeffs1.
6089     * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
6090     * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
6091     * points given in the first camera's coordinate system to points in the second camera's
6092     * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
6093     * from the first camera's coordinate system to the second camera's coordinate system. Due to its
6094     * duality, this tuple is equivalent to the position of the first camera with respect to the
6095     * second camera coordinate system.
6096     * @param T Output translation vector, see description above.
6097     * @param E Output essential matrix.
6098     * @param F Output fundamental matrix.
6099     * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
6100     * @param flags Different flags that may be zero or a combination of the following values:
6101     * <ul>
6102     *   <li>
6103     *    REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
6104     * matrices are estimated.
6105     *   </li>
6106     *   <li>
6107     *    REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
6108     * according to the specified flags. Initial values are provided by the user.
6109     *   </li>
6110     *   <li>
6111     *    REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
6112     * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
6113     *   </li>
6114     *   <li>
6115     *    REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
6116     *   </li>
6117     *   <li>
6118     *    REF: CALIB_FIX_FOCAL_LENGTH Fix \(f^{(j)}_x\) and \(f^{(j)}_y\) .
6119     *   </li>
6120     *   <li>
6121     *    REF: CALIB_FIX_ASPECT_RATIO Optimize \(f^{(j)}_y\) . Fix the ratio \(f^{(j)}_x/f^{(j)}_y\)
6122     * .
6123     *   </li>
6124     *   <li>
6125     *    REF: CALIB_SAME_FOCAL_LENGTH Enforce \(f^{(0)}_x=f^{(1)}_x\) and \(f^{(0)}_y=f^{(1)}_y\) .
6126     *   </li>
6127     *   <li>
6128     *    REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
6129     * zeros and fix there.
6130     *   </li>
6131     *   <li>
6132     *    REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
6133     * distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
6134     * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
6135     *   </li>
6136     *   <li>
6137     *    REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward
6138     * compatibility, this extra flag should be explicitly specified to make the calibration
6139     * function use the rational model and return 8 coefficients. If the flag is not set, the
6140     * function computes and returns only 5 distortion coefficients.
6141     *   </li>
6142     *   <li>
6143     *    REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
6144     * backward compatibility, this extra flag should be explicitly specified to make the
6145     * calibration function use the thin prism model and return 12 coefficients. If the flag is not
6146     * set, the function computes and returns only 5 distortion coefficients.
6147     *   </li>
6148     *   <li>
6149     *    REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
6150     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
6151     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
6152     *   </li>
6153     *   <li>
6154     *    REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
6155     * backward compatibility, this extra flag should be explicitly specified to make the
6156     * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
6157     * set, the function computes and returns only 5 distortion coefficients.
6158     *   </li>
6159     *   <li>
6160     *    REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
6161     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
6162     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
6163     *   </li>
6164     * </ul>
6165     *
6166     * The function estimates the transformation between two cameras making a stereo pair. If one computes
6167     * the poses of an object relative to the first camera and to the second camera,
6168     * ( \(R_1\),\(T_1\) ) and (\(R_2\),\(T_2\)), respectively, for a stereo camera where the
6169     * relative position and orientation between the two cameras are fixed, then those poses definitely
6170     * relate to each other. This means, if the relative position and orientation (\(R\),\(T\)) of the
6171     * two cameras is known, it is possible to compute (\(R_2\),\(T_2\)) when (\(R_1\),\(T_1\)) is
6172     * given. This is what the described function does. It computes (\(R\),\(T\)) such that:
6173     *
6174     * \(R_2=R R_1\)
6175     * \(T_2=R T_1 + T.\)
6176     *
6177     * Therefore, one can compute the coordinate representation of a 3D point for the second camera's
6178     * coordinate system when given the point's coordinate representation in the first camera's coordinate
6179     * system:
6180     *
6181     * \(\begin{bmatrix}
6182     * X_2 \\
6183     * Y_2 \\
6184     * Z_2 \\
6185     * 1
6186     * \end{bmatrix} = \begin{bmatrix}
6187     * R &amp; T \\
6188     * 0 &amp; 1
6189     * \end{bmatrix} \begin{bmatrix}
6190     * X_1 \\
6191     * Y_1 \\
6192     * Z_1 \\
6193     * 1
6194     * \end{bmatrix}.\)
6195     *
6196     *
6197     * Optionally, it computes the essential matrix E:
6198     *
6199     * \(E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R\)
6200     *
6201     * where \(T_i\) are components of the translation vector \(T\) : \(T=[T_0, T_1, T_2]^T\) .
6202     * And the function can also compute the fundamental matrix F:
6203     *
6204     * \(F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\)
6205     *
6206     * Besides the stereo-related information, the function can also perform a full calibration of each of
6207     * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
6208     * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
6209     * estimated with high accuracy for each of the cameras individually (for example, using
6210     * calibrateCamera ), you are recommended to do so and then pass REF: CALIB_FIX_INTRINSIC flag to the
6211     * function along with the computed intrinsic parameters. Otherwise, if all the parameters are
6212     * estimated at once, it makes sense to restrict some parameters, for example, pass
6213     *  REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
6214     * reasonable assumption.
6215     *
6216     * Similarly to calibrateCamera, the function minimizes the total re-projection error for all the
6217     * points in all the available views from both cameras. The function returns the final value of the
6218     * re-projection error.
     * @return the final value of the re-projection error.
6220     */
6221    public static double stereoCalibrateExtended(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, Mat perViewErrors, int flags) {
6222        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
6223        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
6224        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
6225        return stereoCalibrateExtended_1(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, perViewErrors.nativeObj, flags);
6226    }
6227
6228    /**
     * Calibrates a stereo camera setup. This function finds the intrinsic parameters
6230     * for each of the two cameras and the extrinsic parameters between the two cameras.
6231     *
6232     * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
6233     * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
6234     * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
6235     * equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
6236     * be equal for each i.
6237     * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
6238     * observed by the first camera. The same structure as in REF: calibrateCamera.
6239     * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
6240     * observed by the second camera. The same structure as in REF: calibrateCamera.
6241     * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
6242     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
6243     * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
6244     * REF: calibrateCamera.
     * @param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description for
     * cameraMatrix1.
6247     * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
6248     * description for distCoeffs1.
6249     * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
6250     * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
6251     * points given in the first camera's coordinate system to points in the second camera's
6252     * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
6253     * from the first camera's coordinate system to the second camera's coordinate system. Due to its
6254     * duality, this tuple is equivalent to the position of the first camera with respect to the
6255     * second camera coordinate system.
6256     * @param T Output translation vector, see description above.
6257     * @param E Output essential matrix.
6258     * @param F Output fundamental matrix.
     * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
     *
     * This overload uses the default flags (REF: CALIB_FIX_INTRINSIC) and the default termination
     * criteria. The following flags can be supplied through the overloads that take a flags argument:
     * <ul>
6261     *   <li>
6262     *    REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
6263     * matrices are estimated.
6264     *   </li>
6265     *   <li>
6266     *    REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
6267     * according to the specified flags. Initial values are provided by the user.
6268     *   </li>
6269     *   <li>
6270     *    REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
6271     * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
6272     *   </li>
6273     *   <li>
6274     *    REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
6275     *   </li>
6276     *   <li>
6277     *    REF: CALIB_FIX_FOCAL_LENGTH Fix \(f^{(j)}_x\) and \(f^{(j)}_y\) .
6278     *   </li>
6279     *   <li>
6280     *    REF: CALIB_FIX_ASPECT_RATIO Optimize \(f^{(j)}_y\) . Fix the ratio \(f^{(j)}_x/f^{(j)}_y\)
6281     * .
6282     *   </li>
6283     *   <li>
6284     *    REF: CALIB_SAME_FOCAL_LENGTH Enforce \(f^{(0)}_x=f^{(1)}_x\) and \(f^{(0)}_y=f^{(1)}_y\) .
6285     *   </li>
6286     *   <li>
6287     *    REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
6288     * zeros and fix there.
6289     *   </li>
6290     *   <li>
6291     *    REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
6292     * distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
6293     * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
6294     *   </li>
6295     *   <li>
6296     *    REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward
6297     * compatibility, this extra flag should be explicitly specified to make the calibration
6298     * function use the rational model and return 8 coefficients. If the flag is not set, the
6299     * function computes and returns only 5 distortion coefficients.
6300     *   </li>
6301     *   <li>
6302     *    REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
6303     * backward compatibility, this extra flag should be explicitly specified to make the
6304     * calibration function use the thin prism model and return 12 coefficients. If the flag is not
6305     * set, the function computes and returns only 5 distortion coefficients.
6306     *   </li>
6307     *   <li>
6308     *    REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
6309     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
6310     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
6311     *   </li>
6312     *   <li>
6313     *    REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
6314     * backward compatibility, this extra flag should be explicitly specified to make the
6315     * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
6316     * set, the function computes and returns only 5 distortion coefficients.
6317     *   </li>
6318     *   <li>
6319     *    REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
6320     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
6321     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
6322     *   </li>
6323     * </ul>
6324     *
6325     * The function estimates the transformation between two cameras making a stereo pair. If one computes
6326     * the poses of an object relative to the first camera and to the second camera,
6327     * ( \(R_1\),\(T_1\) ) and (\(R_2\),\(T_2\)), respectively, for a stereo camera where the
6328     * relative position and orientation between the two cameras are fixed, then those poses definitely
6329     * relate to each other. This means, if the relative position and orientation (\(R\),\(T\)) of the
6330     * two cameras is known, it is possible to compute (\(R_2\),\(T_2\)) when (\(R_1\),\(T_1\)) is
6331     * given. This is what the described function does. It computes (\(R\),\(T\)) such that:
6332     *
6333     * \(R_2=R R_1\)
6334     * \(T_2=R T_1 + T.\)
6335     *
6336     * Therefore, one can compute the coordinate representation of a 3D point for the second camera's
6337     * coordinate system when given the point's coordinate representation in the first camera's coordinate
6338     * system:
6339     *
6340     * \(\begin{bmatrix}
6341     * X_2 \\
6342     * Y_2 \\
6343     * Z_2 \\
6344     * 1
6345     * \end{bmatrix} = \begin{bmatrix}
6346     * R &amp; T \\
6347     * 0 &amp; 1
6348     * \end{bmatrix} \begin{bmatrix}
6349     * X_1 \\
6350     * Y_1 \\
6351     * Z_1 \\
6352     * 1
6353     * \end{bmatrix}.\)
6354     *
6355     *
6356     * Optionally, it computes the essential matrix E:
6357     *
6358     * \(E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R\)
6359     *
6360     * where \(T_i\) are components of the translation vector \(T\) : \(T=[T_0, T_1, T_2]^T\) .
6361     * And the function can also compute the fundamental matrix F:
6362     *
6363     * \(F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\)
6364     *
6365     * Besides the stereo-related information, the function can also perform a full calibration of each of
6366     * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
6367     * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
6368     * estimated with high accuracy for each of the cameras individually (for example, using
6369     * calibrateCamera ), you are recommended to do so and then pass REF: CALIB_FIX_INTRINSIC flag to the
6370     * function along with the computed intrinsic parameters. Otherwise, if all the parameters are
6371     * estimated at once, it makes sense to restrict some parameters, for example, pass
6372     *  REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
6373     * reasonable assumption.
6374     *
6375     * Similarly to calibrateCamera, the function minimizes the total re-projection error for all the
6376     * points in all the available views from both cameras. The function returns the final value of the
6377     * re-projection error.
     * @return the final value of the re-projection error.
6379     */
6380    public static double stereoCalibrateExtended(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, Mat perViewErrors) {
6381        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
6382        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
6383        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
6384        return stereoCalibrateExtended_2(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, perViewErrors.nativeObj);
6385    }
6386
6387
6388    //
6389    // C++:  double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
6390    //
6391
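    /**
     * Same parameters as REF: stereoCalibrateExtended, except that the per-view re-projection
     * errors are not returned; see that method's documentation for details.
     *
     * <p>A minimal calling sketch (the point lists and input matrices are assumed to be prepared
     * exactly as for REF: stereoCalibrateExtended; variable names are illustrative):
     * <pre>{@code
     * Mat R = new Mat(), T = new Mat(), E = new Mat(), F = new Mat();
     * double rms = Calib3d.stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
     *         cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize,
     *         R, T, E, F, Calib3d.CALIB_FIX_INTRINSIC,
     *         new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 30, 1e-6));
     * }</pre>
     */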
6392    public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, int flags, TermCriteria criteria) {
6393        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
6394        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
6395        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
6396        return stereoCalibrate_0(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
6397    }
6398
6399    public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, int flags) {
6400        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
6401        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
6402        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
6403        return stereoCalibrate_1(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, flags);
6404    }
6405
6406    public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F) {
6407        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
6408        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
6409        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
6410        return stereoCalibrate_2(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj);
6411    }
6412
6413
6414    //
6415    // C++:  void cv::stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags = CALIB_ZERO_DISPARITY, double alpha = -1, Size newImageSize = Size(), Rect* validPixROI1 = 0, Rect* validPixROI2 = 0)
6416    //
6417
6418    /**
6419     * Computes rectification transforms for each head of a calibrated stereo camera.
6420     *
6421     * @param cameraMatrix1 First camera intrinsic matrix.
6422     * @param distCoeffs1 First camera distortion parameters.
6423     * @param cameraMatrix2 Second camera intrinsic matrix.
6424     * @param distCoeffs2 Second camera distortion parameters.
6425     * @param imageSize Size of the image used for stereo calibration.
6426     * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
6427     * see REF: stereoCalibrate.
6428     * @param T Translation vector from the coordinate system of the first camera to the second camera,
6429     * see REF: stereoCalibrate.
6430     * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
6431     * brings points given in the unrectified first camera's coordinate system to points in the rectified
6432     * first camera's coordinate system. In more technical terms, it performs a change of basis from the
6433     * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
6434     * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
6435     * brings points given in the unrectified second camera's coordinate system to points in the rectified
6436     * second camera's coordinate system. In more technical terms, it performs a change of basis from the
6437     * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
6438     * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
6439     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
6440     * rectified first camera's image.
6441     * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
6442     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
6443     * rectified second camera's image.
6444     * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
6445     * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
6446     * the function makes the principal points of each camera have the same pixel coordinates in the
6447     * rectified views. And if the flag is not set, the function may still shift the images in the
6448     * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
6449     * useful image area.
6450     * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
6451     * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
6452     * images are zoomed and shifted so that only valid pixels are visible (no black areas after
6453     * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
6454     * pixels from the original images from the cameras are retained in the rectified images (no source
6455     * image pixels are lost). Any intermediate value yields an intermediate result between
6456     * those two extreme cases.
6457     * @param newImageSize New image resolution after rectification. The same size should be passed to
6458     * initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
6459     * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
6460     * preserve details in the original image, especially when there is a big radial distortion.
6461     * @param validPixROI1 Optional output rectangles inside the rectified images where all the pixels
6462     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
6463     * (see the picture below).
6464     * @param validPixROI2 Optional output rectangles inside the rectified images where all the pixels
6465     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
6466     * (see the picture below).
6467     *
6468     * The function computes the rotation matrices for each camera that (virtually) make both camera image
6469     * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
6470     * the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
6471     * as input. As output, it provides two rotation matrices and also two projection matrices in the new
6472     * coordinates. The function distinguishes the following two cases:
6473     *
6474     * <ul>
6475     *   <li>
6476     *    <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
6477     *     mainly along the x-axis (with possible small vertical shift). In the rectified images, the
6478     *     corresponding epipolar lines in the left and right cameras are horizontal and have the same
6479     *     y-coordinate. P1 and P2 look like:
6480     *   </li>
6481     * </ul>
6482     *
6483     *     \(\texttt{P1} = \begin{bmatrix}
6484     *                         f &amp; 0 &amp; cx_1 &amp; 0 \\
6485     *                         0 &amp; f &amp; cy &amp; 0 \\
6486     *                         0 &amp; 0 &amp; 1 &amp; 0
6487     *                      \end{bmatrix}\)
6488     *
6489     *     \(\texttt{P2} = \begin{bmatrix}
6490     *                         f &amp; 0 &amp; cx_2 &amp; T_x*f \\
6491     *                         0 &amp; f &amp; cy &amp; 0 \\
6492     *                         0 &amp; 0 &amp; 1 &amp; 0
6493     *                      \end{bmatrix} ,\)
6494     *
6495     *     where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
6496     *     REF: CALIB_ZERO_DISPARITY is set.
6497     *
6498     * <ul>
6499     *   <li>
6500     *    <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
6501     *     mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
6502     *     lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
6503     *   </li>
6504     * </ul>
6505     *
6506     *     \(\texttt{P1} = \begin{bmatrix}
6507     *                         f &amp; 0 &amp; cx &amp; 0 \\
6508     *                         0 &amp; f &amp; cy_1 &amp; 0 \\
6509     *                         0 &amp; 0 &amp; 1 &amp; 0
6510     *                      \end{bmatrix}\)
6511     *
6512     *     \(\texttt{P2} = \begin{bmatrix}
6513     *                         f &amp; 0 &amp; cx &amp; 0 \\
6514     *                         0 &amp; f &amp; cy_2 &amp; T_y*f \\
6515     *                         0 &amp; 0 &amp; 1 &amp; 0
6516     *                      \end{bmatrix},\)
6517     *
6518     *     where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
6519     *     REF: CALIB_ZERO_DISPARITY is set.
6520     *
6521     * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
6522     * matrices. The matrices, together with R1 and R2 , can then be passed to initUndistortRectifyMap to
6523     * initialize the rectification map for each camera.
6524     *
6525     * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
6526     * the corresponding image regions. This means that the images are well rectified, which is what most
6527     * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
6528     * their interiors are all valid pixels.
6529     *
6530     * ![image](pics/stereo_undistort.jpg)
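     *
     * <p>A minimal calling sketch (cameraMatrix1/2, distCoeffs1/2, R and T are assumed to come
     * from a prior stereoCalibrate run; the ROI outputs are optional):
     * <pre>{@code
     * Mat R1 = new Mat(), R2 = new Mat(), P1 = new Mat(), P2 = new Mat(), Q = new Mat();
     * Rect roi1 = new Rect(), roi2 = new Rect();
     * Calib3d.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize,
     *         R, T, R1, R2, P1, P2, Q, Calib3d.CALIB_ZERO_DISPARITY, 0, imageSize, roi1, roi2);
     * // R1/P1 and R2/P2 are then typically passed to initUndistortRectifyMap for each camera,
     * // and Q to reprojectImageTo3D if a depth map is needed.
     * }</pre>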
6531     */
6532    public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize, Rect validPixROI1, Rect validPixROI2) {
6533        double[] validPixROI1_out = new double[4];
6534        double[] validPixROI2_out = new double[4];
6535        stereoRectify_0(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha, newImageSize.width, newImageSize.height, validPixROI1_out, validPixROI2_out);
6536        if(validPixROI1!=null){ validPixROI1.x = (int)validPixROI1_out[0]; validPixROI1.y = (int)validPixROI1_out[1]; validPixROI1.width = (int)validPixROI1_out[2]; validPixROI1.height = (int)validPixROI1_out[3]; } 
6537        if(validPixROI2!=null){ validPixROI2.x = (int)validPixROI2_out[0]; validPixROI2.y = (int)validPixROI2_out[1]; validPixROI2.width = (int)validPixROI2_out[2]; validPixROI2.height = (int)validPixROI2_out[3]; } 
6538    }
6539
6540    /**
6541     * Computes rectification transforms for each head of a calibrated stereo camera.
6542     *
6543     * @param cameraMatrix1 First camera intrinsic matrix.
6544     * @param distCoeffs1 First camera distortion parameters.
6545     * @param cameraMatrix2 Second camera intrinsic matrix.
6546     * @param distCoeffs2 Second camera distortion parameters.
6547     * @param imageSize Size of the image used for stereo calibration.
6548     * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
6549     * see REF: stereoCalibrate.
6550     * @param T Translation vector from the coordinate system of the first camera to the second camera,
6551     * see REF: stereoCalibrate.
6552     * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
6553     * brings points given in the unrectified first camera's coordinate system to points in the rectified
6554     * first camera's coordinate system. In more technical terms, it performs a change of basis from the
6555     * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
6556     * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
6557     * brings points given in the unrectified second camera's coordinate system to points in the rectified
6558     * second camera's coordinate system. In more technical terms, it performs a change of basis from the
6559     * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
6560     * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
6561     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
6562     * rectified first camera's image.
6563     * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
6564     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
6565     * rectified second camera's image.
6566     * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
6567     * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
6568     * the function makes the principal points of each camera have the same pixel coordinates in the
6569     * rectified views. And if the flag is not set, the function may still shift the images in the
6570     * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
6571     * useful image area.
6572     * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
6573     * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
6574     * images are zoomed and shifted so that only valid pixels are visible (no black areas after
6575     * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
6576     * pixels from the original images from the cameras are retained in the rectified images (no source
6577     * image pixels are lost). Any intermediate value yields an intermediate result between
6578     * those two extreme cases.
6579     * @param newImageSize New image resolution after rectification. The same size should be passed to
6580     * initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
6581     * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
6582     * preserve details in the original image, especially when there is a big radial distortion.
6583     * @param validPixROI1 Optional output rectangles inside the rectified images where all the pixels
6584     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
6585     * (see the picture below).
6588     *
6589     * The function computes the rotation matrices for each camera that (virtually) make both camera image
6590     * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
6591     * the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
6592     * as input. As output, it provides two rotation matrices and also two projection matrices in the new
6593     * coordinates. The function distinguishes the following two cases:
6594     *
6595     * <ul>
6596     *   <li>
6597     *    <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
6598     *     mainly along the x-axis (with possible small vertical shift). In the rectified images, the
6599     *     corresponding epipolar lines in the left and right cameras are horizontal and have the same
6600     *     y-coordinate. P1 and P2 look like:
6601     *   </li>
6602     * </ul>
6603     *
6604     *     \(\texttt{P1} = \begin{bmatrix}
6605     *                         f &amp; 0 &amp; cx_1 &amp; 0 \\
6606     *                         0 &amp; f &amp; cy &amp; 0 \\
6607     *                         0 &amp; 0 &amp; 1 &amp; 0
6608     *                      \end{bmatrix}\)
6609     *
6610     *     \(\texttt{P2} = \begin{bmatrix}
6611     *                         f &amp; 0 &amp; cx_2 &amp; T_x*f \\
6612     *                         0 &amp; f &amp; cy &amp; 0 \\
6613     *                         0 &amp; 0 &amp; 1 &amp; 0
6614     *                      \end{bmatrix} ,\)
6615     *
6616     *     where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
6617     *     REF: CALIB_ZERO_DISPARITY is set.
6618     *
6619     * <ul>
6620     *   <li>
6621     *    <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
6622     *     mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
6623     *     lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
6624     *   </li>
6625     * </ul>
6626     *
6627     *     \(\texttt{P1} = \begin{bmatrix}
6628     *                         f &amp; 0 &amp; cx &amp; 0 \\
6629     *                         0 &amp; f &amp; cy_1 &amp; 0 \\
6630     *                         0 &amp; 0 &amp; 1 &amp; 0
6631     *                      \end{bmatrix}\)
6632     *
6633     *     \(\texttt{P2} = \begin{bmatrix}
6634     *                         f &amp; 0 &amp; cx &amp; 0 \\
6635     *                         0 &amp; f &amp; cy_2 &amp; T_y*f \\
6636     *                         0 &amp; 0 &amp; 1 &amp; 0
6637     *                      \end{bmatrix},\)
6638     *
6639     *     where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
6640     *     REF: CALIB_ZERO_DISPARITY is set.
6641     *
6642     * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
6643     * matrices. The matrices, together with R1 and R2 , can then be passed to initUndistortRectifyMap to
6644     * initialize the rectification map for each camera.
6645     *
6646     * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
6647     * the corresponding image regions. This means that the images are well rectified, which is what most
6648     * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
6649     * their interiors are all valid pixels.
6650     *
6651     * ![image](pics/stereo_undistort.jpg)
6652     */
6653    public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize, Rect validPixROI1) {
6654        double[] validPixROI1_out = new double[4];
6655        stereoRectify_1(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha, newImageSize.width, newImageSize.height, validPixROI1_out);
6656        if(validPixROI1!=null){ validPixROI1.x = (int)validPixROI1_out[0]; validPixROI1.y = (int)validPixROI1_out[1]; validPixROI1.width = (int)validPixROI1_out[2]; validPixROI1.height = (int)validPixROI1_out[3]; } 
6657    }
6658
6659    /**
6660     * Computes rectification transforms for each head of a calibrated stereo camera.
6661     *
6662     * @param cameraMatrix1 First camera intrinsic matrix.
6663     * @param distCoeffs1 First camera distortion parameters.
6664     * @param cameraMatrix2 Second camera intrinsic matrix.
6665     * @param distCoeffs2 Second camera distortion parameters.
6666     * @param imageSize Size of the image used for stereo calibration.
6667     * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
6668     * see REF: stereoCalibrate.
6669     * @param T Translation vector from the coordinate system of the first camera to the second camera,
6670     * see REF: stereoCalibrate.
6671     * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
6672     * brings points given in the unrectified first camera's coordinate system to points in the rectified
6673     * first camera's coordinate system. In more technical terms, it performs a change of basis from the
6674     * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
6675     * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
6676     * brings points given in the unrectified second camera's coordinate system to points in the rectified
6677     * second camera's coordinate system. In more technical terms, it performs a change of basis from the
6678     * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
6679     * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
6680     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
6681     * rectified first camera's image.
6682     * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
6683     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
6684     * rectified second camera's image.
6685     * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
6686     * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
6687     * the function makes the principal points of each camera have the same pixel coordinates in the
6688     * rectified views. And if the flag is not set, the function may still shift the images in the
6689     * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
6690     * useful image area.
6691     * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
6692     * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
6693     * images are zoomed and shifted so that only valid pixels are visible (no black areas after
6694     * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
6695     * pixels from the original images from the cameras are retained in the rectified images (no source
6696     * image pixels are lost). Any intermediate value yields an intermediate result between
6697     * those two extreme cases.
6698     * @param newImageSize New image resolution after rectification. The same size should be passed to
6699     * initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
6700     * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
6701     * preserve details in the original image, especially when there is a big radial distortion.
6706     *
6707     * The function computes the rotation matrices for each camera that (virtually) make both camera image
6708     * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
6709     * the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
6710     * as input. As output, it provides two rotation matrices and also two projection matrices in the new
6711     * coordinates. The function distinguishes the following two cases:
6712     *
6713     * <ul>
6714     *   <li>
6715     *    <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
6716     *     mainly along the x-axis (with possible small vertical shift). In the rectified images, the
6717     *     corresponding epipolar lines in the left and right cameras are horizontal and have the same
6718     *     y-coordinate. P1 and P2 look like:
6719     *   </li>
6720     * </ul>
6721     *
6722     *     \(\texttt{P1} = \begin{bmatrix}
6723     *                         f &amp; 0 &amp; cx_1 &amp; 0 \\
6724     *                         0 &amp; f &amp; cy &amp; 0 \\
6725     *                         0 &amp; 0 &amp; 1 &amp; 0
6726     *                      \end{bmatrix}\)
6727     *
6728     *     \(\texttt{P2} = \begin{bmatrix}
6729     *                         f &amp; 0 &amp; cx_2 &amp; T_x*f \\
6730     *                         0 &amp; f &amp; cy &amp; 0 \\
6731     *                         0 &amp; 0 &amp; 1 &amp; 0
6732     *                      \end{bmatrix} ,\)
6733     *
6734     *     where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
6735     *     REF: CALIB_ZERO_DISPARITY is set.
6736     *
6737     * <ul>
6738     *   <li>
6739     *    <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
6740     *     mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
6741     *     lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
6742     *   </li>
6743     * </ul>
6744     *
6745     *     \(\texttt{P1} = \begin{bmatrix}
6746     *                         f &amp; 0 &amp; cx &amp; 0 \\
6747     *                         0 &amp; f &amp; cy_1 &amp; 0 \\
6748     *                         0 &amp; 0 &amp; 1 &amp; 0
6749     *                      \end{bmatrix}\)
6750     *
6751     *     \(\texttt{P2} = \begin{bmatrix}
6752     *                         f &amp; 0 &amp; cx &amp; 0 \\
6753     *                         0 &amp; f &amp; cy_2 &amp; T_y*f \\
6754     *                         0 &amp; 0 &amp; 1 &amp; 0
6755     *                      \end{bmatrix},\)
6756     *
6757     *     where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
6758     *     REF: CALIB_ZERO_DISPARITY is set.
6759     *
6760     * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
6761     * matrices. The matrices, together with R1 and R2 , can then be passed to initUndistortRectifyMap to
6762     * initialize the rectification map for each camera.
6763     *
6764     * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
6765     * the corresponding image regions. This means that the images are well rectified, which is what most
6766     * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
6767     * their interiors are all valid pixels.
6768     *
6769     * ![image](pics/stereo_undistort.jpg)
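     *
     * A minimal usage sketch follows; mtx1, dist1, mtx2, dist2, R and T are placeholders for the outputs
     * of a previous stereoCalibrate call, not values defined here:
     * <pre>{@code
     * Size imageSize = new Size(640, 480);
     * Mat R1 = new Mat(), R2 = new Mat(), P1 = new Mat(), P2 = new Mat(), Q = new Mat();
     * Calib3d.stereoRectify(mtx1, dist1, mtx2, dist2, imageSize, R, T,
     *         R1, R2, P1, P2, Q,
     *         Calib3d.CALIB_ZERO_DISPARITY, // align the principal points of the rectified views
     *         0,                            // alpha = 0: keep only valid pixels after rectification
     *         imageSize);                   // keep the original resolution
     * }</pre>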
6770     */
6771    public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize) {
6772        stereoRectify_2(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha, newImageSize.width, newImageSize.height);
6773    }
6774
6775    /**
6776     * Computes rectification transforms for each head of a calibrated stereo camera.
6777     *
6778     * @param cameraMatrix1 First camera intrinsic matrix.
6779     * @param distCoeffs1 First camera distortion parameters.
6780     * @param cameraMatrix2 Second camera intrinsic matrix.
6781     * @param distCoeffs2 Second camera distortion parameters.
6782     * @param imageSize Size of the image used for stereo calibration.
6783     * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
6784     * see REF: stereoCalibrate.
6785     * @param T Translation vector from the coordinate system of the first camera to the second camera,
6786     * see REF: stereoCalibrate.
6787     * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
6788     * brings points given in the unrectified first camera's coordinate system to points in the rectified
6789     * first camera's coordinate system. In more technical terms, it performs a change of basis from the
6790     * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
6791     * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
6792     * brings points given in the unrectified second camera's coordinate system to points in the rectified
6793     * second camera's coordinate system. In more technical terms, it performs a change of basis from the
6794     * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
6795     * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
6796     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
6797     * rectified first camera's image.
6798     * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
6799     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
6800     * rectified second camera's image.
6801     * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
6802     * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
6803     * the function makes the principal points of each camera have the same pixel coordinates in the
6804     * rectified views. And if the flag is not set, the function may still shift the images in the
6805     * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
6806     * useful image area.
6807     * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
6808     * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
6809     * images are zoomed and shifted so that only valid pixels are visible (no black areas after
6810     * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
6811     * pixels from the original images from the cameras are retained in the rectified images (no source
6812     * image pixels are lost). Any intermediate value yields an intermediate result between
6813     * those two extreme cases.
     * The omitted newImageSize parameter defaults to (0,0), i.e. the original imageSize; when it is set
     * explicitly, the same size should be passed to initUndistortRectifyMap (see the stereo_calib.cpp sample
     * in OpenCV samples directory), and setting it to a larger value can help you preserve details in the
     * original image, especially when there is a big radial distortion.
     * The optional validPixROI1/validPixROI2 output rectangles (returned by the full overload of this
     * method) outline the regions inside the rectified images where all the pixels are valid. If alpha=0,
     * the ROIs cover the whole images. Otherwise, they are likely to be smaller (see the picture below).
6821     *
6822     * The function computes the rotation matrices for each camera that (virtually) make both camera image
6823     * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
6824     * the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
6825     * as input. As output, it provides two rotation matrices and also two projection matrices in the new
6826     * coordinates. The function distinguishes the following two cases:
6827     *
6828     * <ul>
6829     *   <li>
6830     *    <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
6831     *     mainly along the x-axis (with possible small vertical shift). In the rectified images, the
6832     *     corresponding epipolar lines in the left and right cameras are horizontal and have the same
6833     *     y-coordinate. P1 and P2 look like:
6834     *   </li>
6835     * </ul>
6836     *
6837     *     \(\texttt{P1} = \begin{bmatrix}
6838     *                         f &amp; 0 &amp; cx_1 &amp; 0 \\
6839     *                         0 &amp; f &amp; cy &amp; 0 \\
6840     *                         0 &amp; 0 &amp; 1 &amp; 0
6841     *                      \end{bmatrix}\)
6842     *
6843     *     \(\texttt{P2} = \begin{bmatrix}
6844     *                         f &amp; 0 &amp; cx_2 &amp; T_x*f \\
6845     *                         0 &amp; f &amp; cy &amp; 0 \\
6846     *                         0 &amp; 0 &amp; 1 &amp; 0
6847     *                      \end{bmatrix} ,\)
6848     *
6849     *     where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
6850     *     REF: CALIB_ZERO_DISPARITY is set.
6851     *
6852     * <ul>
6853     *   <li>
6854     *    <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
6855     *     mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
6856     *     lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
6857     *   </li>
6858     * </ul>
6859     *
6860     *     \(\texttt{P1} = \begin{bmatrix}
6861     *                         f &amp; 0 &amp; cx &amp; 0 \\
6862     *                         0 &amp; f &amp; cy_1 &amp; 0 \\
6863     *                         0 &amp; 0 &amp; 1 &amp; 0
6864     *                      \end{bmatrix}\)
6865     *
6866     *     \(\texttt{P2} = \begin{bmatrix}
6867     *                         f &amp; 0 &amp; cx &amp; 0 \\
6868     *                         0 &amp; f &amp; cy_2 &amp; T_y*f \\
6869     *                         0 &amp; 0 &amp; 1 &amp; 0
6870     *                      \end{bmatrix},\)
6871     *
6872     *     where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
6873     *     REF: CALIB_ZERO_DISPARITY is set.
6874     *
6875     * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
6876     * matrices. The matrices, together with R1 and R2 , can then be passed to initUndistortRectifyMap to
6877     * initialize the rectification map for each camera.
6878     *
6879     * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
6880     * the corresponding image regions. This means that the images are well rectified, which is what most
6881     * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
6882     * their interiors are all valid pixels.
6883     *
6884     * ![image](pics/stereo_undistort.jpg)
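     *
     * A sketch of the usual follow-up step; mtx1, dist1, leftRaw and imageSize are placeholders, R1 and P1
     * are the outputs described above, and the Calib3d.initUndistortRectifyMap, org.opencv.imgproc.Imgproc
     * and org.opencv.core.CvType wrappers are assumed to be available in this build:
     * <pre>{@code
     * // Build the rectification maps for the first camera from R1/P1, then remap its raw image.
     * Mat map1x = new Mat(), map1y = new Mat();
     * Calib3d.initUndistortRectifyMap(mtx1, dist1, R1, P1, imageSize, CvType.CV_32FC1, map1x, map1y);
     * Mat leftRect = new Mat();
     * Imgproc.remap(leftRaw, leftRect, map1x, map1y, Imgproc.INTER_LINEAR);
     * }</pre>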
6885     */
6886    public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha) {
6887        stereoRectify_3(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha);
6888    }
6889
6890    /**
6891     * Computes rectification transforms for each head of a calibrated stereo camera.
6892     *
6893     * @param cameraMatrix1 First camera intrinsic matrix.
6894     * @param distCoeffs1 First camera distortion parameters.
6895     * @param cameraMatrix2 Second camera intrinsic matrix.
6896     * @param distCoeffs2 Second camera distortion parameters.
6897     * @param imageSize Size of the image used for stereo calibration.
6898     * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
6899     * see REF: stereoCalibrate.
6900     * @param T Translation vector from the coordinate system of the first camera to the second camera,
6901     * see REF: stereoCalibrate.
6902     * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
6903     * brings points given in the unrectified first camera's coordinate system to points in the rectified
6904     * first camera's coordinate system. In more technical terms, it performs a change of basis from the
6905     * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
6906     * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
6907     * brings points given in the unrectified second camera's coordinate system to points in the rectified
6908     * second camera's coordinate system. In more technical terms, it performs a change of basis from the
6909     * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
6910     * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
6911     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
6912     * rectified first camera's image.
6913     * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
6914     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
6915     * rectified second camera's image.
6916     * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
6917     * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
6918     * the function makes the principal points of each camera have the same pixel coordinates in the
6919     * rectified views. And if the flag is not set, the function may still shift the images in the
6920     * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
6921     * useful image area.
     * The omitted alpha parameter defaults to -1, which selects the default scaling. When set explicitly, it
     * should be between 0 and 1: alpha=0 means that the rectified images are zoomed and shifted so that only
     * valid pixels are visible (no black areas after rectification); alpha=1 means that the rectified image
     * is decimated and shifted so that all the pixels from the original images from the cameras are retained
     * in the rectified images (no source image pixels are lost). Any intermediate value yields an
     * intermediate result between those two extreme cases.
     * The omitted newImageSize parameter defaults to (0,0), i.e. the original imageSize; when it is set
     * explicitly, the same size should be passed to initUndistortRectifyMap (see the stereo_calib.cpp sample
     * in OpenCV samples directory), and setting it to a larger value can help you preserve details in the
     * original image, especially when there is a big radial distortion.
     * The optional validPixROI1/validPixROI2 output rectangles (returned by the full overload of this
     * method) outline the regions inside the rectified images where all the pixels are valid. If alpha=0,
     * the ROIs cover the whole images. Otherwise, they are likely to be smaller (see the picture below).
6935     *
6936     * The function computes the rotation matrices for each camera that (virtually) make both camera image
6937     * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
6938     * the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
6939     * as input. As output, it provides two rotation matrices and also two projection matrices in the new
6940     * coordinates. The function distinguishes the following two cases:
6941     *
6942     * <ul>
6943     *   <li>
6944     *    <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
6945     *     mainly along the x-axis (with possible small vertical shift). In the rectified images, the
6946     *     corresponding epipolar lines in the left and right cameras are horizontal and have the same
6947     *     y-coordinate. P1 and P2 look like:
6948     *   </li>
6949     * </ul>
6950     *
6951     *     \(\texttt{P1} = \begin{bmatrix}
6952     *                         f &amp; 0 &amp; cx_1 &amp; 0 \\
6953     *                         0 &amp; f &amp; cy &amp; 0 \\
6954     *                         0 &amp; 0 &amp; 1 &amp; 0
6955     *                      \end{bmatrix}\)
6956     *
6957     *     \(\texttt{P2} = \begin{bmatrix}
6958     *                         f &amp; 0 &amp; cx_2 &amp; T_x*f \\
6959     *                         0 &amp; f &amp; cy &amp; 0 \\
6960     *                         0 &amp; 0 &amp; 1 &amp; 0
6961     *                      \end{bmatrix} ,\)
6962     *
6963     *     where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
6964     *     REF: CALIB_ZERO_DISPARITY is set.
6965     *
6966     * <ul>
6967     *   <li>
6968     *    <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
6969     *     mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
6970     *     lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
6971     *   </li>
6972     * </ul>
6973     *
6974     *     \(\texttt{P1} = \begin{bmatrix}
6975     *                         f &amp; 0 &amp; cx &amp; 0 \\
6976     *                         0 &amp; f &amp; cy_1 &amp; 0 \\
6977     *                         0 &amp; 0 &amp; 1 &amp; 0
6978     *                      \end{bmatrix}\)
6979     *
6980     *     \(\texttt{P2} = \begin{bmatrix}
6981     *                         f &amp; 0 &amp; cx &amp; 0 \\
6982     *                         0 &amp; f &amp; cy_2 &amp; T_y*f \\
6983     *                         0 &amp; 0 &amp; 1 &amp; 0
6984     *                      \end{bmatrix},\)
6985     *
6986     *     where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
6987     *     REF: CALIB_ZERO_DISPARITY is set.
6988     *
6989     * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
6990     * matrices. The matrices, together with R1 and R2 , can then be passed to initUndistortRectifyMap to
6991     * initialize the rectification map for each camera.
6992     *
6993     * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
6994     * the corresponding image regions. This means that the images are well rectified, which is what most
6995     * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
6996     * their interiors are all valid pixels.
6997     *
6998     * ![image](pics/stereo_undistort.jpg)
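     *
     * A sketch of how the Q matrix is typically consumed afterwards; leftRect and rightRect are placeholder
     * rectified images, and the StereoSGBM matcher from this package is assumed to be available:
     * <pre>{@code
     * StereoSGBM sgbm = StereoSGBM.create(0, 128, 5);      // minDisparity, numDisparities, blockSize
     * Mat disp16 = new Mat();
     * sgbm.compute(leftRect, rightRect, disp16);           // fixed-point disparity, scaled by 16
     * Mat disparity = new Mat();
     * disp16.convertTo(disparity, CvType.CV_32F, 1.0 / 16.0);
     * Mat points3d = new Mat();
     * Calib3d.reprojectImageTo3D(disparity, points3d, Q);  // per-pixel 3D coordinates
     * }</pre>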
6999     */
7000    public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags) {
7001        stereoRectify_4(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags);
7002    }
7003
7004    /**
7005     * Computes rectification transforms for each head of a calibrated stereo camera.
7006     *
7007     * @param cameraMatrix1 First camera intrinsic matrix.
7008     * @param distCoeffs1 First camera distortion parameters.
7009     * @param cameraMatrix2 Second camera intrinsic matrix.
7010     * @param distCoeffs2 Second camera distortion parameters.
7011     * @param imageSize Size of the image used for stereo calibration.
7012     * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
7013     * see REF: stereoCalibrate.
7014     * @param T Translation vector from the coordinate system of the first camera to the second camera,
7015     * see REF: stereoCalibrate.
7016     * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
7017     * brings points given in the unrectified first camera's coordinate system to points in the rectified
7018     * first camera's coordinate system. In more technical terms, it performs a change of basis from the
7019     * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
7020     * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
7021     * brings points given in the unrectified second camera's coordinate system to points in the rectified
7022     * second camera's coordinate system. In more technical terms, it performs a change of basis from the
7023     * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
7024     * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
7025     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
7026     * rectified first camera's image.
7027     * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
7028     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
7029     * rectified second camera's image.
7030     * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
     * The omitted flags parameter defaults to REF: CALIB_ZERO_DISPARITY. If the flag is set, the function
     * makes the principal points of each camera have the same pixel coordinates in the rectified views. If
     * the flag is not set, the function may still shift the images in the horizontal or vertical direction
     * (depending on the orientation of epipolar lines) to maximize the useful image area.
     * The omitted alpha parameter defaults to -1, which selects the default scaling. When set explicitly, it
     * should be between 0 and 1: alpha=0 means that the rectified images are zoomed and shifted so that only
     * valid pixels are visible (no black areas after rectification); alpha=1 means that the rectified image
     * is decimated and shifted so that all the pixels from the original images from the cameras are retained
     * in the rectified images (no source image pixels are lost). Any intermediate value yields an
     * intermediate result between those two extreme cases.
     * The omitted newImageSize parameter defaults to (0,0), i.e. the original imageSize; when it is set
     * explicitly, the same size should be passed to initUndistortRectifyMap (see the stereo_calib.cpp sample
     * in OpenCV samples directory), and setting it to a larger value can help you preserve details in the
     * original image, especially when there is a big radial distortion.
     * The optional validPixROI1/validPixROI2 output rectangles (returned by the full overload of this
     * method) outline the regions inside the rectified images where all the pixels are valid. If alpha=0,
     * the ROIs cover the whole images. Otherwise, they are likely to be smaller (see the picture below).
7048     *
7049     * The function computes the rotation matrices for each camera that (virtually) make both camera image
7050     * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
7051     * the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate
7052     * as input. As output, it provides two rotation matrices and also two projection matrices in the new
7053     * coordinates. The function distinguishes the following two cases:
7054     *
7055     * <ul>
7056     *   <li>
7057     *    <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
7058     *     mainly along the x-axis (with possible small vertical shift). In the rectified images, the
7059     *     corresponding epipolar lines in the left and right cameras are horizontal and have the same
7060     *     y-coordinate. P1 and P2 look like:
7061     *   </li>
7062     * </ul>
7063     *
7064     *     \(\texttt{P1} = \begin{bmatrix}
7065     *                         f &amp; 0 &amp; cx_1 &amp; 0 \\
7066     *                         0 &amp; f &amp; cy &amp; 0 \\
7067     *                         0 &amp; 0 &amp; 1 &amp; 0
7068     *                      \end{bmatrix}\)
7069     *
7070     *     \(\texttt{P2} = \begin{bmatrix}
7071     *                         f &amp; 0 &amp; cx_2 &amp; T_x*f \\
7072     *                         0 &amp; f &amp; cy &amp; 0 \\
7073     *                         0 &amp; 0 &amp; 1 &amp; 0
7074     *                      \end{bmatrix} ,\)
7075     *
7076     *     where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
7077     *     REF: CALIB_ZERO_DISPARITY is set.
7078     *
7079     * <ul>
7080     *   <li>
7081     *    <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
7082     *     mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
7083     *     lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
7084     *   </li>
7085     * </ul>
7086     *
7087     *     \(\texttt{P1} = \begin{bmatrix}
7088     *                         f &amp; 0 &amp; cx &amp; 0 \\
7089     *                         0 &amp; f &amp; cy_1 &amp; 0 \\
7090     *                         0 &amp; 0 &amp; 1 &amp; 0
7091     *                      \end{bmatrix}\)
7092     *
7093     *     \(\texttt{P2} = \begin{bmatrix}
7094     *                         f &amp; 0 &amp; cx &amp; 0 \\
7095     *                         0 &amp; f &amp; cy_2 &amp; T_y*f \\
7096     *                         0 &amp; 0 &amp; 1 &amp; 0
7097     *                      \end{bmatrix},\)
7098     *
7099     *     where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
7100     *     REF: CALIB_ZERO_DISPARITY is set.
7101     *
7102     * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
7103     * matrices. The matrices, together with R1 and R2 , can then be passed to initUndistortRectifyMap to
7104     * initialize the rectification map for each camera.
7105     *
7106     * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
7107     * the corresponding image regions. This means that the images are well rectified, which is what most
7108     * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
7109     * their interiors are all valid pixels.
7110     *
7111     * ![image](pics/stereo_undistort.jpg)
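     *
     * A sketch using the full overload generated earlier in this class, which additionally reports the
     * valid-pixel ROIs; identifiers other than roi1 and roi2 stand for the same inputs and outputs
     * described above:
     * <pre>{@code
     * Rect roi1 = new Rect(), roi2 = new Rect();
     * Calib3d.stereoRectify(mtx1, dist1, mtx2, dist2, imageSize, R, T,
     *         R1, R2, P1, P2, Q,
     *         Calib3d.CALIB_ZERO_DISPARITY, 1.0, imageSize, roi1, roi2);
     * // With alpha = 1 all source pixels are kept; roi1/roi2 mark the sub-rectangles of valid pixels.
     * }</pre>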
7112     */
7113    public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q) {
7114        stereoRectify_5(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj);
7115    }
7116
7117
7118    //
7119    // C++:  bool cv::stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat& H1, Mat& H2, double threshold = 5)
7120    //
7121
7122    /**
7123     * Computes a rectification transform for an uncalibrated stereo camera.
7124     *
7125     * @param points1 Array of feature points in the first image.
7126     * @param points2 The corresponding points in the second image. The same formats as in
7127     * findFundamentalMat are supported.
7128     * @param F Input fundamental matrix. It can be computed from the same set of point pairs using
7129     * findFundamentalMat .
7130     * @param imgSize Size of the image.
7131     * @param H1 Output rectification homography matrix for the first image.
7132     * @param H2 Output rectification homography matrix for the second image.
7133     * @param threshold Optional threshold used to filter out the outliers. If the parameter is greater
7134     * than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
7135     * for which \(|\texttt{points2[i]}^T*\texttt{F}*\texttt{points1[i]}|&gt;\texttt{threshold}\) ) are
7136     * rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
7137     *
7138     * The function computes the rectification transformations without knowing intrinsic parameters of the
7139     * cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
7140     * related difference from stereoRectify is that the function outputs not the rectification
7141     * transformations in the object (3D) space, but the planar perspective transformations encoded by the
7142     * homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
7143     *
7144     * <b>Note:</b>
7145     *    While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
7146     *     depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
7147     *     it would be better to correct it before computing the fundamental matrix and calling this
7148     *     function. For example, distortion coefficients can be estimated for each head of stereo camera
7149     *     separately by using calibrateCamera . Then, the images can be corrected using undistort , or
7150     *     just the point coordinates can be corrected with undistortPoints .
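     *
     * A minimal sketch; pts1 and pts2 are placeholder MatOfPoint2f sets of matched points and imgSize is
     * the image size:
     * <pre>{@code
     * Mat F = Calib3d.findFundamentalMat(pts1, pts2, Calib3d.FM_RANSAC, 3, 0.99);
     * Mat H1 = new Mat(), H2 = new Mat();
     * boolean ok = Calib3d.stereoRectifyUncalibrated(pts1, pts2, F, imgSize, H1, H2, 5);
     * }</pre>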
7151     * @return automatically generated
7152     */
7153    public static boolean stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat H1, Mat H2, double threshold) {
7154        return stereoRectifyUncalibrated_0(points1.nativeObj, points2.nativeObj, F.nativeObj, imgSize.width, imgSize.height, H1.nativeObj, H2.nativeObj, threshold);
7155    }
7156
7157    /**
7158     * Computes a rectification transform for an uncalibrated stereo camera.
7159     *
7160     * @param points1 Array of feature points in the first image.
7161     * @param points2 The corresponding points in the second image. The same formats as in
7162     * findFundamentalMat are supported.
7163     * @param F Input fundamental matrix. It can be computed from the same set of point pairs using
7164     * findFundamentalMat .
7165     * @param imgSize Size of the image.
7166     * @param H1 Output rectification homography matrix for the first image.
7167     * @param H2 Output rectification homography matrix for the second image.
     * The omitted threshold parameter defaults to 5. When it is greater than zero, all the point pairs
     * that do not comply with the epipolar geometry (that is, the points for which
     * \(|\texttt{points2[i]}^T*\texttt{F}*\texttt{points1[i]}|&gt;\texttt{threshold}\) ) are rejected
     * prior to computing the homographies. Otherwise, all the points are considered inliers.
7171     *
7172     * The function computes the rectification transformations without knowing intrinsic parameters of the
7173     * cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
7174     * related difference from stereoRectify is that the function outputs not the rectification
7175     * transformations in the object (3D) space, but the planar perspective transformations encoded by the
7176     * homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
7177     *
7178     * <b>Note:</b>
7179     *    While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
7180     *     depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
7181     *     it would be better to correct it before computing the fundamental matrix and calling this
7182     *     function. For example, distortion coefficients can be estimated for each head of stereo camera
7183     *     separately by using calibrateCamera . Then, the images can be corrected using undistort , or
7184     *     just the point coordinates can be corrected with undistortPoints .
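     *
     * The resulting homographies can then be applied with a perspective warp, assuming the
     * org.opencv.imgproc.Imgproc.warpPerspective wrapper is available; img1 and img2 are placeholders:
     * <pre>{@code
     * Mat rect1 = new Mat(), rect2 = new Mat();
     * Imgproc.warpPerspective(img1, rect1, H1, imgSize);
     * Imgproc.warpPerspective(img2, rect2, H2, imgSize);
     * }</pre>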
7185     * @return automatically generated
7186     */
7187    public static boolean stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat H1, Mat H2) {
7188        return stereoRectifyUncalibrated_1(points1.nativeObj, points2.nativeObj, F.nativeObj, imgSize.width, imgSize.height, H1.nativeObj, H2.nativeObj);
7189    }
7190
7191
7192    //
7193    // C++:  float cv::rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, vector_Mat imgpt1, vector_Mat imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q, double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags)
7194    //
7195
7196    public static float rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, List<Mat> imgpt1, List<Mat> imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat R1, Mat R2, Mat R3, Mat P1, Mat P2, Mat P3, Mat Q, double alpha, Size newImgSize, Rect roi1, Rect roi2, int flags) {
7197        Mat imgpt1_mat = Converters.vector_Mat_to_Mat(imgpt1);
7198        Mat imgpt3_mat = Converters.vector_Mat_to_Mat(imgpt3);
7199        double[] roi1_out = new double[4];
7200        double[] roi2_out = new double[4];
7201        float retVal = rectify3Collinear_0(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, cameraMatrix3.nativeObj, distCoeffs3.nativeObj, imgpt1_mat.nativeObj, imgpt3_mat.nativeObj, imageSize.width, imageSize.height, R12.nativeObj, T12.nativeObj, R13.nativeObj, T13.nativeObj, R1.nativeObj, R2.nativeObj, R3.nativeObj, P1.nativeObj, P2.nativeObj, P3.nativeObj, Q.nativeObj, alpha, newImgSize.width, newImgSize.height, roi1_out, roi2_out, flags);
7202        if(roi1!=null){ roi1.x = (int)roi1_out[0]; roi1.y = (int)roi1_out[1]; roi1.width = (int)roi1_out[2]; roi1.height = (int)roi1_out[3]; } 
7203        if(roi2!=null){ roi2.x = (int)roi2_out[0]; roi2.y = (int)roi2_out[1]; roi2.width = (int)roi2_out[2]; roi2.height = (int)roi2_out[3]; } 
7204        return retVal;
7205    }
7206
7207
7208    //
7209    // C++:  Mat cv::getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize = Size(), Rect* validPixROI = 0, bool centerPrincipalPoint = false)
7210    //
7211
7212    /**
7213     * Returns the new camera intrinsic matrix based on the free scaling parameter.
7214     *
7215     * @param cameraMatrix Input camera intrinsic matrix.
7216     * @param distCoeffs Input vector of distortion coefficients
     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
7218     * assumed.
7219     * @param imageSize Original image size.
7220     * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
7221     * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
7222     * stereoRectify for details.
7223     * @param newImgSize Image size after rectification. By default, it is set to imageSize .
7224     * @param validPixROI Optional output rectangle that outlines all-good-pixels region in the
7225     * undistorted image. See roi1, roi2 description in stereoRectify .
7226     * @param centerPrincipalPoint Optional flag that indicates whether in the new camera intrinsic matrix the
7227     * principal point should be at the image center or not. By default, the principal point is chosen to
7228     * best fit a subset of the source image (determined by alpha) to the corrected image.
7229     * @return new_camera_matrix Output new camera intrinsic matrix.
7230     *
7231     * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
7232     * By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
7233     * image pixels if there is valuable information in the corners alpha=1 , or get something in between.
7234     * When alpha&gt;0 , the undistorted result is likely to have some black pixels corresponding to
7235     * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
7236     * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
7237     * initUndistortRectifyMap to produce the maps for remap .
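     *
     * A minimal sketch of the undistort-and-crop flow; src is a placeholder image and the Calib3d.undistort
     * wrapper is assumed to be available in this build:
     * <pre>{@code
     * Rect roi = new Rect();
     * Mat newK = Calib3d.getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize,
     *         1.0, imageSize, roi, false);              // alpha = 1: keep every source pixel
     * Mat undistorted = new Mat();
     * Calib3d.undistort(src, undistorted, cameraMatrix, distCoeffs, newK);
     * Mat cropped = undistorted.submat(roi);            // keep only the all-good-pixels region
     * }</pre>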
7238     */
7239    public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize, Rect validPixROI, boolean centerPrincipalPoint) {
7240        double[] validPixROI_out = new double[4];
7241        Mat retVal = new Mat(getOptimalNewCameraMatrix_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha, newImgSize.width, newImgSize.height, validPixROI_out, centerPrincipalPoint));
7242        if(validPixROI!=null){ validPixROI.x = (int)validPixROI_out[0]; validPixROI.y = (int)validPixROI_out[1]; validPixROI.width = (int)validPixROI_out[2]; validPixROI.height = (int)validPixROI_out[3]; } 
7243        return retVal;
7244    }
7245
7246    /**
7247     * Returns the new camera intrinsic matrix based on the free scaling parameter.
7248     *
7249     * @param cameraMatrix Input camera intrinsic matrix.
7250     * @param distCoeffs Input vector of distortion coefficients
     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
7252     * assumed.
7253     * @param imageSize Original image size.
7254     * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
7255     * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
7256     * stereoRectify for details.
7257     * @param newImgSize Image size after rectification. By default, it is set to imageSize .
7258     * @param validPixROI Optional output rectangle that outlines all-good-pixels region in the
7259     * undistorted image. See roi1, roi2 description in stereoRectify .
     * The omitted centerPrincipalPoint flag defaults to false: the principal point is chosen to best fit
     * a subset of the source image (determined by alpha) to the corrected image.
7262     * @return new_camera_matrix Output new camera intrinsic matrix.
7263     *
7264     * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
7265     * By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
7266     * image pixels if there is valuable information in the corners alpha=1 , or get something in between.
7267     * When alpha&gt;0 , the undistorted result is likely to have some black pixels corresponding to
7268     * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
7269     * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
7270     * initUndistortRectifyMap to produce the maps for remap .
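     *
     * For video it is cheaper to precompute the remap tables once, as sketched below; newK and frame are
     * placeholders, and the Calib3d.initUndistortRectifyMap, Imgproc.remap and CvType wrappers are assumed
     * to be available in this build:
     * <pre>{@code
     * Mat mapX = new Mat(), mapY = new Mat();
     * Calib3d.initUndistortRectifyMap(cameraMatrix, distCoeffs, new Mat(), newK,
     *         imageSize, CvType.CV_32FC1, mapX, mapY);  // empty R means identity rotation
     * Mat frameUndist = new Mat();
     * Imgproc.remap(frame, frameUndist, mapX, mapY, Imgproc.INTER_LINEAR);
     * }</pre>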
7271     */
7272    public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize, Rect validPixROI) {
7273        double[] validPixROI_out = new double[4];
7274        Mat retVal = new Mat(getOptimalNewCameraMatrix_1(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha, newImgSize.width, newImgSize.height, validPixROI_out));
7275        if(validPixROI!=null){ validPixROI.x = (int)validPixROI_out[0]; validPixROI.y = (int)validPixROI_out[1]; validPixROI.width = (int)validPixROI_out[2]; validPixROI.height = (int)validPixROI_out[3]; } 
7276        return retVal;
7277    }
7278
7279    /**
7280     * Returns the new camera intrinsic matrix based on the free scaling parameter.
7281     *
7282     * @param cameraMatrix Input camera intrinsic matrix.
7283     * @param distCoeffs Input vector of distortion coefficients
     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
7285     * assumed.
7286     * @param imageSize Original image size.
7287     * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
7288     * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
7289     * stereoRectify for details.
7290     * @param newImgSize Image size after rectification. By default, it is set to imageSize .
     * The optional validPixROI output rectangle (returned by the fuller overloads of this method) outlines
     * the all-good-pixels region in the undistorted image; see the roi1, roi2 description in stereoRectify.
     * The omitted centerPrincipalPoint flag defaults to false: the principal point is chosen to best fit
     * a subset of the source image (determined by alpha) to the corrected image.
7294     * @return new_camera_matrix Output new camera intrinsic matrix.
7295     *
7296     * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
7297     * By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
7298     * image pixels if there is valuable information in the corners alpha=1 , or get something in between.
7299     * When alpha&gt;0 , the undistorted result is likely to have some black pixels corresponding to
7300     * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
7301     * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
7302     * initUndistortRectifyMap to produce the maps for remap .
7303     */
7304    public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize) {
7305        return new Mat(getOptimalNewCameraMatrix_2(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha, newImgSize.width, newImgSize.height));
7306    }
7307
7308    /**
7309     * Returns the new camera intrinsic matrix based on the free scaling parameter.
7310     *
7311     * @param cameraMatrix Input camera intrinsic matrix.
7312     * @param distCoeffs Input vector of distortion coefficients
     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
7314     * assumed.
7315     * @param imageSize Original image size.
7316     * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
7317     * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
7318     * stereoRectify for details.
     * The omitted newImgSize parameter defaults to imageSize.
     * The optional validPixROI output rectangle (returned by the fuller overloads of this method) outlines
     * the all-good-pixels region in the undistorted image; see the roi1, roi2 description in stereoRectify.
     * The omitted centerPrincipalPoint flag defaults to false: the principal point is chosen to best fit
     * a subset of the source image (determined by alpha) to the corrected image.
7322     * @return new_camera_matrix Output new camera intrinsic matrix.
7323     *
7324     * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
7325     * By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
7326     * image pixels if there is valuable information in the corners alpha=1 , or get something in between.
7327     * When alpha&gt;0 , the undistorted result is likely to have some black pixels corresponding to
7328     * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
7329     * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
7330     * initUndistortRectifyMap to produce the maps for remap .
7331     */
7332    public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha) {
7333        return new Mat(getOptimalNewCameraMatrix_3(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha));
7334    }
7335
7336
7337    //
7338    // C++:  void cv::calibrateHandEye(vector_Mat R_gripper2base, vector_Mat t_gripper2base, vector_Mat R_target2cam, vector_Mat t_target2cam, Mat& R_cam2gripper, Mat& t_cam2gripper, HandEyeCalibrationMethod method = CALIB_HAND_EYE_TSAI)
7339    //
7340
7341    /**
7342     * Computes Hand-Eye calibration: \(_{}^{g}\textrm{T}_c\)
7343     *
7344     * @param R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point
7345     * expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
7346     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
7347     * for all the transformations from gripper frame to robot base frame.
7348     * @param t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point
7349     * expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
7350     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
7351     * from gripper frame to robot base frame.
7352     * @param R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
7353     * expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
7354     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
7355     * for all the transformations from calibration target frame to camera frame.
     * @param t_target2cam Translation part extracted from the homogeneous matrix that transforms a point
7357     * expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
7358     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
7359     * from calibration target frame to camera frame.
7360     * @param R_cam2gripper Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
7361     * expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
7362     * @param t_cam2gripper Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
7363     * expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
     * @param method One of the implemented Hand-Eye calibration methods, see cv::HandEyeCalibrationMethod
7365     *
7366     * The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the
7367     * rotation then the translation (separable solutions) and the following methods are implemented:
7368     * <ul>
7369     *   <li>
     *    R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration CITE: Tsai89
7371     *   </li>
7372     *   <li>
     *    F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group CITE: Park94
7374     *   </li>
7375     *   <li>
     *    R. Horaud, F. Dornaika Hand-Eye Calibration CITE: Horaud95
7377     *   </li>
7378     * </ul>
7379     *
7380     * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
7381     * with the following implemented methods:
7382     * <ul>
7383     *   <li>
     *    N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration CITE: Andreff99
7385     *   </li>
7386     *   <li>
     *    K. Daniilidis Hand-Eye Calibration Using Dual Quaternions CITE: Daniilidis98
7388     *   </li>
7389     * </ul>
7390     *
7391     * The following picture describes the Hand-Eye calibration problem where the transformation between a camera ("eye")
7392     * mounted on a robot gripper ("hand") has to be estimated. This configuration is called eye-in-hand.
7393     *
7394     * The eye-to-hand configuration consists in a static camera observing a calibration pattern mounted on the robot
7395     * end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting
7396     * the suitable transformations to the function, see below.
7397     *
7398     * ![](pics/hand-eye_figure.png)
7399     *
7400     * The calibration procedure is the following:
7401     * <ul>
7402     *   <li>
7403     *    a static calibration pattern is used to estimate the transformation between the target frame
7404     *   and the camera frame
7405     *   </li>
7406     *   <li>
7407     *    the robot gripper is moved in order to acquire several poses
7408     *   </li>
7409     *   <li>
7410     *    for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
7411     *   instance the robot kinematics
7412     * \(
7413     *     \begin{bmatrix}
7414     *     X_b\\
7415     *     Y_b\\
7416     *     Z_b\\
7417     *     1
7418     *     \end{bmatrix}
7419     *     =
7420     *     \begin{bmatrix}
7421     *     _{}^{b}\textrm{R}_g &amp; _{}^{b}\textrm{t}_g \\
7422     *     0_{1 \times 3} &amp; 1
7423     *     \end{bmatrix}
7424     *     \begin{bmatrix}
7425     *     X_g\\
7426     *     Y_g\\
7427     *     Z_g\\
7428     *     1
7429     *     \end{bmatrix}
7430     * \)
7431     *   </li>
7432     *   <li>
7433     *    for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
7434     *   for instance a pose estimation method (PnP) from 2D-3D point correspondences
7435     * \(
7436     *     \begin{bmatrix}
7437     *     X_c\\
7438     *     Y_c\\
7439     *     Z_c\\
7440     *     1
7441     *     \end{bmatrix}
7442     *     =
7443     *     \begin{bmatrix}
7444     *     _{}^{c}\textrm{R}_t &amp; _{}^{c}\textrm{t}_t \\
7445     *     0_{1 \times 3} &amp; 1
7446     *     \end{bmatrix}
7447     *     \begin{bmatrix}
7448     *     X_t\\
7449     *     Y_t\\
7450     *     Z_t\\
7451     *     1
7452     *     \end{bmatrix}
7453     * \)
7454     *   </li>
7455     * </ul>
7456     *
7457     * The Hand-Eye calibration procedure returns the following homogeneous transformation
7458     * \(
7459     *     \begin{bmatrix}
7460     *     X_g\\
7461     *     Y_g\\
7462     *     Z_g\\
7463     *     1
7464     *     \end{bmatrix}
7465     *     =
7466     *     \begin{bmatrix}
7467     *     _{}^{g}\textrm{R}_c &amp; _{}^{g}\textrm{t}_c \\
7468     *     0_{1 \times 3} &amp; 1
7469     *     \end{bmatrix}
7470     *     \begin{bmatrix}
7471     *     X_c\\
7472     *     Y_c\\
7473     *     Z_c\\
7474     *     1
7475     *     \end{bmatrix}
7476     * \)
7477     *
7478     * This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}\) equation:
7479     * <ul>
7480     *   <li>
7481     *    for an eye-in-hand configuration
     * \(
     *     \begin{align*}
     *     ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &amp;=
     *     \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
     *     (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &amp;=
     *     \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
     *     \textrm{A}_i \textrm{X} &amp;= \textrm{X} \textrm{B}_i
     *     \end{align*}
     * \)
     *   </li>
     * </ul>
7495     *
7496     * <ul>
7497     *   <li>
7498     *    for an eye-to-hand configuration
     * \(
     *     \begin{align*}
     *     ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &amp;=
     *     \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
     *     (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &amp;=
     *     \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
     *     \textrm{A}_i \textrm{X} &amp;= \textrm{X} \textrm{B}_i
     *     \end{align*}
     * \)
     *   </li>
     * </ul>
7512     *
     * <b>Note:</b>
     * Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration).
     * <b>Note:</b>
     * A minimum of 2 motions with non-parallel rotation axes is necessary to determine the hand-eye transformation.
     * So at least 3 different poses are required, but it is strongly recommended to use many more poses.
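     *
     * A minimal sketch of assembling the inputs and running the calibration; the rvec/tvec arrays and
     * numPoses are placeholders, and the CALIB_HAND_EYE_TSAI constant is assumed to be generated elsewhere
     * in this class:
     * <pre>{@code
     * List<Mat> R_gripper2base = new ArrayList<>(), t_gripper2base = new ArrayList<>();
     * List<Mat> R_target2cam = new ArrayList<>(), t_target2cam = new ArrayList<>();
     * for (int i = 0; i < numPoses; i++) {
     *     R_gripper2base.add(rvecGripper2Base[i]);      // (3x1) rotation vector or (3x3) matrix
     *     t_gripper2base.add(tvecGripper2Base[i]);      // (3x1) translation
     *     R_target2cam.add(rvecTarget2Cam[i]);          // from a PnP pose of the calibration target
     *     t_target2cam.add(tvecTarget2Cam[i]);
     * }
     * Mat R_cam2gripper = new Mat(), t_cam2gripper = new Mat();
     * Calib3d.calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
     *         R_cam2gripper, t_cam2gripper, Calib3d.CALIB_HAND_EYE_TSAI);
     * }</pre>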
7518     */
7519    public static void calibrateHandEye(List<Mat> R_gripper2base, List<Mat> t_gripper2base, List<Mat> R_target2cam, List<Mat> t_target2cam, Mat R_cam2gripper, Mat t_cam2gripper, int method) {
7520        Mat R_gripper2base_mat = Converters.vector_Mat_to_Mat(R_gripper2base);
7521        Mat t_gripper2base_mat = Converters.vector_Mat_to_Mat(t_gripper2base);
7522        Mat R_target2cam_mat = Converters.vector_Mat_to_Mat(R_target2cam);
7523        Mat t_target2cam_mat = Converters.vector_Mat_to_Mat(t_target2cam);
7524        calibrateHandEye_0(R_gripper2base_mat.nativeObj, t_gripper2base_mat.nativeObj, R_target2cam_mat.nativeObj, t_target2cam_mat.nativeObj, R_cam2gripper.nativeObj, t_cam2gripper.nativeObj, method);
7525    }
7526
7527    /**
7528     * Computes Hand-Eye calibration: \(_{}^{g}\textrm{T}_c\)
7529     *
7530     * @param R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point
7531     * expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
7532     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
7533     * for all the transformations from gripper frame to robot base frame.
7534     * @param t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point
7535     * expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
7536     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
7537     * from gripper frame to robot base frame.
7538     * @param R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
7539     * expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
7540     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
7541     * for all the transformations from calibration target frame to camera frame.
     * @param t_target2cam Translation part extracted from the homogeneous matrix that transforms a point
7543     * expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
7544     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
7545     * from calibration target frame to camera frame.
7546     * @param R_cam2gripper Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
7547     * expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
7548     * @param t_cam2gripper Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
7549     * expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
7550     *
7551     * The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the
7552     * rotation then the translation (separable solutions) and the following methods are implemented:
7553     * <ul>
7554     *   <li>
     *    R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration CITE: Tsai89
7556     *   </li>
7557     *   <li>
     *    F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group CITE: Park94
7559     *   </li>
7560     *   <li>
     *    R. Horaud, F. Dornaika Hand-Eye Calibration CITE: Horaud95
7562     *   </li>
7563     * </ul>
7564     *
7565     * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
7566     * with the following implemented methods:
7567     * <ul>
7568     *   <li>
     *    N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration CITE: Andreff99
7570     *   </li>
7571     *   <li>
     *    K. Daniilidis Hand-Eye Calibration Using Dual Quaternions CITE: Daniilidis98
7573     *   </li>
7574     * </ul>
7575     *
7576     * The following picture describes the Hand-Eye calibration problem where the transformation between a camera ("eye")
7577     * mounted on a robot gripper ("hand") has to be estimated. This configuration is called eye-in-hand.
7578     *
7579     * The eye-to-hand configuration consists in a static camera observing a calibration pattern mounted on the robot
7580     * end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting
7581     * the suitable transformations to the function, see below.
7582     *
7583     * ![](pics/hand-eye_figure.png)
7584     *
7585     * The calibration procedure is the following:
7586     * <ul>
7587     *   <li>
7588     *    a static calibration pattern is used to estimate the transformation between the target frame
7589     *   and the camera frame
7590     *   </li>
7591     *   <li>
7592     *    the robot gripper is moved in order to acquire several poses
7593     *   </li>
7594     *   <li>
7595     *    for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
7596     *   instance the robot kinematics
7597     * \(
7598     *     \begin{bmatrix}
7599     *     X_b\\
7600     *     Y_b\\
7601     *     Z_b\\
7602     *     1
7603     *     \end{bmatrix}
7604     *     =
7605     *     \begin{bmatrix}
7606     *     _{}^{b}\textrm{R}_g &amp; _{}^{b}\textrm{t}_g \\
7607     *     0_{1 \times 3} &amp; 1
7608     *     \end{bmatrix}
7609     *     \begin{bmatrix}
7610     *     X_g\\
7611     *     Y_g\\
7612     *     Z_g\\
7613     *     1
7614     *     \end{bmatrix}
7615     * \)
7616     *   </li>
7617     *   <li>
7618     *    for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
7619     *   for instance a pose estimation method (PnP) from 2D-3D point correspondences
7620     * \(
7621     *     \begin{bmatrix}
7622     *     X_c\\
7623     *     Y_c\\
7624     *     Z_c\\
7625     *     1
7626     *     \end{bmatrix}
7627     *     =
7628     *     \begin{bmatrix}
7629     *     _{}^{c}\textrm{R}_t &amp; _{}^{c}\textrm{t}_t \\
7630     *     0_{1 \times 3} &amp; 1
7631     *     \end{bmatrix}
7632     *     \begin{bmatrix}
7633     *     X_t\\
7634     *     Y_t\\
7635     *     Z_t\\
7636     *     1
7637     *     \end{bmatrix}
7638     * \)
7639     *   </li>
7640     * </ul>
7641     *
7642     * The Hand-Eye calibration procedure returns the following homogeneous transformation
7643     * \(
7644     *     \begin{bmatrix}
7645     *     X_g\\
7646     *     Y_g\\
7647     *     Z_g\\
7648     *     1
7649     *     \end{bmatrix}
7650     *     =
7651     *     \begin{bmatrix}
7652     *     _{}^{g}\textrm{R}_c &amp; _{}^{g}\textrm{t}_c \\
7653     *     0_{1 \times 3} &amp; 1
7654     *     \end{bmatrix}
7655     *     \begin{bmatrix}
7656     *     X_c\\
7657     *     Y_c\\
7658     *     Z_c\\
7659     *     1
7660     *     \end{bmatrix}
7661     * \)
7662     *
7663     * This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}\) equation:
7664     * <ul>
7665     *   <li>
7666     *    for an eye-in-hand configuration
7667     * \(
7668     *     \begin{align*}
7669     *     ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &amp;=
7670     *     \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
     *
     *     (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &amp;=
     *     \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
     *
     *     \textrm{A}_i \textrm{X} &amp;= \textrm{X} \textrm{B}_i \\
     *     \end{align*}
     * \)
     *   </li>
     * </ul>
7680     *
7681     * <ul>
7682     *   <li>
7683     *    for an eye-to-hand configuration
7684     * \(
7685     *     \begin{align*}
7686     *     ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &amp;=
7687     *     \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
     *
     *     (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &amp;=
     *     \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
     *
     *     \textrm{A}_i \textrm{X} &amp;= \textrm{X} \textrm{B}_i \\
     *     \end{align*}
     * \)
     *   </li>
     * </ul>
7697     *
7698     * \note
7699     * Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration).
7700     * \note
     * A minimum of 2 motions with non-parallel rotation axes is necessary to determine the hand-eye transformation.
7702     * So at least 3 different poses are required, but it is strongly recommended to use many more poses.
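     *
     * A minimal Java sketch of an eye-in-hand calibration call (the per-pose input lists are assumed to
     * have been filled beforehand, one entry per robot pose; all variable names are illustrative only):
     * <code>
     *     List&lt;Mat&gt; R_gripper2base = new ArrayList&lt;&gt;();
     *     List&lt;Mat&gt; t_gripper2base = new ArrayList&lt;&gt;();
     *     List&lt;Mat&gt; R_target2cam = new ArrayList&lt;&gt;();
     *     List&lt;Mat&gt; t_target2cam = new ArrayList&lt;&gt;();
     *     // ... fill the lists with (3x3) or (3x1) rotations and (3x1) translations, one per pose ...
     *
     *     Mat R_cam2gripper = new Mat();
     *     Mat t_cam2gripper = new Mat();
     *     Calib3d.calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
     *             R_cam2gripper, t_cam2gripper);
     * </code>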
7703     */
7704    public static void calibrateHandEye(List<Mat> R_gripper2base, List<Mat> t_gripper2base, List<Mat> R_target2cam, List<Mat> t_target2cam, Mat R_cam2gripper, Mat t_cam2gripper) {
7705        Mat R_gripper2base_mat = Converters.vector_Mat_to_Mat(R_gripper2base);
7706        Mat t_gripper2base_mat = Converters.vector_Mat_to_Mat(t_gripper2base);
7707        Mat R_target2cam_mat = Converters.vector_Mat_to_Mat(R_target2cam);
7708        Mat t_target2cam_mat = Converters.vector_Mat_to_Mat(t_target2cam);
7709        calibrateHandEye_1(R_gripper2base_mat.nativeObj, t_gripper2base_mat.nativeObj, R_target2cam_mat.nativeObj, t_target2cam_mat.nativeObj, R_cam2gripper.nativeObj, t_cam2gripper.nativeObj);
7710    }
7711
7712
7713    //
7714    // C++:  void cv::calibrateRobotWorldHandEye(vector_Mat R_world2cam, vector_Mat t_world2cam, vector_Mat R_base2gripper, vector_Mat t_base2gripper, Mat& R_base2world, Mat& t_base2world, Mat& R_gripper2cam, Mat& t_gripper2cam, RobotWorldHandEyeCalibrationMethod method = CALIB_ROBOT_WORLD_HAND_EYE_SHAH)
7715    //
7716
7717    /**
7718     * Computes Robot-World/Hand-Eye calibration: \(_{}^{w}\textrm{T}_b\) and \(_{}^{c}\textrm{T}_g\)
7719     *
7720     * @param R_world2cam Rotation part extracted from the homogeneous matrix that transforms a point
7721     * expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
7722     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
7723     * for all the transformations from world frame to the camera frame.
7724     * @param t_world2cam Translation part extracted from the homogeneous matrix that transforms a point
7725     * expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
7726     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
7727     * from world frame to the camera frame.
7728     * @param R_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
7729     * expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
7730     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
7731     * for all the transformations from robot base frame to the gripper frame.
     * @param t_base2gripper Translation part extracted from the homogeneous matrix that transforms a point
7733     * expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
7734     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
7735     * from robot base frame to the gripper frame.
7736     * @param R_base2world Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
7737     * expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).
7738     * @param t_base2world Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
7739     * expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).
7740     * @param R_gripper2cam Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
7741     * expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).
7742     * @param t_gripper2cam Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
7743     * expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).
7744     * @param method One of the implemented Robot-World/Hand-Eye calibration method, see cv::RobotWorldHandEyeCalibrationMethod
7745     *
7746     * The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists in estimating the
7747     * rotation then the translation (separable solutions):
7748     * <ul>
7749     *   <li>
7750     *    M. Shah, Solving the robot-world/hand-eye calibration problem using the kronecker product \cite Shah2013SolvingTR
7751     *   </li>
7752     * </ul>
7753     *
7754     * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
7755     * with the following implemented method:
7756     * <ul>
7757     *   <li>
7758     *    A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product \cite Li2010SimultaneousRA
7759     *   </li>
7760     * </ul>
7761     *
7762     * The following picture describes the Robot-World/Hand-Eye calibration problem where the transformations between a robot and a world frame
7763     * and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated.
7764     *
7765     * ![](pics/robot-world_hand-eye_figure.png)
7766     *
7767     * The calibration procedure is the following:
7768     * <ul>
7769     *   <li>
7770     *    a static calibration pattern is used to estimate the transformation between the target frame
7771     *   and the camera frame
7772     *   </li>
7773     *   <li>
7774     *    the robot gripper is moved in order to acquire several poses
7775     *   </li>
7776     *   <li>
7777     *    for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
7778     *   instance the robot kinematics
7779     * \(
7780     *     \begin{bmatrix}
7781     *     X_g\\
7782     *     Y_g\\
7783     *     Z_g\\
7784     *     1
7785     *     \end{bmatrix}
7786     *     =
7787     *     \begin{bmatrix}
7788     *     _{}^{g}\textrm{R}_b &amp; _{}^{g}\textrm{t}_b \\
7789     *     0_{1 \times 3} &amp; 1
7790     *     \end{bmatrix}
7791     *     \begin{bmatrix}
7792     *     X_b\\
7793     *     Y_b\\
7794     *     Z_b\\
7795     *     1
7796     *     \end{bmatrix}
7797     * \)
7798     *   </li>
7799     *   <li>
7800     *    for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using
7801     *   for instance a pose estimation method (PnP) from 2D-3D point correspondences
7802     * \(
7803     *     \begin{bmatrix}
7804     *     X_c\\
7805     *     Y_c\\
7806     *     Z_c\\
7807     *     1
7808     *     \end{bmatrix}
7809     *     =
7810     *     \begin{bmatrix}
7811     *     _{}^{c}\textrm{R}_w &amp; _{}^{c}\textrm{t}_w \\
7812     *     0_{1 \times 3} &amp; 1
7813     *     \end{bmatrix}
7814     *     \begin{bmatrix}
7815     *     X_w\\
7816     *     Y_w\\
7817     *     Z_w\\
7818     *     1
7819     *     \end{bmatrix}
7820     * \)
7821     *   </li>
7822     * </ul>
7823     *
7824     * The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations
7825     * \(
7826     *     \begin{bmatrix}
7827     *     X_w\\
7828     *     Y_w\\
7829     *     Z_w\\
7830     *     1
7831     *     \end{bmatrix}
7832     *     =
7833     *     \begin{bmatrix}
7834     *     _{}^{w}\textrm{R}_b &amp; _{}^{w}\textrm{t}_b \\
7835     *     0_{1 \times 3} &amp; 1
7836     *     \end{bmatrix}
7837     *     \begin{bmatrix}
7838     *     X_b\\
7839     *     Y_b\\
7840     *     Z_b\\
7841     *     1
7842     *     \end{bmatrix}
7843     * \)
7844     * \(
7845     *     \begin{bmatrix}
7846     *     X_c\\
7847     *     Y_c\\
7848     *     Z_c\\
7849     *     1
7850     *     \end{bmatrix}
7851     *     =
7852     *     \begin{bmatrix}
7853     *     _{}^{c}\textrm{R}_g &amp; _{}^{c}\textrm{t}_g \\
7854     *     0_{1 \times 3} &amp; 1
7855     *     \end{bmatrix}
7856     *     \begin{bmatrix}
7857     *     X_g\\
7858     *     Y_g\\
7859     *     Z_g\\
7860     *     1
7861     *     \end{bmatrix}
7862     * \)
7863     *
7864     * This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}\) equation, with:
7865     * <ul>
7866     *   <li>
7867     *    \(\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w\)
7868     *   </li>
7869     *   <li>
7870     *    \(\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b\)
7871     *   </li>
7872     *   <li>
7873     *    \(\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g\)
7874     *   </li>
7875     *   <li>
7876     *    \(\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b\)
7877     *   </li>
7878     * </ul>
7879     *
7880     * \note
7881     * At least 3 measurements are required (input vectors size must be greater or equal to 3).
7882     */
7883    public static void calibrateRobotWorldHandEye(List<Mat> R_world2cam, List<Mat> t_world2cam, List<Mat> R_base2gripper, List<Mat> t_base2gripper, Mat R_base2world, Mat t_base2world, Mat R_gripper2cam, Mat t_gripper2cam, int method) {
7884        Mat R_world2cam_mat = Converters.vector_Mat_to_Mat(R_world2cam);
7885        Mat t_world2cam_mat = Converters.vector_Mat_to_Mat(t_world2cam);
7886        Mat R_base2gripper_mat = Converters.vector_Mat_to_Mat(R_base2gripper);
7887        Mat t_base2gripper_mat = Converters.vector_Mat_to_Mat(t_base2gripper);
7888        calibrateRobotWorldHandEye_0(R_world2cam_mat.nativeObj, t_world2cam_mat.nativeObj, R_base2gripper_mat.nativeObj, t_base2gripper_mat.nativeObj, R_base2world.nativeObj, t_base2world.nativeObj, R_gripper2cam.nativeObj, t_gripper2cam.nativeObj, method);
7889    }
7890
7891    /**
7892     * Computes Robot-World/Hand-Eye calibration: \(_{}^{w}\textrm{T}_b\) and \(_{}^{c}\textrm{T}_g\)
7893     *
7894     * @param R_world2cam Rotation part extracted from the homogeneous matrix that transforms a point
7895     * expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
7896     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
7897     * for all the transformations from world frame to the camera frame.
7898     * @param t_world2cam Translation part extracted from the homogeneous matrix that transforms a point
7899     * expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
7900     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
7901     * from world frame to the camera frame.
7902     * @param R_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
7903     * expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
7904     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
7905     * for all the transformations from robot base frame to the gripper frame.
     * @param t_base2gripper Translation part extracted from the homogeneous matrix that transforms a point
7907     * expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
7908     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
7909     * from robot base frame to the gripper frame.
7910     * @param R_base2world Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
7911     * expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).
7912     * @param t_base2world Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
7913     * expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).
7914     * @param R_gripper2cam Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
7915     * expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).
7916     * @param t_gripper2cam Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
7917     * expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).
7918     *
7919     * The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists in estimating the
7920     * rotation then the translation (separable solutions):
7921     * <ul>
7922     *   <li>
7923     *    M. Shah, Solving the robot-world/hand-eye calibration problem using the kronecker product \cite Shah2013SolvingTR
7924     *   </li>
7925     * </ul>
7926     *
7927     * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
7928     * with the following implemented method:
7929     * <ul>
7930     *   <li>
7931     *    A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product \cite Li2010SimultaneousRA
7932     *   </li>
7933     * </ul>
7934     *
7935     * The following picture describes the Robot-World/Hand-Eye calibration problem where the transformations between a robot and a world frame
7936     * and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated.
7937     *
7938     * ![](pics/robot-world_hand-eye_figure.png)
7939     *
7940     * The calibration procedure is the following:
7941     * <ul>
7942     *   <li>
7943     *    a static calibration pattern is used to estimate the transformation between the target frame
7944     *   and the camera frame
7945     *   </li>
7946     *   <li>
7947     *    the robot gripper is moved in order to acquire several poses
7948     *   </li>
7949     *   <li>
7950     *    for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
7951     *   instance the robot kinematics
7952     * \(
7953     *     \begin{bmatrix}
7954     *     X_g\\
7955     *     Y_g\\
7956     *     Z_g\\
7957     *     1
7958     *     \end{bmatrix}
7959     *     =
7960     *     \begin{bmatrix}
7961     *     _{}^{g}\textrm{R}_b &amp; _{}^{g}\textrm{t}_b \\
7962     *     0_{1 \times 3} &amp; 1
7963     *     \end{bmatrix}
7964     *     \begin{bmatrix}
7965     *     X_b\\
7966     *     Y_b\\
7967     *     Z_b\\
7968     *     1
7969     *     \end{bmatrix}
7970     * \)
7971     *   </li>
7972     *   <li>
7973     *    for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using
7974     *   for instance a pose estimation method (PnP) from 2D-3D point correspondences
7975     * \(
7976     *     \begin{bmatrix}
7977     *     X_c\\
7978     *     Y_c\\
7979     *     Z_c\\
7980     *     1
7981     *     \end{bmatrix}
7982     *     =
7983     *     \begin{bmatrix}
7984     *     _{}^{c}\textrm{R}_w &amp; _{}^{c}\textrm{t}_w \\
7985     *     0_{1 \times 3} &amp; 1
7986     *     \end{bmatrix}
7987     *     \begin{bmatrix}
7988     *     X_w\\
7989     *     Y_w\\
7990     *     Z_w\\
7991     *     1
7992     *     \end{bmatrix}
7993     * \)
7994     *   </li>
7995     * </ul>
7996     *
7997     * The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations
7998     * \(
7999     *     \begin{bmatrix}
8000     *     X_w\\
8001     *     Y_w\\
8002     *     Z_w\\
8003     *     1
8004     *     \end{bmatrix}
8005     *     =
8006     *     \begin{bmatrix}
8007     *     _{}^{w}\textrm{R}_b &amp; _{}^{w}\textrm{t}_b \\
8008     *     0_{1 \times 3} &amp; 1
8009     *     \end{bmatrix}
8010     *     \begin{bmatrix}
8011     *     X_b\\
8012     *     Y_b\\
8013     *     Z_b\\
8014     *     1
8015     *     \end{bmatrix}
8016     * \)
8017     * \(
8018     *     \begin{bmatrix}
8019     *     X_c\\
8020     *     Y_c\\
8021     *     Z_c\\
8022     *     1
8023     *     \end{bmatrix}
8024     *     =
8025     *     \begin{bmatrix}
8026     *     _{}^{c}\textrm{R}_g &amp; _{}^{c}\textrm{t}_g \\
8027     *     0_{1 \times 3} &amp; 1
8028     *     \end{bmatrix}
8029     *     \begin{bmatrix}
8030     *     X_g\\
8031     *     Y_g\\
8032     *     Z_g\\
8033     *     1
8034     *     \end{bmatrix}
8035     * \)
8036     *
8037     * This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}\) equation, with:
8038     * <ul>
8039     *   <li>
8040     *    \(\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w\)
8041     *   </li>
8042     *   <li>
8043     *    \(\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b\)
8044     *   </li>
8045     *   <li>
8046     *    \(\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g\)
8047     *   </li>
8048     *   <li>
8049     *    \(\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b\)
8050     *   </li>
8051     * </ul>
8052     *
8053     * \note
8054     * At least 3 measurements are required (input vectors size must be greater or equal to 3).
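     *
     * A minimal Java sketch of a calibration call with the default method (the per-pose input lists are
     * assumed to have been filled beforehand, one entry per robot pose; all variable names are illustrative):
     * <code>
     *     List&lt;Mat&gt; R_world2cam = new ArrayList&lt;&gt;();
     *     List&lt;Mat&gt; t_world2cam = new ArrayList&lt;&gt;();
     *     List&lt;Mat&gt; R_base2gripper = new ArrayList&lt;&gt;();
     *     List&lt;Mat&gt; t_base2gripper = new ArrayList&lt;&gt;();
     *     // ... fill the lists with (3x3) or (3x1) rotations and (3x1) translations, one per pose ...
     *
     *     Mat R_base2world = new Mat();
     *     Mat t_base2world = new Mat();
     *     Mat R_gripper2cam = new Mat();
     *     Mat t_gripper2cam = new Mat();
     *     Calib3d.calibrateRobotWorldHandEye(R_world2cam, t_world2cam, R_base2gripper, t_base2gripper,
     *             R_base2world, t_base2world, R_gripper2cam, t_gripper2cam);
     * </code>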
8055     */
8056    public static void calibrateRobotWorldHandEye(List<Mat> R_world2cam, List<Mat> t_world2cam, List<Mat> R_base2gripper, List<Mat> t_base2gripper, Mat R_base2world, Mat t_base2world, Mat R_gripper2cam, Mat t_gripper2cam) {
8057        Mat R_world2cam_mat = Converters.vector_Mat_to_Mat(R_world2cam);
8058        Mat t_world2cam_mat = Converters.vector_Mat_to_Mat(t_world2cam);
8059        Mat R_base2gripper_mat = Converters.vector_Mat_to_Mat(R_base2gripper);
8060        Mat t_base2gripper_mat = Converters.vector_Mat_to_Mat(t_base2gripper);
8061        calibrateRobotWorldHandEye_1(R_world2cam_mat.nativeObj, t_world2cam_mat.nativeObj, R_base2gripper_mat.nativeObj, t_base2gripper_mat.nativeObj, R_base2world.nativeObj, t_base2world.nativeObj, R_gripper2cam.nativeObj, t_gripper2cam.nativeObj);
8062    }
8063
8064
8065    //
8066    // C++:  void cv::convertPointsToHomogeneous(Mat src, Mat& dst)
8067    //
8068
8069    /**
8070     * Converts points from Euclidean to homogeneous space.
8071     *
8072     * @param src Input vector of N-dimensional points.
8073     * @param dst Output vector of N+1-dimensional points.
8074     *
8075     * The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of
8076     * point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1).
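     *
     * For instance, a small illustrative sketch with two 2D points:
     * <code>
     *     MatOfPoint2f pts = new MatOfPoint2f(new Point(1, 2), new Point(3, 4));
     *     Mat ptsHomogeneous = new Mat();
     *     Calib3d.convertPointsToHomogeneous(pts, ptsHomogeneous); // (1, 2) becomes (1, 2, 1), etc.
     * </code>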
8077     */
8078    public static void convertPointsToHomogeneous(Mat src, Mat dst) {
8079        convertPointsToHomogeneous_0(src.nativeObj, dst.nativeObj);
8080    }
8081
8082
8083    //
8084    // C++:  void cv::convertPointsFromHomogeneous(Mat src, Mat& dst)
8085    //
8086
8087    /**
8088     * Converts points from homogeneous to Euclidean space.
8089     *
8090     * @param src Input vector of N-dimensional points.
8091     * @param dst Output vector of N-1-dimensional points.
8092     *
     * The function converts points from homogeneous to Euclidean space using perspective projection. That is,
8094     * each point (x1, x2, ... x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the
8095     * output point coordinates will be (0,0,0,...).
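     *
     * For instance, a small illustrative sketch with a single homogeneous 3D point:
     * <code>
     *     MatOfPoint3f homogeneous = new MatOfPoint3f(new org.opencv.core.Point3(2, 4, 2));
     *     Mat euclidean = new Mat();
     *     Calib3d.convertPointsFromHomogeneous(homogeneous, euclidean); // (2, 4, 2) becomes (1, 2)
     * </code>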
8096     */
8097    public static void convertPointsFromHomogeneous(Mat src, Mat dst) {
8098        convertPointsFromHomogeneous_0(src.nativeObj, dst.nativeObj);
8099    }
8100
8101
8102    //
8103    // C++:  Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method, double ransacReprojThreshold, double confidence, int maxIters, Mat& mask = Mat())
8104    //
8105
8106    /**
8107     * Calculates a fundamental matrix from the corresponding points in two images.
8108     *
8109     * @param points1 Array of N points from the first image. The point coordinates should be
8110     * floating-point (single or double precision).
8111     * @param points2 Array of the second image points of the same size and format as points1 .
8112     * @param method Method for computing a fundamental matrix.
8113     * <ul>
8114     *   <li>
8115     *    REF: FM_7POINT for a 7-point algorithm. \(N = 7\)
8116     *   </li>
8117     *   <li>
8118     *    REF: FM_8POINT for an 8-point algorithm. \(N \ge 8\)
8119     *   </li>
8120     *   <li>
8121     *    REF: FM_RANSAC for the RANSAC algorithm. \(N \ge 8\)
8122     *   </li>
8123     *   <li>
     *    REF: FM_LMEDS for the LMedS algorithm. \(N \ge 8\)
     *   </li>
     * </ul>
     * @param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
     * point localization, image resolution, and the image noise.
     * @param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
     * of confidence (probability) that the estimated matrix is correct.
     * @param mask optional output mask
     * @param maxIters The maximum number of robust method iterations.
8135     *
8136     * The epipolar geometry is described by the following equation:
8137     *
8138     * \([p_2; 1]^T F [p_1; 1] = 0\)
8139     *
8140     * where \(F\) is a fundamental matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
8141     * second images, respectively.
8142     *
8143     * The function calculates the fundamental matrix using one of four methods listed above and returns
8144     * the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
8145     * algorithm, the function may return up to 3 solutions ( \(9 \times 3\) matrix that stores all 3
8146     * matrices sequentially).
8147     *
8148     * The calculated fundamental matrix may be passed further to computeCorrespondEpilines that finds the
8149     * epipolar lines corresponding to the specified points. It can also be passed to
8150     * stereoRectifyUncalibrated to compute the rectification transformation. :
8151     * <code>
8152     *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
8153     *     int point_count = 100;
8154     *     vector&lt;Point2f&gt; points1(point_count);
8155     *     vector&lt;Point2f&gt; points2(point_count);
8156     *
8157     *     // initialize the points here ...
8158     *     for( int i = 0; i &lt; point_count; i++ )
8159     *     {
8160     *         points1[i] = ...;
8161     *         points2[i] = ...;
8162     *     }
8163     *
8164     *     Mat fundamental_matrix =
8165     *      findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
8166     * </code>
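     *
     * An equivalent call through this Java binding might look like the following sketch (the matched
     * point sets are assumed to have been filled beforehand):
     * <code>
     *     MatOfPoint2f points1 = new MatOfPoint2f();
     *     MatOfPoint2f points2 = new MatOfPoint2f();
     *     // ... fill points1 and points2 with corresponding points ...
     *
     *     Mat mask = new Mat();
     *     Mat fundamentalMatrix = Calib3d.findFundamentalMat(points1, points2,
     *             Calib3d.FM_RANSAC, 3, 0.99, 1000, mask);
     * </code>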
8167     * @return automatically generated
8168     */
8169    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence, int maxIters, Mat mask) {
8170        Mat points1_mat = points1;
8171        Mat points2_mat = points2;
8172        return new Mat(findFundamentalMat_0(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold, confidence, maxIters, mask.nativeObj));
8173    }
8174
8175    /**
8176     * Calculates a fundamental matrix from the corresponding points in two images.
8177     *
8178     * @param points1 Array of N points from the first image. The point coordinates should be
8179     * floating-point (single or double precision).
8180     * @param points2 Array of the second image points of the same size and format as points1 .
8181     * @param method Method for computing a fundamental matrix.
8182     * <ul>
8183     *   <li>
8184     *    REF: FM_7POINT for a 7-point algorithm. \(N = 7\)
8185     *   </li>
8186     *   <li>
8187     *    REF: FM_8POINT for an 8-point algorithm. \(N \ge 8\)
8188     *   </li>
8189     *   <li>
8190     *    REF: FM_RANSAC for the RANSAC algorithm. \(N \ge 8\)
8191     *   </li>
8192     *   <li>
     *    REF: FM_LMEDS for the LMedS algorithm. \(N \ge 8\)
     *   </li>
     * </ul>
     * @param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
     * point localization, image resolution, and the image noise.
     * @param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
     * of confidence (probability) that the estimated matrix is correct.
     * @param maxIters The maximum number of robust method iterations.
8203     *
8204     * The epipolar geometry is described by the following equation:
8205     *
8206     * \([p_2; 1]^T F [p_1; 1] = 0\)
8207     *
8208     * where \(F\) is a fundamental matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
8209     * second images, respectively.
8210     *
8211     * The function calculates the fundamental matrix using one of four methods listed above and returns
8212     * the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
8213     * algorithm, the function may return up to 3 solutions ( \(9 \times 3\) matrix that stores all 3
8214     * matrices sequentially).
8215     *
8216     * The calculated fundamental matrix may be passed further to computeCorrespondEpilines that finds the
8217     * epipolar lines corresponding to the specified points. It can also be passed to
8218     * stereoRectifyUncalibrated to compute the rectification transformation. :
8219     * <code>
8220     *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
8221     *     int point_count = 100;
8222     *     vector&lt;Point2f&gt; points1(point_count);
8223     *     vector&lt;Point2f&gt; points2(point_count);
8224     *
8225     *     // initialize the points here ...
8226     *     for( int i = 0; i &lt; point_count; i++ )
8227     *     {
8228     *         points1[i] = ...;
8229     *         points2[i] = ...;
8230     *     }
8231     *
8232     *     Mat fundamental_matrix =
8233     *      findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
8234     * </code>
8235     * @return automatically generated
8236     */
8237    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence, int maxIters) {
8238        Mat points1_mat = points1;
8239        Mat points2_mat = points2;
8240        return new Mat(findFundamentalMat_1(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold, confidence, maxIters));
8241    }
8242
8243
8244    //
8245    // C++:  Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method = FM_RANSAC, double ransacReprojThreshold = 3., double confidence = 0.99, Mat& mask = Mat())
8246    //
8247
8248    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence, Mat mask) {
8249        Mat points1_mat = points1;
8250        Mat points2_mat = points2;
8251        return new Mat(findFundamentalMat_2(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold, confidence, mask.nativeObj));
8252    }
8253
8254    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence) {
8255        Mat points1_mat = points1;
8256        Mat points2_mat = points2;
8257        return new Mat(findFundamentalMat_3(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold, confidence));
8258    }
8259
8260    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold) {
8261        Mat points1_mat = points1;
8262        Mat points2_mat = points2;
8263        return new Mat(findFundamentalMat_4(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold));
8264    }
8265
8266    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method) {
8267        Mat points1_mat = points1;
8268        Mat points2_mat = points2;
8269        return new Mat(findFundamentalMat_5(points1_mat.nativeObj, points2_mat.nativeObj, method));
8270    }
8271
8272    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2) {
8273        Mat points1_mat = points1;
8274        Mat points2_mat = points2;
8275        return new Mat(findFundamentalMat_6(points1_mat.nativeObj, points2_mat.nativeObj));
8276    }
8277
8278
8279    //
8280    // C++:  Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, Mat& mask, UsacParams params)
8281    //
8282
8283    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, Mat mask, UsacParams params) {
8284        Mat points1_mat = points1;
8285        Mat points2_mat = points2;
8286        return new Mat(findFundamentalMat_7(points1_mat.nativeObj, points2_mat.nativeObj, mask.nativeObj, params.nativeObj));
8287    }
8288
8289
8290    //
8291    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method = RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
8292    //
8293
8294    /**
8295     * Calculates an essential matrix from the corresponding points in two images.
8296     *
8297     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8298     * be floating-point (single or double precision).
8299     * @param points2 Array of the second image points of the same size and format as points1 .
8300     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
8301     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8302     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
8303     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
8304     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
8305     * passing these coordinates, pass the identity matrix for this parameter.
8306     * @param method Method for computing an essential matrix.
8307     * <ul>
8308     *   <li>
8309     *    REF: RANSAC for the RANSAC algorithm.
8310     *   </li>
8311     *   <li>
     *    REF: LMEDS for the LMedS algorithm.
     *   </li>
     * </ul>
     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
     * confidence (probability) that the estimated matrix is correct.
     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
     * point localization, image resolution, and the image noise.
     * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
     * for the other points. The array is computed only in the RANSAC and LMedS methods.
8323     *
8324     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
8326     *
8327     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
8328     *
8329     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
8330     * second images, respectively. The result of this function may be passed further to
8331     * decomposeEssentialMat or recoverPose to recover the relative pose between cameras.
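     *
     * A short usage sketch through this Java binding (the matched point arrays {@code points1},
     * {@code points2} and the {@code 3x3} intrinsic matrix {@code cameraMatrix} are assumed to be available):
     * <code>
     *     Mat mask = new Mat();
     *     Mat E = Calib3d.findEssentialMat(points1, points2, cameraMatrix,
     *             Calib3d.RANSAC, 0.999, 1.0, mask);
     *     // E can then be passed to decomposeEssentialMat or recoverPose
     * </code>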
8332     * @return automatically generated
8333     */
8334    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob, double threshold, Mat mask) {
8335        return new Mat(findEssentialMat_0(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method, prob, threshold, mask.nativeObj));
8336    }
8337
8338    /**
8339     * Calculates an essential matrix from the corresponding points in two images.
8340     *
8341     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8342     * be floating-point (single or double precision).
8343     * @param points2 Array of the second image points of the same size and format as points1 .
8344     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
8345     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8346     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
8347     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
8348     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
8349     * passing these coordinates, pass the identity matrix for this parameter.
8350     * @param method Method for computing an essential matrix.
8351     * <ul>
8352     *   <li>
8353     *    REF: RANSAC for the RANSAC algorithm.
8354     *   </li>
8355     *   <li>
     *    REF: LMEDS for the LMedS algorithm.
     *   </li>
     * </ul>
     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
     * confidence (probability) that the estimated matrix is correct.
     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
     * point localization, image resolution, and the image noise.
8366     *
8367     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
8369     *
8370     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
8371     *
8372     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
8373     * second images, respectively. The result of this function may be passed further to
8374     * decomposeEssentialMat or recoverPose to recover the relative pose between cameras.
8375     * @return automatically generated
8376     */
8377    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob, double threshold) {
8378        return new Mat(findEssentialMat_1(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method, prob, threshold));
8379    }
8380
8381    /**
8382     * Calculates an essential matrix from the corresponding points in two images.
8383     *
8384     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8385     * be floating-point (single or double precision).
8386     * @param points2 Array of the second image points of the same size and format as points1 .
8387     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
8388     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8389     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
8390     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
8391     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
8392     * passing these coordinates, pass the identity matrix for this parameter.
8393     * @param method Method for computing an essential matrix.
8394     * <ul>
8395     *   <li>
8396     *    REF: RANSAC for the RANSAC algorithm.
8397     *   </li>
8398     *   <li>
8399     *    REF: LMEDS for the LMedS algorithm.
8400     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
8401     * confidence (probability) that the estimated matrix is correct.
8402     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
8403     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
8404     * point localization, image resolution, and the image noise.
8405     * for the other points. The array is computed only in the RANSAC and LMedS methods.
8406     *   </li>
8407     * </ul>
8408     *
8409     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
8411     *
8412     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
8413     *
8414     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
8415     * second images, respectively. The result of this function may be passed further to
8416     * decomposeEssentialMat or recoverPose to recover the relative pose between cameras.
8417     * @return automatically generated
8418     */
8419    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob) {
8420        return new Mat(findEssentialMat_2(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method, prob));
8421    }
8422
8423    /**
8424     * Calculates an essential matrix from the corresponding points in two images.
8425     *
8426     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8427     * be floating-point (single or double precision).
8428     * @param points2 Array of the second image points of the same size and format as points1 .
8429     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
8430     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8431     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
8432     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
8433     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
8434     * passing these coordinates, pass the identity matrix for this parameter.
8435     * @param method Method for computing an essential matrix.
8436     * <ul>
8437     *   <li>
8438     *    REF: RANSAC for the RANSAC algorithm.
8439     *   </li>
8440     *   <li>
8441     *    REF: LMEDS for the LMedS algorithm.
8442     * confidence (probability) that the estimated matrix is correct.
8443     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
8444     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
8445     * point localization, image resolution, and the image noise.
8446     * for the other points. The array is computed only in the RANSAC and LMedS methods.
8447     *   </li>
8448     * </ul>
8449     *
8450     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
8452     *
8453     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
8454     *
8455     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
8456     * second images, respectively. The result of this function may be passed further to
8457     * decomposeEssentialMat or recoverPose to recover the relative pose between cameras.
8458     * @return automatically generated
8459     */
8460    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method) {
8461        return new Mat(findEssentialMat_3(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method));
8462    }
8463
8464    /**
8465     * Calculates an essential matrix from the corresponding points in two images.
8466     *
8467     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8468     * be floating-point (single or double precision).
8469     * @param points2 Array of the second image points of the same size and format as points1 .
8470     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
8471     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8472     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
8473     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
8474     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
8475     * passing these coordinates, pass the identity matrix for this parameter.
8476     * <ul>
8477     *   <li>
8478     *    REF: RANSAC for the RANSAC algorithm.
8479     *   </li>
8480     *   <li>
8481     *    REF: LMEDS for the LMedS algorithm.
8482     * confidence (probability) that the estimated matrix is correct.
8483     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
8484     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
8485     * point localization, image resolution, and the image noise.
8486     * for the other points. The array is computed only in the RANSAC and LMedS methods.
8487     *   </li>
8488     * </ul>
8489     *
8490     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
8492     *
8493     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
8494     *
8495     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
8496     * second images, respectively. The result of this function may be passed further to
8497     * decomposeEssentialMat or recoverPose to recover the relative pose between cameras.
8498     * @return automatically generated
8499     */
8500    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix) {
8501        return new Mat(findEssentialMat_4(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj));
8502    }
8503
8504
8505    //
8506    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, double focal = 1.0, Point2d pp = Point2d(0, 0), int method = RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
8507    //
8508
8509    /**
8510     *
8511     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8512     * be floating-point (single or double precision).
8513     * @param points2 Array of the second image points of the same size and format as points1 .
8514     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
8515     * are feature points from cameras with same focal length and principal point.
8516     * @param pp principal point of the camera.
8517     * @param method Method for computing a fundamental matrix.
8518     * <ul>
8519     *   <li>
8520     *    REF: RANSAC for the RANSAC algorithm.
8521     *   </li>
8522     *   <li>
     *    REF: LMEDS for the LMedS algorithm.
     *   </li>
     * </ul>
     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
     * point localization, image resolution, and the image noise.
     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
     * confidence (probability) that the estimated matrix is correct.
     * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
     * for the other points. The array is computed only in the RANSAC and LMedS methods.
8534     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
8536     * principal point:
8537     *
8538     * \(A =
8539     * \begin{bmatrix}
8540     * f &amp; 0 &amp; x_{pp}  \\
8541     * 0 &amp; f &amp; y_{pp}  \\
8542     * 0 &amp; 0 &amp; 1
8543     * \end{bmatrix}\)
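     *
     * For example, an illustrative sketch with an assumed focal length and principal point (the matched
     * point arrays {@code points1} and {@code points2} are assumed to be available):
     * <code>
     *     Mat mask = new Mat();
     *     Mat E = Calib3d.findEssentialMat(points1, points2, 700.0, new Point(320, 240),
     *             Calib3d.RANSAC, 0.999, 1.0, mask);
     * </code>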
8544     * @return automatically generated
8545     */
8546    public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob, double threshold, Mat mask) {
8547        return new Mat(findEssentialMat_5(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method, prob, threshold, mask.nativeObj));
8548    }
8549
8550    /**
8551     *
8552     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8553     * be floating-point (single or double precision).
8554     * @param points2 Array of the second image points of the same size and format as points1 .
8555     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
8556     * are feature points from cameras with same focal length and principal point.
8557     * @param pp principal point of the camera.
8558     * @param method Method for computing a fundamental matrix.
8559     * <ul>
8560     *   <li>
8561     *    REF: RANSAC for the RANSAC algorithm.
8562     *   </li>
8563     *   <li>
8564     *    REF: LMEDS for the LMedS algorithm.
8565     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
8566     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
8567     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
8568     * point localization, image resolution, and the image noise.
8569     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
8570     * confidence (probability) that the estimated matrix is correct.
8571     * for the other points. The array is computed only in the RANSAC and LMedS methods.
8572     *   </li>
8573     * </ul>
8574     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
8576     * principal point:
8577     *
8578     * \(A =
8579     * \begin{bmatrix}
8580     * f &amp; 0 &amp; x_{pp}  \\
8581     * 0 &amp; f &amp; y_{pp}  \\
8582     * 0 &amp; 0 &amp; 1
8583     * \end{bmatrix}\)
8584     * @return automatically generated
8585     */
8586    public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob, double threshold) {
8587        return new Mat(findEssentialMat_6(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method, prob, threshold));
8588    }
8589
8590    /**
8591     *
8592     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8593     * be floating-point (single or double precision).
8594     * @param points2 Array of the second image points of the same size and format as points1 .
8595     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
8596     * are feature points from cameras with same focal length and principal point.
8597     * @param pp principal point of the camera.
8598     * @param method Method for computing a fundamental matrix.
8599     * <ul>
8600     *   <li>
8601     *    REF: RANSAC for the RANSAC algorithm.
8602     *   </li>
8603     *   <li>
8604     *    REF: LMEDS for the LMedS algorithm.
8605     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
8606     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
8607     * point localization, image resolution, and the image noise.
8608     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
8609     * confidence (probability) that the estimated matrix is correct.
8610     * for the other points. The array is computed only in the RANSAC and LMedS methods.
8611     *   </li>
8612     * </ul>
8613     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
8615     * principal point:
8616     *
8617     * \(A =
8618     * \begin{bmatrix}
8619     * f &amp; 0 &amp; x_{pp}  \\
8620     * 0 &amp; f &amp; y_{pp}  \\
8621     * 0 &amp; 0 &amp; 1
8622     * \end{bmatrix}\)
8623     * @return automatically generated
8624     */
8625    public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob) {
8626        return new Mat(findEssentialMat_7(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method, prob));
8627    }
8628
8629    /**
8630     *
8631     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8632     * be floating-point (single or double precision).
8633     * @param points2 Array of the second image points of the same size and format as points1 .
8634     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
8635     * are feature points from cameras with same focal length and principal point.
8636     * @param pp principal point of the camera.
8637     * @param method Method for computing a fundamental matrix.
8638     * <ul>
8639     *   <li>
8640     *    REF: RANSAC for the RANSAC algorithm.
8641     *   </li>
8642     *   <li>
8643     *    REF: LMEDS for the LMedS algorithm.
8644     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
8645     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
8646     * point localization, image resolution, and the image noise.
8647     * confidence (probability) that the estimated matrix is correct.
8648     * for the other points. The array is computed only in the RANSAC and LMedS methods.
8649     *   </li>
8650     * </ul>
8651     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
8653     * principal point:
8654     *
8655     * \(A =
8656     * \begin{bmatrix}
8657     * f &amp; 0 &amp; x_{pp}  \\
8658     * 0 &amp; f &amp; y_{pp}  \\
8659     * 0 &amp; 0 &amp; 1
8660     * \end{bmatrix}\)
8661     * @return automatically generated
8662     */
8663    public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method) {
8664        return new Mat(findEssentialMat_8(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method));
8665    }
8666
8667    /**
8668     *
8669     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8670     * be floating-point (single or double precision).
8671     * @param points2 Array of the second image points of the same size and format as points1 .
8672     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
8673     * are feature points from cameras with same focal length and principal point.
8674     * @param pp principal point of the camera.
8675     * <ul>
8676     *   <li>
8677     *    REF: RANSAC for the RANSAC algorithm.
8678     *   </li>
8679     *   <li>
     *    REF: LMEDS for the LMedS algorithm.
     *   </li>
     * </ul>
8688     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
8690     * principal point:
8691     *
8692     * \(A =
8693     * \begin{bmatrix}
8694     * f &amp; 0 &amp; x_{pp}  \\
8695     * 0 &amp; f &amp; y_{pp}  \\
8696     * 0 &amp; 0 &amp; 1
8697     * \end{bmatrix}\)
8698     * @return automatically generated
8699     */
8700    public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp) {
8701        return new Mat(findEssentialMat_9(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y));
8702    }
8703
8704    /**
8705     *
8706     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8707     * be floating-point (single or double precision).
8708     * @param points2 Array of the second image points of the same size and format as points1 .
8709     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
8710     * are feature points from cameras with same focal length and principal point.
8711     * <ul>
8712     *   <li>
8713     *    REF: RANSAC for the RANSAC algorithm.
8714     *   </li>
8715     *   <li>
     *    REF: LMEDS for the LMedS algorithm.
     *   </li>
     * </ul>
8724     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
8726     * principal point:
8727     *
8728     * \(A =
8729     * \begin{bmatrix}
8730     * f &amp; 0 &amp; x_{pp}  \\
8731     * 0 &amp; f &amp; y_{pp}  \\
8732     * 0 &amp; 0 &amp; 1
8733     * \end{bmatrix}\)
8734     * @return automatically generated
8735     */
8736    public static Mat findEssentialMat(Mat points1, Mat points2, double focal) {
8737        return new Mat(findEssentialMat_10(points1.nativeObj, points2.nativeObj, focal));
8738    }
8739
8740    /**
8741     *
8742     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8743     * be floating-point (single or double precision).
8744     * @param points2 Array of the second image points of the same size and format as points1 .
     * <ul>
     *   <li>
     *    REF: RANSAC for the RANSAC algorithm.
     *   </li>
     *   <li>
     *    REF: LMEDS for the LMedS algorithm.
     *   </li>
     * </ul>
8759     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
8761     * principal point:
8762     *
8763     * \(A =
8764     * \begin{bmatrix}
8765     * f &amp; 0 &amp; x_{pp}  \\
8766     * 0 &amp; f &amp; y_{pp}  \\
8767     * 0 &amp; 0 &amp; 1
8768     * \end{bmatrix}\)
8769     * @return automatically generated
8770     */
8771    public static Mat findEssentialMat(Mat points1, Mat points2) {
8772        return new Mat(findEssentialMat_11(points1.nativeObj, points2.nativeObj));
8773    }
8774
8775
8776    //
8777    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method = RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
8778    //
8779
8780    /**
8781     * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
8782     *
8783     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8784     * be floating-point (single or double precision).
8785     * @param points2 Array of the second image points of the same size and format as points1 .
8786     * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
8787     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8788     * same camera matrix. If this assumption does not hold for your use case, use
8789     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
8790     * to normalized image coordinates, which are valid for the identity camera matrix. When
8791     * passing these coordinates, pass the identity matrix for this parameter.
8792     * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
8793     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8794     * same camera matrix. If this assumption does not hold for your use case, use
8795     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
8796     * to normalized image coordinates, which are valid for the identity camera matrix. When
8797     * passing these coordinates, pass the identity matrix for this parameter.
8798     * @param distCoeffs1 Input vector of distortion coefficients
8799     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
8800     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
8801     * @param distCoeffs2 Input vector of distortion coefficients
8802     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
8803     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
8804     * @param method Method for computing an essential matrix.
8805     * <ul>
8806     *   <li>
8807     *    REF: RANSAC for the RANSAC algorithm.
8808     *   </li>
8809     *   <li>
     *    REF: LMEDS for the LMedS algorithm.
     *   </li>
     * </ul>
     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
     * confidence (probability) that the estimated matrix is correct.
     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
     * point localization, image resolution, and the image noise.
     * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
     * for the other points. The array is computed only in the RANSAC and LMedS methods.
8821     *
     * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
8824     *
8825     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
8826     *
8827     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
8828     * second images, respectively. The result of this function may be passed further to
8829     * decomposeEssentialMat or recoverPose to recover the relative pose between cameras.
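     *
     * A minimal usage sketch for this two-camera overload (illustrative only; the intrinsics,
     * distortion and point data below are assumed placeholders, not part of the original
     * documentation):
     * <code>
     *     // points1, points2: MatOfPoint2f of matched features from the two images
     *     Mat K1 = Mat.eye(3, 3, CvType.CV_64F); // replace with real intrinsics (org.opencv.core.CvType)
     *     Mat K2 = Mat.eye(3, 3, CvType.CV_64F);
     *     Mat d1 = new Mat(); // empty = zero distortion assumed
     *     Mat d2 = new Mat();
     *     Mat mask = new Mat();
     *     Mat E = Calib3d.findEssentialMat(points1, points2, K1, d1, K2, d2,
     *             Calib3d.RANSAC, 0.999, 1.0, mask);
     * </code>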
8830     * @return automatically generated
8831     */
8832    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method, double prob, double threshold, Mat mask) {
8833        return new Mat(findEssentialMat_12(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, method, prob, threshold, mask.nativeObj));
8834    }
8835
8836    /**
8837     * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
8838     *
8839     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8840     * be floating-point (single or double precision).
8841     * @param points2 Array of the second image points of the same size and format as points1 .
8842     * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
8843     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8844     * same camera matrix. If this assumption does not hold for your use case, use
8845     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
8846     * to normalized image coordinates, which are valid for the identity camera matrix. When
8847     * passing these coordinates, pass the identity matrix for this parameter.
8848     * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
8849     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8850     * same camera matrix. If this assumption does not hold for your use case, use
8851     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
8852     * to normalized image coordinates, which are valid for the identity camera matrix. When
8853     * passing these coordinates, pass the identity matrix for this parameter.
8854     * @param distCoeffs1 Input vector of distortion coefficients
8855     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
8856     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
8857     * @param distCoeffs2 Input vector of distortion coefficients
8858     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
8859     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
8860     * @param method Method for computing an essential matrix.
8861     * <ul>
8862     *   <li>
8863     *    REF: RANSAC for the RANSAC algorithm.
8864     *   </li>
8865     *   <li>
     *    REF: LMEDS for the LMedS algorithm.
     *   </li>
     * </ul>
     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
     * confidence (probability) that the estimated matrix is correct.
     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
     * point localization, image resolution, and the image noise.
8876     *
     * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
8879     *
8880     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
8881     *
8882     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
8883     * second images, respectively. The result of this function may be passed further to
8884     * decomposeEssentialMat or recoverPose to recover the relative pose between cameras.
8885     * @return automatically generated
8886     */
8887    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method, double prob, double threshold) {
8888        return new Mat(findEssentialMat_13(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, method, prob, threshold));
8889    }
8890
8891    /**
8892     * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
8893     *
8894     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8895     * be floating-point (single or double precision).
8896     * @param points2 Array of the second image points of the same size and format as points1 .
8897     * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
8898     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8899     * same camera matrix. If this assumption does not hold for your use case, use
8900     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
8901     * to normalized image coordinates, which are valid for the identity camera matrix. When
8902     * passing these coordinates, pass the identity matrix for this parameter.
8903     * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
8904     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8905     * same camera matrix. If this assumption does not hold for your use case, use
8906     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
8907     * to normalized image coordinates, which are valid for the identity camera matrix. When
8908     * passing these coordinates, pass the identity matrix for this parameter.
8909     * @param distCoeffs1 Input vector of distortion coefficients
8910     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
8911     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
8912     * @param distCoeffs2 Input vector of distortion coefficients
8913     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
8914     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
8915     * @param method Method for computing an essential matrix.
8916     * <ul>
8917     *   <li>
8918     *    REF: RANSAC for the RANSAC algorithm.
8919     *   </li>
8920     *   <li>
     *    REF: LMEDS for the LMedS algorithm.
     *   </li>
     * </ul>
     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
     * confidence (probability) that the estimated matrix is correct.
8930     *
     * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
8933     *
8934     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
8935     *
8936     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
8937     * second images, respectively. The result of this function may be passed further to
8938     * decomposeEssentialMat or recoverPose to recover the relative pose between cameras.
8939     * @return automatically generated
8940     */
8941    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method, double prob) {
8942        return new Mat(findEssentialMat_14(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, method, prob));
8943    }
8944
8945    /**
8946     * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
8947     *
8948     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
8949     * be floating-point (single or double precision).
8950     * @param points2 Array of the second image points of the same size and format as points1 .
8951     * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
8952     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8953     * same camera matrix. If this assumption does not hold for your use case, use
8954     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
8955     * to normalized image coordinates, which are valid for the identity camera matrix. When
8956     * passing these coordinates, pass the identity matrix for this parameter.
8957     * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
8958     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8959     * same camera matrix. If this assumption does not hold for your use case, use
8960     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
8961     * to normalized image coordinates, which are valid for the identity camera matrix. When
8962     * passing these coordinates, pass the identity matrix for this parameter.
8963     * @param distCoeffs1 Input vector of distortion coefficients
8964     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
8965     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
8966     * @param distCoeffs2 Input vector of distortion coefficients
8967     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
8968     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
8969     * @param method Method for computing an essential matrix.
8970     * <ul>
8971     *   <li>
8972     *    REF: RANSAC for the RANSAC algorithm.
8973     *   </li>
8974     *   <li>
     *    REF: LMEDS for the LMedS algorithm.
     *   </li>
     * </ul>
8983     *
     * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
8986     *
8987     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
8988     *
8989     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
8990     * second images, respectively. The result of this function may be passed further to
8991     * decomposeEssentialMat or recoverPose to recover the relative pose between cameras.
8992     * @return automatically generated
8993     */
8994    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method) {
8995        return new Mat(findEssentialMat_15(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, method));
8996    }
8997
8998    /**
8999     * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
9000     *
9001     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
9002     * be floating-point (single or double precision).
9003     * @param points2 Array of the second image points of the same size and format as points1 .
9004     * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
9005     * Note that this function assumes that points1 and points2 are feature points from cameras with the
9006     * same camera matrix. If this assumption does not hold for your use case, use
9007     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
9008     * to normalized image coordinates, which are valid for the identity camera matrix. When
9009     * passing these coordinates, pass the identity matrix for this parameter.
9010     * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
9011     * Note that this function assumes that points1 and points2 are feature points from cameras with the
9012     * same camera matrix. If this assumption does not hold for your use case, use
9013     * {@code undistortPoints()} with {@code P = cv::NoArray()} for both cameras to transform image points
9014     * to normalized image coordinates, which are valid for the identity camera matrix. When
9015     * passing these coordinates, pass the identity matrix for this parameter.
9016     * @param distCoeffs1 Input vector of distortion coefficients
9017     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
9018     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
9019     * @param distCoeffs2 Input vector of distortion coefficients
9020     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
9021     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
9022     * <ul>
9023     *   <li>
9024     *    REF: RANSAC for the RANSAC algorithm.
9025     *   </li>
9026     *   <li>
     *    REF: LMEDS for the LMedS algorithm.
     *   </li>
     * </ul>
9035     *
     * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
9038     *
9039     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
9040     *
9041     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
9042     * second images, respectively. The result of this function may be passed further to
9043     * decomposeEssentialMat or recoverPose to recover the relative pose between cameras.
9044     * @return automatically generated
9045     */
9046    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2) {
9047        return new Mat(findEssentialMat_16(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj));
9048    }
9049
9050
9051    //
9052    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat cameraMatrix2, Mat dist_coeff1, Mat dist_coeff2, Mat& mask, UsacParams params)
9053    //
9054
9055    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat cameraMatrix2, Mat dist_coeff1, Mat dist_coeff2, Mat mask, UsacParams params) {
9056        return new Mat(findEssentialMat_17(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, cameraMatrix2.nativeObj, dist_coeff1.nativeObj, dist_coeff2.nativeObj, mask.nativeObj, params.nativeObj));
9057    }
9058
9059
9060    //
9061    // C++:  void cv::decomposeEssentialMat(Mat E, Mat& R1, Mat& R2, Mat& t)
9062    //
9063
9064    /**
9065     * Decompose an essential matrix to possible rotations and translation.
9066     *
9067     * @param E The input essential matrix.
9068     * @param R1 One possible rotation matrix.
9069     * @param R2 Another possible rotation matrix.
9070     * @param t One possible translation.
9071     *
     * This function decomposes the essential matrix E using SVD decomposition CITE: HartleyZ00. In
9073     * general, four possible poses exist for the decomposition of E. They are \([R_1, t]\),
9074     * \([R_1, -t]\), \([R_2, t]\), \([R_2, -t]\).
9075     *
9076     * If E gives the epipolar constraint \([p_2; 1]^T A^{-T} E A^{-1} [p_1; 1] = 0\) between the image
     * points \(p_1\) in the first image and \(p_2\) in the second image, then any of the tuples
9078     * \([R_1, t]\), \([R_1, -t]\), \([R_2, t]\), \([R_2, -t]\) is a change of basis from the first
9079     * camera's coordinate system to the second camera's coordinate system. However, by decomposing E, one
9080     * can only get the direction of the translation. For this reason, the translation t is returned with
9081     * unit length.
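     *
     * A minimal usage sketch (illustrative; E is assumed to come from findEssentialMat):
     * <code>
     *     Mat R1 = new Mat(), R2 = new Mat(), t = new Mat();
     *     Calib3d.decomposeEssentialMat(E, R1, R2, t);
     *     // candidate poses: [R1, t], [R1, -t], [R2, t], [R2, -t]
     * </code>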
9082     */
9083    public static void decomposeEssentialMat(Mat E, Mat R1, Mat R2, Mat t) {
9084        decomposeEssentialMat_0(E.nativeObj, R1.nativeObj, R2.nativeObj, t.nativeObj);
9085    }
9086
9087
9088    //
9089    // C++:  int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, Mat& mask = Mat())
9090    //
9091
9092    /**
9093     * Recovers the relative camera rotation and the translation from an estimated essential
9094     * matrix and the corresponding points in two images, using cheirality check. Returns the number of
9095     * inliers that pass the check.
9096     *
9097     * @param E The input essential matrix.
9098     * @param points1 Array of N 2D points from the first image. The point coordinates should be
9099     * floating-point (single or double precision).
9100     * @param points2 Array of the second image points of the same size and format as points1 .
9101     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
9102     * Note that this function assumes that points1 and points2 are feature points from cameras with the
9103     * same camera intrinsic matrix.
9104     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
9105     * that performs a change of basis from the first camera's coordinate system to the second camera's
9106     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
9107     * described below.
9108     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
9109     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
9110     * length.
9111     * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
     * recover pose. In the output mask only inliers which pass the cheirality check are kept.
9114     *
9115     * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
9116     * possible pose hypotheses by doing cheirality check. The cheirality check means that the
9117     * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
9118     *
9119     * This function can be used to process the output E and mask from REF: findEssentialMat. In this
     * scenario, points1 and points2 are the same input as for findEssentialMat:
9121     * <code>
9122     *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
9123     *     int point_count = 100;
9124     *     vector&lt;Point2f&gt; points1(point_count);
9125     *     vector&lt;Point2f&gt; points2(point_count);
9126     *
9127     *     // initialize the points here ...
9128     *     for( int i = 0; i &lt; point_count; i++ )
9129     *     {
9130     *         points1[i] = ...;
9131     *         points2[i] = ...;
9132     *     }
9133     *
     *     // camera matrix with both focal lengths = 1, and principal point = (0, 0)
9135     *     Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
9136     *
9137     *     Mat E, R, t, mask;
9138     *
9139     *     E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
9140     *     recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
9141     * </code>
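     *
     * The same flow expressed with these Java bindings (a sketch only; the point data and
     * camera matrix below are placeholders):
     * <code>
     *     MatOfPoint2f points1 = new MatOfPoint2f(); // fill with matched points
     *     MatOfPoint2f points2 = new MatOfPoint2f();
     *
     *     // camera matrix with both focal lengths = 1 and principal point = (0, 0)
     *     Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F); // org.opencv.core.CvType
     *
     *     Mat R = new Mat(), t = new Mat(), mask = new Mat();
     *     Mat E = Calib3d.findEssentialMat(points1, points2, cameraMatrix,
     *             Calib3d.RANSAC, 0.999, 1.0, mask);
     *     int inliers = Calib3d.recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
     * </code>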
9142     * @return automatically generated
9143     */
9144    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, Mat mask) {
9145        return recoverPose_0(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj, mask.nativeObj);
9146    }
9147
9148    /**
9149     * Recovers the relative camera rotation and the translation from an estimated essential
9150     * matrix and the corresponding points in two images, using cheirality check. Returns the number of
9151     * inliers that pass the check.
9152     *
9153     * @param E The input essential matrix.
9154     * @param points1 Array of N 2D points from the first image. The point coordinates should be
9155     * floating-point (single or double precision).
9156     * @param points2 Array of the second image points of the same size and format as points1 .
9157     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
9158     * Note that this function assumes that points1 and points2 are feature points from cameras with the
9159     * same camera intrinsic matrix.
9160     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
9161     * that performs a change of basis from the first camera's coordinate system to the second camera's
9162     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
9163     * described below.
9164     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
9165     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
9166     * length.
9169     *
9170     * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
9171     * possible pose hypotheses by doing cheirality check. The cheirality check means that the
9172     * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
9173     *
9174     * This function can be used to process the output E and mask from REF: findEssentialMat. In this
     * scenario, points1 and points2 are the same input as for findEssentialMat:
9176     * <code>
9177     *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
9178     *     int point_count = 100;
9179     *     vector&lt;Point2f&gt; points1(point_count);
9180     *     vector&lt;Point2f&gt; points2(point_count);
9181     *
9182     *     // initialize the points here ...
9183     *     for( int i = 0; i &lt; point_count; i++ )
9184     *     {
9185     *         points1[i] = ...;
9186     *         points2[i] = ...;
9187     *     }
9188     *
     *     // camera matrix with both focal lengths = 1, and principal point = (0, 0)
9190     *     Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
9191     *
9192     *     Mat E, R, t, mask;
9193     *
9194     *     E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
9195     *     recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
9196     * </code>
9197     * @return automatically generated
9198     */
9199    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t) {
9200        return recoverPose_1(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj);
9201    }
9202
9203
9204    //
9205    // C++:  int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat& R, Mat& t, double focal = 1.0, Point2d pp = Point2d(0, 0), Mat& mask = Mat())
9206    //
9207
9208    /**
9209     *
9210     * @param E The input essential matrix.
9211     * @param points1 Array of N 2D points from the first image. The point coordinates should be
9212     * floating-point (single or double precision).
9213     * @param points2 Array of the second image points of the same size and format as points1 .
9214     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
9215     * that performs a change of basis from the first camera's coordinate system to the second camera's
9216     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
9217     * description below.
9218     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
9219     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
9220     * length.
9221     * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
9222     * are feature points from cameras with same focal length and principal point.
9223     * @param pp principal point of the camera.
9224     * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
     * recover pose. In the output mask only inliers which pass the cheirality check are kept.
9227     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
9229     * principal point:
9230     *
9231     * \(A =
9232     * \begin{bmatrix}
9233     * f &amp; 0 &amp; x_{pp}  \\
9234     * 0 &amp; f &amp; y_{pp}  \\
9235     * 0 &amp; 0 &amp; 1
9236     * \end{bmatrix}\)
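     *
     * A minimal usage sketch for this focal-length/principal-point overload (illustrative; the
     * focal length and principal point below are assumed values):
     * <code>
     *     Mat R = new Mat(), t = new Mat(), mask = new Mat();
     *     int inliers = Calib3d.recoverPose(E, points1, points2, R, t, 800.0,
     *             new Point(320, 240), mask);
     * </code>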
9237     * @return automatically generated
9238     */
9239    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t, double focal, Point pp, Mat mask) {
9240        return recoverPose_2(E.nativeObj, points1.nativeObj, points2.nativeObj, R.nativeObj, t.nativeObj, focal, pp.x, pp.y, mask.nativeObj);
9241    }
9242
9243    /**
9244     *
9245     * @param E The input essential matrix.
9246     * @param points1 Array of N 2D points from the first image. The point coordinates should be
9247     * floating-point (single or double precision).
9248     * @param points2 Array of the second image points of the same size and format as points1 .
9249     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
9250     * that performs a change of basis from the first camera's coordinate system to the second camera's
9251     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
9252     * description below.
9253     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
9254     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
9255     * length.
9256     * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
9257     * are feature points from cameras with same focal length and principal point.
9258     * @param pp principal point of the camera.
9261     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
9263     * principal point:
9264     *
9265     * \(A =
9266     * \begin{bmatrix}
9267     * f &amp; 0 &amp; x_{pp}  \\
9268     * 0 &amp; f &amp; y_{pp}  \\
9269     * 0 &amp; 0 &amp; 1
9270     * \end{bmatrix}\)
9271     * @return automatically generated
9272     */
9273    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t, double focal, Point pp) {
9274        return recoverPose_3(E.nativeObj, points1.nativeObj, points2.nativeObj, R.nativeObj, t.nativeObj, focal, pp.x, pp.y);
9275    }
9276
9277    /**
9278     *
9279     * @param E The input essential matrix.
9280     * @param points1 Array of N 2D points from the first image. The point coordinates should be
9281     * floating-point (single or double precision).
9282     * @param points2 Array of the second image points of the same size and format as points1 .
9283     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
9284     * that performs a change of basis from the first camera's coordinate system to the second camera's
9285     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
9286     * description below.
9287     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
9288     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
9289     * length.
9290     * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
9291     * are feature points from cameras with same focal length and principal point.
9294     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
9296     * principal point:
9297     *
9298     * \(A =
9299     * \begin{bmatrix}
9300     * f &amp; 0 &amp; x_{pp}  \\
9301     * 0 &amp; f &amp; y_{pp}  \\
9302     * 0 &amp; 0 &amp; 1
9303     * \end{bmatrix}\)
9304     * @return automatically generated
9305     */
9306    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t, double focal) {
9307        return recoverPose_4(E.nativeObj, points1.nativeObj, points2.nativeObj, R.nativeObj, t.nativeObj, focal);
9308    }
9309
9310    /**
9311     *
9312     * @param E The input essential matrix.
9313     * @param points1 Array of N 2D points from the first image. The point coordinates should be
9314     * floating-point (single or double precision).
9315     * @param points2 Array of the second image points of the same size and format as points1 .
9316     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
9317     * that performs a change of basis from the first camera's coordinate system to the second camera's
9318     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
9319     * description below.
9320     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
9321     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
9322     * length.
9326     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
9328     * principal point:
9329     *
9330     * \(A =
9331     * \begin{bmatrix}
9332     * f &amp; 0 &amp; x_{pp}  \\
9333     * 0 &amp; f &amp; y_{pp}  \\
9334     * 0 &amp; 0 &amp; 1
9335     * \end{bmatrix}\)
9336     * @return automatically generated
9337     */
9338    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t) {
9339        return recoverPose_5(E.nativeObj, points1.nativeObj, points2.nativeObj, R.nativeObj, t.nativeObj);
9340    }
9341
9342
9343    //
9344    // C++:  int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, double distanceThresh, Mat& mask = Mat(), Mat& triangulatedPoints = Mat())
9345    //
9346
9347    /**
9348     *
9349     * @param E The input essential matrix.
9350     * @param points1 Array of N 2D points from the first image. The point coordinates should be
9351     * floating-point (single or double precision).
9352     * @param points2 Array of the second image points of the same size and format as points1.
9353     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
9354     * Note that this function assumes that points1 and points2 are feature points from cameras with the
9355     * same camera intrinsic matrix.
9356     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
9357     * that performs a change of basis from the first camera's coordinate system to the second camera's
9358     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
9359     * description below.
9360     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
9361     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
9362     * length.
9363     * @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite
9364     * points).
9365     * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
     * recover pose. In the output mask only inliers which pass the cheirality check are kept.
9368     * @param triangulatedPoints 3D points which were reconstructed by triangulation.
9369     *
     * This function differs from the one above in that it outputs the triangulated 3D points that are used for
9371     * the cheirality check.
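     *
     * A minimal usage sketch (illustrative; the distance threshold of 50 is an assumed value):
     * <code>
     *     Mat R = new Mat(), t = new Mat(), mask = new Mat(), points3d = new Mat();
     *     int inliers = Calib3d.recoverPose(E, points1, points2, cameraMatrix, R, t,
     *             50.0, mask, points3d); // points3d: triangulated points in homogeneous coordinates
     * </code>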
9372     * @return automatically generated
9373     */
9374    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, double distanceThresh, Mat mask, Mat triangulatedPoints) {
9375        return recoverPose_6(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj, distanceThresh, mask.nativeObj, triangulatedPoints.nativeObj);
9376    }
9377
9378    /**
9379     *
9380     * @param E The input essential matrix.
9381     * @param points1 Array of N 2D points from the first image. The point coordinates should be
9382     * floating-point (single or double precision).
9383     * @param points2 Array of the second image points of the same size and format as points1.
9384     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
9385     * Note that this function assumes that points1 and points2 are feature points from cameras with the
9386     * same camera intrinsic matrix.
9387     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
9388     * that performs a change of basis from the first camera's coordinate system to the second camera's
9389     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
9390     * description below.
9391     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
9392     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
9393     * length.
9394     * @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite
9395     * points).
9396     * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
     * recover pose. In the output mask only inliers which pass the cheirality check are kept.
9399     *
     * This function differs from the one above in that it outputs the triangulated 3D points that are used for
9401     * the cheirality check.
9402     * @return automatically generated
9403     */
9404    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, double distanceThresh, Mat mask) {
9405        return recoverPose_7(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj, distanceThresh, mask.nativeObj);
9406    }
9407
9408    /**
9409     *
9410     * @param E The input essential matrix.
9411     * @param points1 Array of N 2D points from the first image. The point coordinates should be
9412     * floating-point (single or double precision).
9413     * @param points2 Array of the second image points of the same size and format as points1.
9414     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
9415     * Note that this function assumes that points1 and points2 are feature points from cameras with the
9416     * same camera intrinsic matrix.
9417     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
9418     * that performs a change of basis from the first camera's coordinate system to the second camera's
9419     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
9420     * description below.
9421     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
9422     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
9423     * length.
9424     * @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite
9425     * points).
9428     *
     * This function differs from the one above in that it outputs the triangulated 3D points that are used for
9430     * the cheirality check.
9431     * @return automatically generated
9432     */
9433    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, double distanceThresh) {
9434        return recoverPose_8(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj, distanceThresh);
9435    }
9436
9437
9438    //
9439    // C++:  void cv::computeCorrespondEpilines(Mat points, int whichImage, Mat F, Mat& lines)
9440    //
9441
9442    /**
9443     * For points in an image of a stereo pair, computes the corresponding epilines in the other image.
9444     *
9445     * @param points Input points. \(N \times 1\) or \(1 \times N\) matrix of type CV_32FC2 or
9446     * vector&lt;Point2f&gt; .
9447     * @param whichImage Index of the image (1 or 2) that contains the points .
9448     * @param F Fundamental matrix that can be estimated using findFundamentalMat or stereoRectify .
9449     * @param lines Output vector of the epipolar lines corresponding to the points in the other image.
9450     * Each line \(ax + by + c=0\) is encoded by 3 numbers \((a, b, c)\) .
9451     *
9452     * For every point in one of the two images of a stereo pair, the function finds the equation of the
9453     * corresponding epipolar line in the other image.
9454     *
9455     * From the fundamental matrix definition (see findFundamentalMat ), line \(l^{(2)}_i\) in the second
9456     * image for the point \(p^{(1)}_i\) in the first image (when whichImage=1 ) is computed as:
9457     *
9458     * \(l^{(2)}_i = F p^{(1)}_i\)
9459     *
9460     * And vice versa, when whichImage=2, \(l^{(1)}_i\) is computed from \(p^{(2)}_i\) as:
9461     *
9462     * \(l^{(1)}_i = F^T p^{(2)}_i\)
9463     *
9464     * Line coefficients are defined up to a scale. They are normalized so that \(a_i^2+b_i^2=1\) .
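     *
     * A minimal usage sketch (illustrative; F is assumed to come from findFundamentalMat):
     * <code>
     *     MatOfPoint2f points1 = new MatOfPoint2f(); // points detected in the first image
     *     Mat lines2 = new Mat();                    // Nx1, 3-channel float: one (a, b, c) per point
     *     Calib3d.computeCorrespondEpilines(points1, 1, F, lines2);
     * </code>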
9465     */
9466    public static void computeCorrespondEpilines(Mat points, int whichImage, Mat F, Mat lines) {
9467        computeCorrespondEpilines_0(points.nativeObj, whichImage, F.nativeObj, lines.nativeObj);
9468    }
9469
9470
9471    //
9472    // C++:  void cv::triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat& points4D)
9473    //
9474
9475    /**
9476     * This function reconstructs 3-dimensional points (in homogeneous coordinates) by using
9477     * their observations with a stereo camera.
9478     *
9479     * @param projMatr1 3x4 projection matrix of the first camera, i.e. this matrix projects 3D points
9480     * given in the world's coordinate system into the first image.
9481     * @param projMatr2 3x4 projection matrix of the second camera, i.e. this matrix projects 3D points
9482     * given in the world's coordinate system into the second image.
9483     * @param projPoints1 2xN array of feature points in the first image. In the case of the c++ version,
9484     * it can be also a vector of feature points or two-channel matrix of size 1xN or Nx1.
9485     * @param projPoints2 2xN array of corresponding points in the second image. In the case of the c++
9486     * version, it can be also a vector of feature points or two-channel matrix of size 1xN or Nx1.
9487     * @param points4D 4xN array of reconstructed points in homogeneous coordinates. These points are
9488     * returned in the world's coordinate system.
9489     *
9490     * <b>Note:</b>
9491     *    Keep in mind that all input data should be of float type in order for this function to work.
9492     *
9493     * <b>Note:</b>
9494     *    If the projection matrices from REF: stereoRectify are used, then the returned points are
9495     *    represented in the first camera's rectified coordinate system.
9496     *
9497     * SEE:
9498     *    reprojectImageTo3D
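     *
     * A minimal usage sketch (illustrative; the projection matrices and point arrays below are
     * placeholders and must be single-precision float):
     * <code>
     *     int n = 10; // number of correspondences (placeholder)
     *     Mat projMatr1 = Mat.eye(3, 4, CvType.CV_32F);   // e.g. K [I | 0]
     *     Mat projMatr2 = new Mat(3, 4, CvType.CV_32F);   // e.g. K [R | t]
     *     Mat projPoints1 = new Mat(2, n, CvType.CV_32F); // 2xN image points, first view
     *     Mat projPoints2 = new Mat(2, n, CvType.CV_32F); // 2xN image points, second view
     *     Mat points4D = new Mat();
     *     Calib3d.triangulatePoints(projMatr1, projMatr2, projPoints1, projPoints2, points4D);
     * </code>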
9499     */
9500    public static void triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat points4D) {
9501        triangulatePoints_0(projMatr1.nativeObj, projMatr2.nativeObj, projPoints1.nativeObj, projPoints2.nativeObj, points4D.nativeObj);
9502    }
9503
9504
9505    //
9506    // C++:  void cv::correctMatches(Mat F, Mat points1, Mat points2, Mat& newPoints1, Mat& newPoints2)
9507    //
9508
9509    /**
9510     * Refines coordinates of corresponding points.
9511     *
9512     * @param F 3x3 fundamental matrix.
9513     * @param points1 1xN array containing the first set of points.
9514     * @param points2 1xN array containing the second set of points.
9515     * @param newPoints1 The optimized points1.
9516     * @param newPoints2 The optimized points2.
9517     *
9518     * The function implements the Optimal Triangulation Method (see Multiple View Geometry for details).
9519     * For each given point correspondence points1[i] &lt;-&gt; points2[i], and a fundamental matrix F, it
9520     * computes the corrected correspondences newPoints1[i] &lt;-&gt; newPoints2[i] that minimize the geometric
9521     * error \(d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\) (where \(d(a,b)\) is the
9522     * geometric distance between points \(a\) and \(b\) ) subject to the epipolar constraint
9523     * \(newPoints2^T * F * newPoints1 = 0\) .
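     *
     * A minimal usage sketch (illustrative; F and the 1xN two-channel point arrays are assumed
     * inputs):
     * <code>
     *     Mat newPoints1 = new Mat(), newPoints2 = new Mat();
     *     Calib3d.correctMatches(F, points1, points2, newPoints1, newPoints2);
     * </code>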
9524     */
9525    public static void correctMatches(Mat F, Mat points1, Mat points2, Mat newPoints1, Mat newPoints2) {
9526        correctMatches_0(F.nativeObj, points1.nativeObj, points2.nativeObj, newPoints1.nativeObj, newPoints2.nativeObj);
9527    }
9528
9529
9530    //
9531    // C++:  void cv::filterSpeckles(Mat& img, double newVal, int maxSpeckleSize, double maxDiff, Mat& buf = Mat())
9532    //
9533
9534    /**
9535     * Filters off small noise blobs (speckles) in the disparity map
9536     *
9537     * @param img The input 16-bit signed disparity image
9538     * @param newVal The disparity value used to paint-off the speckles
9539     * @param maxSpeckleSize The maximum speckle size to consider it a speckle. Larger blobs are not
9540     * affected by the algorithm
9541     * @param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
     * blob. Note that since StereoBM, StereoSGBM, and maybe other algorithms return a fixed-point
9543     * disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
9544     * account when specifying this parameter value.
9545     * @param buf The optional temporary buffer to avoid memory allocation within the function.
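     *
     * A minimal usage sketch (illustrative; the speckle size and difference values are assumed,
     * with the difference scaled by 16 because the disparity map is fixed-point):
     * <code>
     *     Mat buf = new Mat();
     *     Calib3d.filterSpeckles(disparity, 0, 200, 16 * 2, buf); // disparity: CV_16S map
     * </code>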
9546     */
9547    public static void filterSpeckles(Mat img, double newVal, int maxSpeckleSize, double maxDiff, Mat buf) {
9548        filterSpeckles_0(img.nativeObj, newVal, maxSpeckleSize, maxDiff, buf.nativeObj);
9549    }
9550
9551    /**
9552     * Filters off small noise blobs (speckles) in the disparity map
9553     *
9554     * @param img The input 16-bit signed disparity image
9555     * @param newVal The disparity value used to paint-off the speckles
9556     * @param maxSpeckleSize The maximum speckle size to consider it a speckle. Larger blobs are not
9557     * affected by the algorithm
9558     * @param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
     * blob. Note that since StereoBM, StereoSGBM, and maybe other algorithms return a fixed-point
9560     * disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
9561     * account when specifying this parameter value.
9562     */
9563    public static void filterSpeckles(Mat img, double newVal, int maxSpeckleSize, double maxDiff) {
9564        filterSpeckles_1(img.nativeObj, newVal, maxSpeckleSize, maxDiff);
9565    }
9566
9567
9568    //
9569    // C++:  Rect cv::getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int blockSize)
9570    //
9571
9572    public static Rect getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int blockSize) {
9573        return new Rect(getValidDisparityROI_0(roi1.x, roi1.y, roi1.width, roi1.height, roi2.x, roi2.y, roi2.width, roi2.height, minDisparity, numberOfDisparities, blockSize));
9574    }
9575
9576
9577    //
9578    // C++:  void cv::validateDisparity(Mat& disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp = 1)
9579    //
9580
9581    public static void validateDisparity(Mat disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp) {
9582        validateDisparity_0(disparity.nativeObj, cost.nativeObj, minDisparity, numberOfDisparities, disp12MaxDisp);
9583    }
9584
9585    public static void validateDisparity(Mat disparity, Mat cost, int minDisparity, int numberOfDisparities) {
9586        validateDisparity_1(disparity.nativeObj, cost.nativeObj, minDisparity, numberOfDisparities);
9587    }
9588
9589
9590    //
9591    // C++:  void cv::reprojectImageTo3D(Mat disparity, Mat& _3dImage, Mat Q, bool handleMissingValues = false, int ddepth = -1)
9592    //
9593
9594    /**
9595     * Reprojects a disparity image to 3D space.
9596     *
9597     * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
9598     * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
9599     * fractional bits. If the disparity is in 16-bit signed format, as computed by REF: StereoBM or
9600     * REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
9601     * being used here.
9602     * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
9603     * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
9604     * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
9605     * camera's rectified coordinate system.
9606     * @param Q \(4 \times 4\) perspective transformation matrix that can be obtained with
9607     * REF: stereoRectify.
9608     * @param handleMissingValues Indicates whether the function should handle missing values (i.e.
9609     * points where the disparity was not computed). If handleMissingValues=true, then pixels with the
9610     * minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
9611     * to 3D points with a very large Z value (currently set to 10000).
9612     * @param ddepth The optional output array depth. If it is -1, the output image will have CV_32F
9613     * depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
9614     *
9615     * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
9616     * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
9617     * computes:
9618     *
9619     * \(\begin{bmatrix}
9620     * X \\
9621     * Y \\
9622     * Z \\
9623     * W
9624     * \end{bmatrix} = Q \begin{bmatrix}
9625     * x \\
9626     * y \\
9627     * \texttt{disparity} (x,y) \\
9628     * 1
9629     * \end{bmatrix}.\)
9630     *
9631     * SEE:
9632     *    To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
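     *
     * A minimal usage sketch (assuming {@code disp16} is a CV_16S disparity map from StereoBM/StereoSGBM
     * and {@code Q} was obtained from stereoRectify):
     * <pre>{@code
     * Mat dispF = new Mat();
     * disp16.convertTo(dispF, CvType.CV_32F, 1.0 / 16.0); // undo the x16 fixed-point scaling
     * Mat xyz = new Mat();
     * Calib3d.reprojectImageTo3D(dispF, xyz, Q, true);    // xyz is a CV_32FC3 image of (X, Y, Z)
     * }</pre>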
9633     */
9634    public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q, boolean handleMissingValues, int ddepth) {
9635        reprojectImageTo3D_0(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj, handleMissingValues, ddepth);
9636    }
9637
9638    /**
9639     * Reprojects a disparity image to 3D space.
9640     *
9641     * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
9642     * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
9643     * fractional bits. If the disparity is in 16-bit signed format, as computed by REF: StereoBM or
9644     * REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
9645     * being used here.
9646     * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
9647     * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
9648     * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
9649     * camera's rectified coordinate system.
9650     * @param Q \(4 \times 4\) perspective transformation matrix that can be obtained with
9651     * REF: stereoRectify.
9652     * @param handleMissingValues Indicates whether the function should handle missing values (i.e.
9653     * points where the disparity was not computed). If handleMissingValues=true, then pixels with the
9654     * minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
9655     * to 3D points with a very large Z value (currently set to 10000).
9656     * In this overload the output array depth defaults to CV_32F.
9657     *
9658     * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
9659     * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
9660     * computes:
9661     *
9662     * \(\begin{bmatrix}
9663     * X \\
9664     * Y \\
9665     * Z \\
9666     * W
9667     * \end{bmatrix} = Q \begin{bmatrix}
9668     * x \\
9669     * y \\
9670     * \texttt{disparity} (x,y) \\
9671     * 1
9672     * \end{bmatrix}.\)
9673     *
9674     * SEE:
9675     *    To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
9676     */
9677    public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q, boolean handleMissingValues) {
9678        reprojectImageTo3D_1(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj, handleMissingValues);
9679    }
9680
9681    /**
9682     * Reprojects a disparity image to 3D space.
9683     *
9684     * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
9685     * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
9686     * fractional bits. If the disparity is in 16-bit signed format, as computed by REF: StereoBM or
9687     * REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
9688     * being used here.
9689     * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
9690     * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
9691     * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
9692     * camera's rectified coordinate system.
9693     * @param Q \(4 \times 4\) perspective transformation matrix that can be obtained with
9694     * REF: stereoRectify.
9695     * In this overload missing values are not handled (handleMissingValues defaults to false) and the
9696     * output array depth defaults to CV_32F.
9699     *
9700     * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
9701     * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
9702     * computes:
9703     *
9704     * \(\begin{bmatrix}
9705     * X \\
9706     * Y \\
9707     * Z \\
9708     * W
9709     * \end{bmatrix} = Q \begin{bmatrix}
9710     * x \\
9711     * y \\
9712     * \texttt{disparity} (x,y) \\
9713     * 1
9714     * \end{bmatrix}.\)
9715     *
9716     * SEE:
9717     *    To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
9718     */
9719    public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q) {
9720        reprojectImageTo3D_2(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj);
9721    }
9722
9723
9724    //
9725    // C++:  double cv::sampsonDistance(Mat pt1, Mat pt2, Mat F)
9726    //
9727
9728    /**
9729     * Calculates the Sampson Distance between two points.
9730     *
9731     * The function cv::sampsonDistance calculates and returns the first-order approximation of the geometric error as:
9732     * \(
9733     * sd( \texttt{pt1} , \texttt{pt2} )=
9734     * \frac{(\texttt{pt2}^t \cdot \texttt{F} \cdot \texttt{pt1})^2}
9735     * {((\texttt{F} \cdot \texttt{pt1})(0))^2 +
9736     * ((\texttt{F} \cdot \texttt{pt1})(1))^2 +
9737     * ((\texttt{F}^t \cdot \texttt{pt2})(0))^2 +
9738     * ((\texttt{F}^t \cdot \texttt{pt2})(1))^2}
9739     * \)
9740     * The fundamental matrix may be calculated using the cv::findFundamentalMat function. See CITE: HartleyZ00 11.4.3 for details.
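     *
     * A minimal usage sketch (coordinates are placeholders; {@code F} is assumed to come from
     * findFundamentalMat):
     * <pre>{@code
     * Mat p1 = new Mat(3, 1, CvType.CV_64F);
     * p1.put(0, 0, 120.0, 85.0, 1.0);  // homogeneous point in the first image
     * Mat p2 = new Mat(3, 1, CvType.CV_64F);
     * p2.put(0, 0, 123.5, 84.0, 1.0);  // its match in the second image
     * double err = Calib3d.sampsonDistance(p1, p2, F);
     * }</pre>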
9741     * @param pt1 first homogeneous 2D point
9742     * @param pt2 second homogeneous 2D point
9743     * @param F fundamental matrix
9744     * @return The computed Sampson distance.
9745     */
9746    public static double sampsonDistance(Mat pt1, Mat pt2, Mat F) {
9747        return sampsonDistance_0(pt1.nativeObj, pt2.nativeObj, F.nativeObj);
9748    }
9749
9750
9751    //
9752    // C++:  int cv::estimateAffine3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
9753    //
9754
9755    /**
9756     * Computes an optimal affine transformation between two 3D point sets.
9757     *
9758     * It computes
9759     * \(
9760     * \begin{bmatrix}
9761     * x\\
9762     * y\\
9763     * z\\
9764     * \end{bmatrix}
9765     * =
9766     * \begin{bmatrix}
9767     * a_{11} &amp; a_{12} &amp; a_{13}\\
9768     * a_{21} &amp; a_{22} &amp; a_{23}\\
9769     * a_{31} &amp; a_{32} &amp; a_{33}\\
9770     * \end{bmatrix}
9771     * \begin{bmatrix}
9772     * X\\
9773     * Y\\
9774     * Z\\
9775     * \end{bmatrix}
9776     * +
9777     * \begin{bmatrix}
9778     * b_1\\
9779     * b_2\\
9780     * b_3\\
9781     * \end{bmatrix}
9782     * \)
9783     *
9784     * @param src First input 3D point set containing \((X,Y,Z)\).
9785     * @param dst Second input 3D point set containing \((x,y,z)\).
9786     * @param out Output 3D affine transformation matrix \(3 \times 4\) of the form
9787     * \(
9788     * \begin{bmatrix}
9789     * a_{11} &amp; a_{12} &amp; a_{13} &amp; b_1\\
9790     * a_{21} &amp; a_{22} &amp; a_{23} &amp; b_2\\
9791     * a_{31} &amp; a_{32} &amp; a_{33} &amp; b_3\\
9792     * \end{bmatrix}
9793     * \)
9794     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9795     * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
9796     * an inlier.
9797     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
9798     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9799     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9800     *
9801     * The function estimates an optimal 3D affine transformation between two 3D point sets using the
9802     * RANSAC algorithm.
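     *
     * A minimal usage sketch (the point sets are illustrative; here {@code dst} is {@code src} shifted
     * by (1, 2, 3)):
     * <pre>{@code
     * MatOfPoint3f src = new MatOfPoint3f(
     *         new Point3(0, 0, 0), new Point3(1, 0, 0), new Point3(0, 1, 0), new Point3(0, 0, 1));
     * MatOfPoint3f dst = new MatOfPoint3f(
     *         new Point3(1, 2, 3), new Point3(2, 2, 3), new Point3(1, 3, 3), new Point3(1, 2, 4));
     * Mat affine = new Mat(), inliers = new Mat();
     * int ok = Calib3d.estimateAffine3D(src, dst, affine, inliers, 3, 0.99); // affine is 3x4
     * }</pre>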
9803     * @return automatically generated
9804     */
9805    public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold, double confidence) {
9806        return estimateAffine3D_0(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold, confidence);
9807    }
9808
9809    /**
9810     * Computes an optimal affine transformation between two 3D point sets.
9811     *
9812     * It computes
9813     * \(
9814     * \begin{bmatrix}
9815     * x\\
9816     * y\\
9817     * z\\
9818     * \end{bmatrix}
9819     * =
9820     * \begin{bmatrix}
9821     * a_{11} &amp; a_{12} &amp; a_{13}\\
9822     * a_{21} &amp; a_{22} &amp; a_{23}\\
9823     * a_{31} &amp; a_{32} &amp; a_{33}\\
9824     * \end{bmatrix}
9825     * \begin{bmatrix}
9826     * X\\
9827     * Y\\
9828     * Z\\
9829     * \end{bmatrix}
9830     * +
9831     * \begin{bmatrix}
9832     * b_1\\
9833     * b_2\\
9834     * b_3\\
9835     * \end{bmatrix}
9836     * \)
9837     *
9838     * @param src First input 3D point set containing \((X,Y,Z)\).
9839     * @param dst Second input 3D point set containing \((x,y,z)\).
9840     * @param out Output 3D affine transformation matrix \(3 \times 4\) of the form
9841     * \(
9842     * \begin{bmatrix}
9843     * a_{11} &amp; a_{12} &amp; a_{13} &amp; b_1\\
9844     * a_{21} &amp; a_{22} &amp; a_{23} &amp; b_2\\
9845     * a_{31} &amp; a_{32} &amp; a_{33} &amp; b_3\\
9846     * \end{bmatrix}
9847     * \)
9848     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9849     * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
9850     * an inlier.
9851     * In this overload the confidence level defaults to 0.99.
9853     *
9854     * The function estimates an optimal 3D affine transformation between two 3D point sets using the
9855     * RANSAC algorithm.
9856     * @return automatically generated
9857     */
9858    public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold) {
9859        return estimateAffine3D_1(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold);
9860    }
9861
9862    /**
9863     * Computes an optimal affine transformation between two 3D point sets.
9864     *
9865     * It computes
9866     * \(
9867     * \begin{bmatrix}
9868     * x\\
9869     * y\\
9870     * z\\
9871     * \end{bmatrix}
9872     * =
9873     * \begin{bmatrix}
9874     * a_{11} &amp; a_{12} &amp; a_{13}\\
9875     * a_{21} &amp; a_{22} &amp; a_{23}\\
9876     * a_{31} &amp; a_{32} &amp; a_{33}\\
9877     * \end{bmatrix}
9878     * \begin{bmatrix}
9879     * X\\
9880     * Y\\
9881     * Z\\
9882     * \end{bmatrix}
9883     * +
9884     * \begin{bmatrix}
9885     * b_1\\
9886     * b_2\\
9887     * b_3\\
9888     * \end{bmatrix}
9889     * \)
9890     *
9891     * @param src First input 3D point set containing \((X,Y,Z)\).
9892     * @param dst Second input 3D point set containing \((x,y,z)\).
9893     * @param out Output 3D affine transformation matrix \(3 \times 4\) of the form
9894     * \(
9895     * \begin{bmatrix}
9896     * a_{11} &amp; a_{12} &amp; a_{13} &amp; b_1\\
9897     * a_{21} &amp; a_{22} &amp; a_{23} &amp; b_2\\
9898     * a_{31} &amp; a_{32} &amp; a_{33} &amp; b_3\\
9899     * \end{bmatrix}
9900     * \)
9901     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9902     * In this overload the RANSAC reprojection threshold defaults to 3 and the confidence level
9903     * defaults to 0.99.
9905     *
9906     * The function estimates an optimal 3D affine transformation between two 3D point sets using the
9907     * RANSAC algorithm.
9908     * @return automatically generated
9909     */
9910    public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers) {
9911        return estimateAffine3D_2(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj);
9912    }
9913
9914
9915    //
9916    // C++:  int cv::estimateTranslation3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
9917    //
9918
9919    /**
9920     * Computes an optimal translation between two 3D point sets.
9921     *
9922     * It computes
9923     * \(
9924     * \begin{bmatrix}
9925     * x\\
9926     * y\\
9927     * z\\
9928     * \end{bmatrix}
9929     * =
9930     * \begin{bmatrix}
9931     * X\\
9932     * Y\\
9933     * Z\\
9934     * \end{bmatrix}
9935     * +
9936     * \begin{bmatrix}
9937     * b_1\\
9938     * b_2\\
9939     * b_3\\
9940     * \end{bmatrix}
9941     * \)
9942     *
9943     * @param src First input 3D point set containing \((X,Y,Z)\).
9944     * @param dst Second input 3D point set containing \((x,y,z)\).
9945     * @param out Output 3D translation vector \(3 \times 1\) of the form
9946     * \(
9947     * \begin{bmatrix}
9948     * b_1 \\
9949     * b_2 \\
9950     * b_3 \\
9951     * \end{bmatrix}
9952     * \)
9953     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9954     * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
9955     * an inlier.
9956     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
9957     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9958     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9959     *
9960     * The function estimates an optimal 3D translation between two 3D point sets using the
9961     * RANSAC algorithm.
9962     *
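     * A minimal usage sketch (the point sets are illustrative; here {@code dst} is {@code src} shifted
     * by (2, 1, 5)):
     * <pre>{@code
     * MatOfPoint3f src = new MatOfPoint3f(new Point3(0, 0, 0), new Point3(1, 0, 0), new Point3(0, 1, 0));
     * MatOfPoint3f dst = new MatOfPoint3f(new Point3(2, 1, 5), new Point3(3, 1, 5), new Point3(2, 2, 5));
     * Mat t = new Mat(), inliers = new Mat();
     * int ok = Calib3d.estimateTranslation3D(src, dst, t, inliers, 3, 0.99); // t is 3x1
     * }</pre>
     *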
9963     * @return automatically generated
9964     */
9965    public static int estimateTranslation3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold, double confidence) {
9966        return estimateTranslation3D_0(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold, confidence);
9967    }
9968
9969    /**
9970     * Computes an optimal translation between two 3D point sets.
9971     *
9972     * It computes
9973     * \(
9974     * \begin{bmatrix}
9975     * x\\
9976     * y\\
9977     * z\\
9978     * \end{bmatrix}
9979     * =
9980     * \begin{bmatrix}
9981     * X\\
9982     * Y\\
9983     * Z\\
9984     * \end{bmatrix}
9985     * +
9986     * \begin{bmatrix}
9987     * b_1\\
9988     * b_2\\
9989     * b_3\\
9990     * \end{bmatrix}
9991     * \)
9992     *
9993     * @param src First input 3D point set containing \((X,Y,Z)\).
9994     * @param dst Second input 3D point set containing \((x,y,z)\).
9995     * @param out Output 3D translation vector \(3 \times 1\) of the form
9996     * \(
9997     * \begin{bmatrix}
9998     * b_1 \\
9999     * b_2 \\
10000     * b_3 \\
10001     * \end{bmatrix}
10002     * \)
10003     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
10004     * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
10005     * an inlier.
10006     * In this overload the confidence level defaults to 0.99.
10008     *
10009     * The function estimates an optimal 3D translation between two 3D point sets using the
10010     * RANSAC algorithm.
10011     *
10012     * @return automatically generated
10013     */
10014    public static int estimateTranslation3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold) {
10015        return estimateTranslation3D_1(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold);
10016    }
10017
10018    /**
10019     * Computes an optimal translation between two 3D point sets.
10020     *
10021     * It computes
10022     * \(
10023     * \begin{bmatrix}
10024     * x\\
10025     * y\\
10026     * z\\
10027     * \end{bmatrix}
10028     * =
10029     * \begin{bmatrix}
10030     * X\\
10031     * Y\\
10032     * Z\\
10033     * \end{bmatrix}
10034     * +
10035     * \begin{bmatrix}
10036     * b_1\\
10037     * b_2\\
10038     * b_3\\
10039     * \end{bmatrix}
10040     * \)
10041     *
10042     * @param src First input 3D point set containing \((X,Y,Z)\).
10043     * @param dst Second input 3D point set containing \((x,y,z)\).
10044     * @param out Output 3D translation vector \(3 \times 1\) of the form
10045     * \(
10046     * \begin{bmatrix}
10047     * b_1 \\
10048     * b_2 \\
10049     * b_3 \\
10050     * \end{bmatrix}
10051     * \)
10052     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
10053     * In this overload the RANSAC reprojection threshold defaults to 3 and the confidence level
10054     * defaults to 0.99.
10056     *
10057     * The function estimates an optimal 3D translation between two 3D point sets using the
10058     * RANSAC algorithm.
10059     *
10060     * @return automatically generated
10061     */
10062    public static int estimateTranslation3D(Mat src, Mat dst, Mat out, Mat inliers) {
10063        return estimateTranslation3D_2(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj);
10064    }
10065
10066
10067    //
10068    // C++:  Mat cv::estimateAffine2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
10069    //
10070
10071    /**
10072     * Computes an optimal affine transformation between two 2D point sets.
10073     *
10074     * It computes
10075     * \(
10076     * \begin{bmatrix}
10077     * x\\
10078     * y\\
10079     * \end{bmatrix}
10080     * =
10081     * \begin{bmatrix}
10082     * a_{11} &amp; a_{12}\\
10083     * a_{21} &amp; a_{22}\\
10084     * \end{bmatrix}
10085     * \begin{bmatrix}
10086     * X\\
10087     * Y\\
10088     * \end{bmatrix}
10089     * +
10090     * \begin{bmatrix}
10091     * b_1\\
10092     * b_2\\
10093     * \end{bmatrix}
10094     * \)
10095     *
10096     * @param from First input 2D point set containing \((X,Y)\).
10097     * @param to Second input 2D point set containing \((x,y)\).
10098     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
10099     * @param method Robust method used to compute transformation. The following methods are possible:
10100     * <ul>
10101     *   <li>
10102     *    REF: RANSAC - RANSAC-based robust method
10103     *   </li>
10104     *   <li>
10105     *    REF: LMEDS - Least-Median robust method</li></ul>
10106     * RANSAC is the default method.
10107     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
10108     * a point as an inlier. Applies only to RANSAC.
10109     * @param maxIters The maximum number of robust method iterations.
10110     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
10111     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
10112     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
10113     * @param refineIters Maximum number of iterations of the refining algorithm (Levenberg-Marquardt).
10114     * Passing 0 will disable refining, so the output matrix will be the output of the robust method.
10117     *
10118     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
10119     * could not be estimated. The returned matrix has the following form:
10120     * \(
10121     * \begin{bmatrix}
10122     * a_{11} &amp; a_{12} &amp; b_1\\
10123     * a_{21} &amp; a_{22} &amp; b_2\\
10124     * \end{bmatrix}
10125     * \)
10126     *
10127     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
10128     * selected robust algorithm.
10129     *
10130     * The computed transformation is then refined further (using only inliers) with the
10131     * Levenberg-Marquardt method to reduce the re-projection error even more.
10132     *
10133     * <b>Note:</b>
10134     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10135     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10136     * correctly only when there are more than 50% of inliers.
10137     *
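     * A minimal usage sketch (the point sets are illustrative; here {@code to} is {@code from} shifted
     * by (10, 20), so the estimated matrix is close to a pure translation):
     * <pre>{@code
     * MatOfPoint2f from = new MatOfPoint2f(
     *         new Point(0, 0), new Point(100, 0), new Point(0, 100), new Point(100, 100));
     * MatOfPoint2f to = new MatOfPoint2f(
     *         new Point(10, 20), new Point(110, 20), new Point(10, 120), new Point(110, 120));
     * Mat inliers = new Mat();
     * Mat A = Calib3d.estimateAffine2D(from, to, inliers, Calib3d.RANSAC, 3, 2000, 0.99, 10);
     * // A is a 2x3 CV_64F matrix [a11 a12 b1; a21 a22 b2]
     * }</pre>
     *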
10138     * SEE: estimateAffinePartial2D, getAffineTransform
10139     */
10140    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters) {
10141        return new Mat(estimateAffine2D_0(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters, confidence, refineIters));
10142    }
10143
10144    /**
10145     * Computes an optimal affine transformation between two 2D point sets.
10146     *
10147     * It computes
10148     * \(
10149     * \begin{bmatrix}
10150     * x\\
10151     * y\\
10152     * \end{bmatrix}
10153     * =
10154     * \begin{bmatrix}
10155     * a_{11} &amp; a_{12}\\
10156     * a_{21} &amp; a_{22}\\
10157     * \end{bmatrix}
10158     * \begin{bmatrix}
10159     * X\\
10160     * Y\\
10161     * \end{bmatrix}
10162     * +
10163     * \begin{bmatrix}
10164     * b_1\\
10165     * b_2\\
10166     * \end{bmatrix}
10167     * \)
10168     *
10169     * @param from First input 2D point set containing \((X,Y)\).
10170     * @param to Second input 2D point set containing \((x,y)\).
10171     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
10172     * @param method Robust method used to compute transformation. The following methods are possible:
10173     * <ul>
10174     *   <li>
10175     *    REF: RANSAC - RANSAC-based robust method
10176     *   </li>
10177     *   <li>
10178     *    REF: LMEDS - Least-Median robust method</li></ul>
10179     * RANSAC is the default method.
10180     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
10181     * a point as an inlier. Applies only to RANSAC.
10182     * @param maxIters The maximum number of robust method iterations.
10183     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
10184     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
10185     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
10186     * In this overload the number of refining iterations defaults to 10.
10189     *
10190     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
10191     * could not be estimated. The returned matrix has the following form:
10192     * \(
10193     * \begin{bmatrix}
10194     * a_{11} &amp; a_{12} &amp; b_1\\
10195     * a_{21} &amp; a_{22} &amp; b_2\\
10196     * \end{bmatrix}
10197     * \)
10198     *
10199     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
10200     * selected robust algorithm.
10201     *
10202     * The computed transformation is then refined further (using only inliers) with the
10203     * Levenberg-Marquardt method to reduce the re-projection error even more.
10204     *
10205     * <b>Note:</b>
10206     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10207     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10208     * correctly only when there are more than 50% of inliers.
10209     *
10210     * SEE: estimateAffinePartial2D, getAffineTransform
10211     */
10212    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence) {
10213        return new Mat(estimateAffine2D_1(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters, confidence));
10214    }
10215
10216    /**
10217     * Computes an optimal affine transformation between two 2D point sets.
10218     *
10219     * It computes
10220     * \(
10221     * \begin{bmatrix}
10222     * x\\
10223     * y\\
10224     * \end{bmatrix}
10225     * =
10226     * \begin{bmatrix}
10227     * a_{11} &amp; a_{12}\\
10228     * a_{21} &amp; a_{22}\\
10229     * \end{bmatrix}
10230     * \begin{bmatrix}
10231     * X\\
10232     * Y\\
10233     * \end{bmatrix}
10234     * +
10235     * \begin{bmatrix}
10236     * b_1\\
10237     * b_2\\
10238     * \end{bmatrix}
10239     * \)
10240     *
10241     * @param from First input 2D point set containing \((X,Y)\).
10242     * @param to Second input 2D point set containing \((x,y)\).
10243     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
10244     * @param method Robust method used to compute transformation. The following methods are possible:
10245     * <ul>
10246     *   <li>
10247     *    REF: RANSAC - RANSAC-based robust method
10248     *   </li>
10249     *   <li>
10250     *    REF: LMEDS - Least-Median robust method</li></ul>
10251     * RANSAC is the default method.
10252     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
10253     * a point as an inlier. Applies only to RANSAC.
10254     * @param maxIters The maximum number of robust method iterations.
10255     * In this overload the confidence level defaults to 0.99 and the number of refining iterations
10256     * defaults to 10.
10260     *
10261     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
10262     * could not be estimated. The returned matrix has the following form:
10263     * \(
10264     * \begin{bmatrix}
10265     * a_{11} &amp; a_{12} &amp; b_1\\
10266     * a_{21} &amp; a_{22} &amp; b_2\\
10267     * \end{bmatrix}
10268     * \)
10269     *
10270     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
10271     * selected robust algorithm.
10272     *
10273     * The computed transformation is then refined further (using only inliers) with the
10274     * Levenberg-Marquardt method to reduce the re-projection error even more.
10275     *
10276     * <b>Note:</b>
10277     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10278     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10279     * correctly only when there are more than 50% of inliers.
10280     *
10281     * SEE: estimateAffinePartial2D, getAffineTransform
10282     */
10283    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters) {
10284        return new Mat(estimateAffine2D_2(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters));
10285    }
10286
10287    /**
10288     * Computes an optimal affine transformation between two 2D point sets.
10289     *
10290     * It computes
10291     * \(
10292     * \begin{bmatrix}
10293     * x\\
10294     * y\\
10295     * \end{bmatrix}
10296     * =
10297     * \begin{bmatrix}
10298     * a_{11} &amp; a_{12}\\
10299     * a_{21} &amp; a_{22}\\
10300     * \end{bmatrix}
10301     * \begin{bmatrix}
10302     * X\\
10303     * Y\\
10304     * \end{bmatrix}
10305     * +
10306     * \begin{bmatrix}
10307     * b_1\\
10308     * b_2\\
10309     * \end{bmatrix}
10310     * \)
10311     *
10312     * @param from First input 2D point set containing \((X,Y)\).
10313     * @param to Second input 2D point set containing \((x,y)\).
10314     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
10315     * @param method Robust method used to compute transformation. The following methods are possible:
10316     * <ul>
10317     *   <li>
10318     *    REF: RANSAC - RANSAC-based robust method
10319     *   </li>
10320     *   <li>
10321     *    REF: LMEDS - Least-Median robust method</li></ul>
10322     * RANSAC is the default method.
10323     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
10324     * a point as an inlier. Applies only to RANSAC.
10325     * In this overload the maximum number of iterations defaults to 2000, the confidence level to
10326     * 0.99, and the number of refining iterations to 10.
10330     *
10331     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
10332     * could not be estimated. The returned matrix has the following form:
10333     * \(
10334     * \begin{bmatrix}
10335     * a_{11} &amp; a_{12} &amp; b_1\\
10336     * a_{21} &amp; a_{22} &amp; b_2\\
10337     * \end{bmatrix}
10338     * \)
10339     *
10340     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
10341     * selected robust algorithm.
10342     *
10343     * The computed transformation is then refined further (using only inliers) with the
10344     * Levenberg-Marquardt method to reduce the re-projection error even more.
10345     *
10346     * <b>Note:</b>
10347     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10348     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10349     * correctly only when there are more than 50% of inliers.
10350     *
10351     * SEE: estimateAffinePartial2D, getAffineTransform
10352     */
10353    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold) {
10354        return new Mat(estimateAffine2D_3(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold));
10355    }
10356
10357    /**
10358     * Computes an optimal affine transformation between two 2D point sets.
10359     *
10360     * It computes
10361     * \(
10362     * \begin{bmatrix}
10363     * x\\
10364     * y\\
10365     * \end{bmatrix}
10366     * =
10367     * \begin{bmatrix}
10368     * a_{11} &amp; a_{12}\\
10369     * a_{21} &amp; a_{22}\\
10370     * \end{bmatrix}
10371     * \begin{bmatrix}
10372     * X\\
10373     * Y\\
10374     * \end{bmatrix}
10375     * +
10376     * \begin{bmatrix}
10377     * b_1\\
10378     * b_2\\
10379     * \end{bmatrix}
10380     * \)
10381     *
10382     * @param from First input 2D point set containing \((X,Y)\).
10383     * @param to Second input 2D point set containing \((x,y)\).
10384     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
10385     * @param method Robust method used to compute transformation. The following methods are possible:
10386     * <ul>
10387     *   <li>
10388     *    REF: RANSAC - RANSAC-based robust method
10389     *   </li>
10390     *   <li>
10391     *    REF: LMEDS - Least-Median robust method</li></ul>
10392     * RANSAC is the default method.
10393     * In this overload the RANSAC reprojection threshold defaults to 3, the maximum number of
10394     * iterations to 2000, the confidence level to 0.99, and the number of refining iterations to 10.
10399     *
10400     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
10401     * could not be estimated. The returned matrix has the following form:
10402     * \(
10403     * \begin{bmatrix}
10404     * a_{11} &amp; a_{12} &amp; b_1\\
10405     * a_{21} &amp; a_{22} &amp; b_2\\
10406     * \end{bmatrix}
10407     * \)
10408     *
10409     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
10410     * selected robust algorithm.
10411     *
10412     * The computed transformation is then refined further (using only inliers) with the
10413     * Levenberg-Marquardt method to reduce the re-projection error even more.
10414     *
10415     * <b>Note:</b>
10416     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10417     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10418     * correctly only when there are more than 50% of inliers.
10419     *
10420     * SEE: estimateAffinePartial2D, getAffineTransform
10421     */
10422    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method) {
10423        return new Mat(estimateAffine2D_4(from.nativeObj, to.nativeObj, inliers.nativeObj, method));
10424    }
10425
10426    /**
10427     * Computes an optimal affine transformation between two 2D point sets.
10428     *
10429     * It computes
10430     * \(
10431     * \begin{bmatrix}
10432     * x\\
10433     * y\\
10434     * \end{bmatrix}
10435     * =
10436     * \begin{bmatrix}
10437     * a_{11} &amp; a_{12}\\
10438     * a_{21} &amp; a_{22}\\
10439     * \end{bmatrix}
10440     * \begin{bmatrix}
10441     * X\\
10442     * Y\\
10443     * \end{bmatrix}
10444     * +
10445     * \begin{bmatrix}
10446     * b_1\\
10447     * b_2\\
10448     * \end{bmatrix}
10449     * \)
10450     *
10451     * @param from First input 2D point set containing \((X,Y)\).
10452     * @param to Second input 2D point set containing \((x,y)\).
10453     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
10454     * In this overload the robust method defaults to RANSAC, the reprojection threshold to 3, the
10455     * maximum number of iterations to 2000, the confidence level to 0.99, and the number of refining
10456     * iterations to 10.
10467     *
10468     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
10469     * could not be estimated. The returned matrix has the following form:
10470     * \(
10471     * \begin{bmatrix}
10472     * a_{11} &amp; a_{12} &amp; b_1\\
10473     * a_{21} &amp; a_{22} &amp; b_2\\
10474     * \end{bmatrix}
10475     * \)
10476     *
10477     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
10478     * selected robust algorithm.
10479     *
10480     * The computed transformation is then refined further (using only inliers) with the
10481     * Levenberg-Marquardt method to reduce the re-projection error even more.
10482     *
10483     * <b>Note:</b>
10484     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10485     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10486     * correctly only when there are more than 50% of inliers.
10487     *
10488     * SEE: estimateAffinePartial2D, getAffineTransform
10489     */
10490    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers) {
10491        return new Mat(estimateAffine2D_5(from.nativeObj, to.nativeObj, inliers.nativeObj));
10492    }
10493
10494    /**
10495     * Computes an optimal affine transformation between two 2D point sets.
10496     *
10497     * It computes
10498     * \(
10499     * \begin{bmatrix}
10500     * x\\
10501     * y\\
10502     * \end{bmatrix}
10503     * =
10504     * \begin{bmatrix}
10505     * a_{11} &amp; a_{12}\\
10506     * a_{21} &amp; a_{22}\\
10507     * \end{bmatrix}
10508     * \begin{bmatrix}
10509     * X\\
10510     * Y\\
10511     * \end{bmatrix}
10512     * +
10513     * \begin{bmatrix}
10514     * b_1\\
10515     * b_2\\
10516     * \end{bmatrix}
10517     * \)
10518     *
10519     * @param from First input 2D point set containing \((X,Y)\).
10520     * @param to Second input 2D point set containing \((x,y)\).
10521     * In this overload no inlier mask is returned and the robust method defaults to RANSAC, with a
10522     * reprojection threshold of 3, at most 2000 iterations, a confidence level of 0.99, and 10
10523     * refining iterations.
10534     *
10535     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
10536     * could not be estimated. The returned matrix has the following form:
10537     * \(
10538     * \begin{bmatrix}
10539     * a_{11} &amp; a_{12} &amp; b_1\\
10540     * a_{21} &amp; a_{22} &amp; b_2\\
10541     * \end{bmatrix}
10542     * \)
10543     *
10544     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
10545     * selected robust algorithm.
10546     *
10547     * The computed transformation is then refined further (using only inliers) with the
10548     * Levenberg-Marquardt method to reduce the re-projection error even more.
10549     *
10550     * <b>Note:</b>
10551     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10552     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10553     * correctly only when there are more than 50% of inliers.
10554     *
10555     * SEE: estimateAffinePartial2D, getAffineTransform
10556     */
10557    public static Mat estimateAffine2D(Mat from, Mat to) {
10558        return new Mat(estimateAffine2D_6(from.nativeObj, to.nativeObj));
10559    }
10560
10561
10562    //
10563    // C++:  Mat cv::estimateAffine2D(Mat pts1, Mat pts2, Mat& inliers, UsacParams params)
10564    //
10565
10566    public static Mat estimateAffine2D(Mat pts1, Mat pts2, Mat inliers, UsacParams params) {
10567        return new Mat(estimateAffine2D_7(pts1.nativeObj, pts2.nativeObj, inliers.nativeObj, params.nativeObj));
10568    }
10569
10570
10571    //
10572    // C++:  Mat cv::estimateAffinePartial2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
10573    //
10574
10575    /**
10576     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10577     * two 2D point sets.
10578     *
10579     * @param from First input 2D point set.
10580     * @param to Second input 2D point set.
10581     * @param inliers Output vector indicating which points are inliers.
10582     * @param method Robust method used to compute transformation. The following methods are possible:
10583     * <ul>
10584     *   <li>
10585     *    REF: RANSAC - RANSAC-based robust method
10586     *   </li>
10587     *   <li>
10588     *    REF: LMEDS - Least-Median robust method</li></ul>
10589     * RANSAC is the default method.
10590     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
10591     * a point as an inlier. Applies only to RANSAC.
10592     * @param maxIters The maximum number of robust method iterations.
10593     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
10594     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
10595     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
10596     * @param refineIters Maximum number of iterations of the refining algorithm (Levenberg-Marquardt).
10597     * Passing 0 will disable refining, so the output matrix will be the output of the robust method.
10600     *
10601     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10602     * empty matrix if transformation could not be estimated.
10603     *
10604     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10605     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10606     * estimation.
10607     *
10608     * The computed transformation is then refined further (using only inliers) with the
10609     * Levenberg-Marquardt method to reduce the re-projection error even more.
10610     *
10611     * Estimated transformation matrix is:
10612     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10613     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10614     * \end{bmatrix} \)
10615     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10616     * translations in \( x, y \) axes respectively.
10617     *
10618     * <b>Note:</b>
10619     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10620     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10621     * correctly only when there are more than 50% of inliers.
10622     *
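     * A minimal usage sketch (the point sets are illustrative; here {@code to} is {@code from} rotated
     * by 90 degrees and shifted by (50, 50)):
     * <pre>{@code
     * MatOfPoint2f from = new MatOfPoint2f(
     *         new Point(0, 0), new Point(100, 0), new Point(100, 100), new Point(0, 100));
     * MatOfPoint2f to = new MatOfPoint2f(
     *         new Point(50, 50), new Point(50, 150), new Point(-50, 150), new Point(-50, 50));
     * Mat inliers = new Mat();
     * Mat M = Calib3d.estimateAffinePartial2D(from, to, inliers, Calib3d.RANSAC, 3, 2000, 0.99, 10);
     * // M is 2x3: [cos(t)*s  -sin(t)*s  t_x; sin(t)*s  cos(t)*s  t_y]
     * }</pre>
     *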
10623     * SEE: estimateAffine2D, getAffineTransform
10624     */
10625    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters) {
10626        return new Mat(estimateAffinePartial2D_0(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters, confidence, refineIters));
10627    }
10628
10629    /**
10630     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10631     * two 2D point sets.
10632     *
10633     * @param from First input 2D point set.
10634     * @param to Second input 2D point set.
10635     * @param inliers Output vector indicating which points are inliers.
10636     * @param method Robust method used to compute transformation. The following methods are possible:
10637     * <ul>
10638     *   <li>
10639     *    REF: RANSAC - RANSAC-based robust method
10640     *   </li>
10641     *   <li>
10642     *    REF: LMEDS - Least-Median robust method</li></ul>
10643     * RANSAC is the default method.
10644     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
10645     * a point as an inlier. Applies only to RANSAC.
10646     * @param maxIters The maximum number of robust method iterations.
10647     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
10648     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
10649     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
10650     * In this overload the number of refining iterations defaults to 10.
10653     *
10654     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10655     * empty matrix if transformation could not be estimated.
10656     *
10657     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10658     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10659     * estimation.
10660     *
10661     * The computed transformation is then refined further (using only inliers) with the
10662     * Levenberg-Marquardt method to reduce the re-projection error even more.
10663     *
10664     * Estimated transformation matrix is:
10665     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10666     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10667     * \end{bmatrix} \)
10668     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10669     * translations in \( x, y \) axes respectively.
10670     *
10671     * <b>Note:</b>
10672     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10673     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10674     * correctly only when there are more than 50% of inliers.
10675     *
10676     * SEE: estimateAffine2D, getAffineTransform
10677     */
10678    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence) {
10679        return new Mat(estimateAffinePartial2D_1(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters, confidence));
10680    }
10681
10682    /**
10683     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10684     * two 2D point sets.
10685     *
10686     * @param from First input 2D point set.
10687     * @param to Second input 2D point set.
10688     * @param inliers Output vector indicating which points are inliers.
10689     * @param method Robust method used to compute transformation. The following methods are possible:
10690     * <ul>
10691     *   <li>
10692     *    REF: RANSAC - RANSAC-based robust method
10693     *   </li>
10694     *   <li>
10695     *    REF: LMEDS - Least-Median robust method</li></ul>
10696     * RANSAC is the default method.
10697     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
10698     * a point as an inlier. Applies only to RANSAC.
10699     * @param maxIters The maximum number of robust method iterations.
10700     * In this overload the confidence level defaults to 0.99 and the number of refining iterations
10701     * defaults to 10.
10705     *
10706     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10707     * empty matrix if transformation could not be estimated.
10708     *
10709     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10710     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10711     * estimation.
10712     *
10713     * The computed transformation is then refined further (using only inliers) with the
10714     * Levenberg-Marquardt method to reduce the re-projection error even more.
10715     *
10716     * Estimated transformation matrix is:
10717     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10718     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10719     * \end{bmatrix} \)
10720     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10721     * translations in \( x, y \) axes respectively.
10722     *
10723     * <b>Note:</b>
10724     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10725     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10726     * correctly only when there are more than 50% of inliers.
10727     *
10728     * SEE: estimateAffine2D, getAffineTransform
10729     */
10730    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters) {
10731        return new Mat(estimateAffinePartial2D_2(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters));
10732    }
10733
10734    /**
10735     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10736     * two 2D point sets.
10737     *
10738     * @param from First input 2D point set.
10739     * @param to Second input 2D point set.
10740     * @param inliers Output vector indicating which points are inliers.
10741     * @param method Robust method used to compute transformation. The following methods are possible:
10742     * <ul>
10743     *   <li>
10744     *    REF: RANSAC - RANSAC-based robust method
10745     *   </li>
10746     *   <li>
10747     *    REF: LMEDS - Least-Median robust method</li></ul>
10748     * RANSAC is the default method.
10749     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
10750     * a point as an inlier. Applies only to RANSAC.
10751     * In this overload the maximum number of iterations defaults to 2000, the confidence level to
10752     * 0.99, and the number of refining iterations to 10.
10756     *
10757     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10758     * empty matrix if transformation could not be estimated.
10759     *
10760     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10761     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10762     * estimation.
10763     *
10764     * The computed transformation is then refined further (using only inliers) with the
10765     * Levenberg-Marquardt method to reduce the re-projection error even more.
10766     *
10767     * Estimated transformation matrix is:
10768     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10769     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10770     * \end{bmatrix} \)
10771     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10772     * translations in \( x, y \) axes respectively.
10773     *
10774     * <b>Note:</b>
10775     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10776     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10777     * correctly only when there are more than 50% of inliers.
10778     *
10779     * SEE: estimateAffine2D, getAffineTransform
10780     */
10781    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold) {
10782        return new Mat(estimateAffinePartial2D_3(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold));
10783    }
10784
10785    /**
10786     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10787     * two 2D point sets.
10788     *
10789     * @param from First input 2D point set.
10790     * @param to Second input 2D point set.
10791     * @param inliers Output vector indicating which points are inliers.
10792     * @param method Robust method used to compute transformation. The following methods are possible:
10793     * <ul>
10794     *   <li>
10795     *    REF: RANSAC - RANSAC-based robust method
10796     *   </li>
10797     *   <li>
10798     *    REF: LMEDS - Least-Median robust method</li></ul>
10799     * RANSAC is the default method.
10800     * In this overload the RANSAC reprojection threshold defaults to 3, the maximum number of
10801     * iterations to 2000, the confidence level to 0.99, and the number of refining iterations to 10.
10806     *
10807     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10808     * empty matrix if transformation could not be estimated.
10809     *
10810     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10811     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10812     * estimation.
10813     *
10814     * The computed transformation is then refined further (using only inliers) with the
10815     * Levenberg-Marquardt method to reduce the re-projection error even more.
10816     *
10817     * Estimated transformation matrix is:
10818     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10819     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10820     * \end{bmatrix} \)
10821     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10822     * translations in \( x, y \) axes respectively.
10823     *
10824     * <b>Note:</b>
     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10826     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10827     * correctly only when there are more than 50% of inliers.
10828     *
10829     * SEE: estimateAffine2D, getAffineTransform
10830     */
10831    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method) {
10832        return new Mat(estimateAffinePartial2D_4(from.nativeObj, to.nativeObj, inliers.nativeObj, method));
10833    }
10834
10835    /**
10836     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10837     * two 2D point sets.
10838     *
10839     * @param from First input 2D point set.
10840     * @param to Second input 2D point set.
10841     * @param inliers Output vector indicating which points are inliers.
     * <ul>
     *   <li>
     *    REF: RANSAC - RANSAC-based robust method
     *   </li>
     *   <li>
     *    REF: LMEDS - Least-Median robust method
     *   </li>
     * </ul>
     *
     * RANSAC is the default method; the omitted parameters (inlier threshold, confidence,
     * maximum number of iterations and refinement iterations) take their default values.
10855     *
10856     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10857     * empty matrix if transformation could not be estimated.
10858     *
10859     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10860     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10861     * estimation.
10862     *
10863     * The computed transformation is then refined further (using only inliers) with the
10864     * Levenberg-Marquardt method to reduce the re-projection error even more.
10865     *
10866     * Estimated transformation matrix is:
10867     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10868     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10869     * \end{bmatrix} \)
10870     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10871     * translations in \( x, y \) axes respectively.
10872     *
10873     * <b>Note:</b>
     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10875     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10876     * correctly only when there are more than 50% of inliers.
10877     *
10878     * SEE: estimateAffine2D, getAffineTransform
10879     */
10880    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers) {
10881        return new Mat(estimateAffinePartial2D_5(from.nativeObj, to.nativeObj, inliers.nativeObj));
10882    }
10883
10884    /**
10885     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10886     * two 2D point sets.
10887     *
10888     * @param from First input 2D point set.
10889     * @param to Second input 2D point set.
     * <ul>
     *   <li>
     *    REF: RANSAC - RANSAC-based robust method
     *   </li>
     *   <li>
     *    REF: LMEDS - Least-Median robust method
     *   </li>
     * </ul>
     *
     * RANSAC is the default method; the omitted parameters (inlier threshold, confidence,
     * maximum number of iterations and refinement iterations) take their default values.
10903     *
10904     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10905     * empty matrix if transformation could not be estimated.
10906     *
10907     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10908     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10909     * estimation.
10910     *
10911     * The computed transformation is then refined further (using only inliers) with the
10912     * Levenberg-Marquardt method to reduce the re-projection error even more.
10913     *
10914     * Estimated transformation matrix is:
10915     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10916     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10917     * \end{bmatrix} \)
10918     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10919     * translations in \( x, y \) axes respectively.
10920     *
10921     * <b>Note:</b>
     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
10923     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10924     * correctly only when there are more than 50% of inliers.
10925     *
10926     * SEE: estimateAffine2D, getAffineTransform
10927     */
10928    public static Mat estimateAffinePartial2D(Mat from, Mat to) {
10929        return new Mat(estimateAffinePartial2D_6(from.nativeObj, to.nativeObj));
10930    }
10931
10932
10933    //
10934    // C++:  int cv::decomposeHomographyMat(Mat H, Mat K, vector_Mat& rotations, vector_Mat& translations, vector_Mat& normals)
10935    //
10936
10937    /**
10938     * Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).
10939     *
10940     * @param H The input homography matrix between two images.
10941     * @param K The input camera intrinsic matrix.
10942     * @param rotations Array of rotation matrices.
10943     * @param translations Array of translation matrices.
10944     * @param normals Array of plane normal matrices.
10945     *
10946     * This function extracts relative camera motion between two views of a planar object and returns up to
10947     * four mathematical solution tuples of rotation, translation, and plane normal. The decomposition of
10948     * the homography matrix H is described in detail in CITE: Malis.
10949     *
10950     * If the homography H, induced by the plane, gives the constraint
10951     * \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\) on the source image points
10952     * \(p_i\) and the destination image points \(p'_i\), then the tuple of rotations[k] and
10953     * translations[k] is a change of basis from the source camera's coordinate system to the destination
10954     * camera's coordinate system. However, by decomposing H, one can only get the translation normalized
10955     * by the (typically unknown) depth of the scene, i.e. its direction but with normalized length.
10956     *
     * If point correspondences are available, at least two solutions may further be invalidated by
     * applying a positive depth constraint, i.e. all points must be in front of the camera.
10959     * @return automatically generated
10960     */
10961    public static int decomposeHomographyMat(Mat H, Mat K, List<Mat> rotations, List<Mat> translations, List<Mat> normals) {
10962        Mat rotations_mat = new Mat();
10963        Mat translations_mat = new Mat();
10964        Mat normals_mat = new Mat();
10965        int retVal = decomposeHomographyMat_0(H.nativeObj, K.nativeObj, rotations_mat.nativeObj, translations_mat.nativeObj, normals_mat.nativeObj);
10966        Converters.Mat_to_vector_Mat(rotations_mat, rotations);
10967        rotations_mat.release();
10968        Converters.Mat_to_vector_Mat(translations_mat, translations);
10969        translations_mat.release();
10970        Converters.Mat_to_vector_Mat(normals_mat, normals);
10971        normals_mat.release();
10972        return retVal;
10973    }
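
    // Example: a minimal sketch (not generated) of decomposing a homography, e.g. one obtained
    // from findHomography, into its candidate {rotation, translation, normal} solutions.
    // H and K are assumed to be valid 3x3 matrices supplied by the caller.
    private static int exampleDecomposeHomography(Mat H, Mat K) {
        List<Mat> rotations = new ArrayList<Mat>();
        List<Mat> translations = new ArrayList<Mat>();
        List<Mat> normals = new ArrayList<Mat>();
        // The return value is the number of candidate solutions (at most four).
        return decomposeHomographyMat(H, K, rotations, translations, normals);
    }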
10974
10975
10976    //
10977    // C++:  void cv::filterHomographyDecompByVisibleRefpoints(vector_Mat rotations, vector_Mat normals, Mat beforePoints, Mat afterPoints, Mat& possibleSolutions, Mat pointsMask = Mat())
10978    //
10979
10980    /**
10981     * Filters homography decompositions based on additional information.
10982     *
10983     * @param rotations Vector of rotation matrices.
10984     * @param normals Vector of plane normal matrices.
10985     * @param beforePoints Vector of (rectified) visible reference points before the homography is applied
10986     * @param afterPoints Vector of (rectified) visible reference points after the homography is applied
10987     * @param possibleSolutions Vector of int indices representing the viable solution set after filtering
10988     * @param pointsMask optional Mat/Vector of 8u type representing the mask for the inliers as given by the findHomography function
10989     *
10990     * This function is intended to filter the output of the decomposeHomographyMat based on additional
10991     * information as described in CITE: Malis . The summary of the method: the decomposeHomographyMat function
10992     * returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
10993     * sets of points visible in the camera frame before and after the homography transformation is applied,
10994     * we can determine which are the true potential solutions and which are the opposites by verifying which
10995     * homographies are consistent with all visible reference points being in front of the camera. The inputs
10996     * are left unchanged; the filtered solution set is returned as indices into the existing one.
10997     */
10998    public static void filterHomographyDecompByVisibleRefpoints(List<Mat> rotations, List<Mat> normals, Mat beforePoints, Mat afterPoints, Mat possibleSolutions, Mat pointsMask) {
10999        Mat rotations_mat = Converters.vector_Mat_to_Mat(rotations);
11000        Mat normals_mat = Converters.vector_Mat_to_Mat(normals);
11001        filterHomographyDecompByVisibleRefpoints_0(rotations_mat.nativeObj, normals_mat.nativeObj, beforePoints.nativeObj, afterPoints.nativeObj, possibleSolutions.nativeObj, pointsMask.nativeObj);
11002    }
11003
11004    /**
11005     * Filters homography decompositions based on additional information.
11006     *
11007     * @param rotations Vector of rotation matrices.
11008     * @param normals Vector of plane normal matrices.
11009     * @param beforePoints Vector of (rectified) visible reference points before the homography is applied
11010     * @param afterPoints Vector of (rectified) visible reference points after the homography is applied
11011     * @param possibleSolutions Vector of int indices representing the viable solution set after filtering
11012     *
11013     * This function is intended to filter the output of the decomposeHomographyMat based on additional
11014     * information as described in CITE: Malis . The summary of the method: the decomposeHomographyMat function
11015     * returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
11016     * sets of points visible in the camera frame before and after the homography transformation is applied,
11017     * we can determine which are the true potential solutions and which are the opposites by verifying which
11018     * homographies are consistent with all visible reference points being in front of the camera. The inputs
11019     * are left unchanged; the filtered solution set is returned as indices into the existing one.
11020     */
11021    public static void filterHomographyDecompByVisibleRefpoints(List<Mat> rotations, List<Mat> normals, Mat beforePoints, Mat afterPoints, Mat possibleSolutions) {
11022        Mat rotations_mat = Converters.vector_Mat_to_Mat(rotations);
11023        Mat normals_mat = Converters.vector_Mat_to_Mat(normals);
11024        filterHomographyDecompByVisibleRefpoints_1(rotations_mat.nativeObj, normals_mat.nativeObj, beforePoints.nativeObj, afterPoints.nativeObj, possibleSolutions.nativeObj);
11025    }
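
    // Example: a minimal sketch (not generated) of filtering the decompositions returned by
    // decomposeHomographyMat using point correspondences; beforePoints and afterPoints are
    // assumed to be rectified CV_32FC2 point sets from the two views.
    private static Mat exampleFilterHomographyDecompositions(List<Mat> rotations, List<Mat> normals,
                                                             Mat beforePoints, Mat afterPoints) {
        Mat possibleSolutions = new Mat();
        filterHomographyDecompByVisibleRefpoints(rotations, normals, beforePoints, afterPoints, possibleSolutions);
        // possibleSolutions holds the indices of the decompositions that remain viable.
        return possibleSolutions;
    }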
11026
11027
11028    //
11029    // C++:  void cv::undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat())
11030    //
11031
11032    /**
11033     * Transforms an image to compensate for lens distortion.
11034     *
11035     * The function transforms an image to compensate radial and tangential lens distortion.
11036     *
11037     * The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap
11038     * (with bilinear interpolation). See the former function for details of the transformation being
11039     * performed.
11040     *
     * Those pixels in the destination image for which there are no corresponding pixels in the source
     * image are filled with zeros (black color).
11043     *
11044     * A particular subset of the source image that will be visible in the corrected image can be regulated
11045     * by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate
11046     * newCameraMatrix depending on your requirements.
11047     *
11048     * The camera matrix and the distortion parameters can be determined using #calibrateCamera. If
11049     * the resolution of images is different from the resolution used at the calibration stage, \(f_x,
11050     * f_y, c_x\) and \(c_y\) need to be scaled accordingly, while the distortion coefficients remain
11051     * the same.
11052     *
11053     * @param src Input (distorted) image.
11054     * @param dst Output (corrected) image that has the same size and type as src .
11055     * @param cameraMatrix Input camera matrix \(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
11056     * @param distCoeffs Input vector of distortion coefficients
11057     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
11058     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
11059     * @param newCameraMatrix Camera matrix of the distorted image. By default, it is the same as
11060     * cameraMatrix but you may additionally scale and shift the result by using a different matrix.
11061     */
11062    public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix) {
11063        undistort_0(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, newCameraMatrix.nativeObj);
11064    }
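
    // Example: a minimal sketch (not generated) of undistorting an image with intrinsics from
    // calibrateCamera; "cameraMatrix" and "distCoeffs" are assumed to be valid calibration results.
    private static Mat exampleUndistortImage(Mat distortedImage, Mat cameraMatrix, Mat distCoeffs) {
        Mat corrected = new Mat();
        // Omitting newCameraMatrix keeps the original camera matrix for the corrected image.
        undistort(distortedImage, corrected, cameraMatrix, distCoeffs);
        return corrected;
    }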
11065
11066    /**
11067     * Transforms an image to compensate for lens distortion.
11068     *
11069     * The function transforms an image to compensate radial and tangential lens distortion.
11070     *
11071     * The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap
11072     * (with bilinear interpolation). See the former function for details of the transformation being
11073     * performed.
11074     *
     * Those pixels in the destination image for which there are no corresponding pixels in the source
     * image are filled with zeros (black color).
11077     *
11078     * A particular subset of the source image that will be visible in the corrected image can be regulated
11079     * by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate
11080     * newCameraMatrix depending on your requirements.
11081     *
11082     * The camera matrix and the distortion parameters can be determined using #calibrateCamera. If
11083     * the resolution of images is different from the resolution used at the calibration stage, \(f_x,
11084     * f_y, c_x\) and \(c_y\) need to be scaled accordingly, while the distortion coefficients remain
11085     * the same.
11086     *
11087     * @param src Input (distorted) image.
11088     * @param dst Output (corrected) image that has the same size and type as src .
11089     * @param cameraMatrix Input camera matrix \(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
11090     * @param distCoeffs Input vector of distortion coefficients
11091     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
11092     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
11094     */
11095    public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs) {
11096        undistort_1(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj);
11097    }
11098
11099
11100    //
11101    // C++:  void cv::initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
11102    //
11103
11104    /**
11105     * Computes the undistortion and rectification transformation map.
11106     *
11107     * The function computes the joint undistortion and rectification transformation and represents the
     * result in the form of maps for remap. The undistorted image looks like the original, as if it is
     * captured with a camera using the camera matrix = newCameraMatrix and zero distortion. In case of a
11110     * monocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by
11111     * #getOptimalNewCameraMatrix for a better control over scaling. In case of a stereo camera,
11112     * newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
11113     *
11114     * Also, this new camera is oriented differently in the coordinate space, according to R. That, for
11115     * example, helps to align two heads of a stereo camera so that the epipolar lines on both images
     * become horizontal and have the same y-coordinate (in case of a horizontally aligned stereo camera).
11117     *
11118     * The function actually builds the maps for the inverse mapping algorithm that is used by remap. That
11119     * is, for each pixel \((u, v)\) in the destination (corrected and rectified) image, the function
11120     * computes the corresponding coordinates in the source image (that is, in the original image from
11121     * camera). The following process is applied:
11122     * \(
11123     * \begin{array}{l}
11124     * x  \leftarrow (u - {c'}_x)/{f'}_x  \\
11125     * y  \leftarrow (v - {c'}_y)/{f'}_y  \\
11126     * {[X\,Y\,W]} ^T  \leftarrow R^{-1}*[x \, y \, 1]^T  \\
11127     * x'  \leftarrow X/W  \\
11128     * y'  \leftarrow Y/W  \\
11129     * r^2  \leftarrow x'^2 + y'^2 \\
11130     * x''  \leftarrow x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
11131     * + 2p_1 x' y' + p_2(r^2 + 2 x'^2)  + s_1 r^2 + s_2 r^4\\
11132     * y''  \leftarrow y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
11133     * + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\
11134     * s\vecthree{x'''}{y'''}{1} =
     * \vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)}
11136     * {0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)}
11137     * {0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\\
11138     * map_x(u,v)  \leftarrow x''' f_x + c_x  \\
11139     * map_y(u,v)  \leftarrow y''' f_y + c_y
11140     * \end{array}
11141     * \)
11142     * where \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
11143     * are the distortion coefficients.
11144     *
11145     * In case of a stereo camera, this function is called twice: once for each camera head, after
11146     * stereoRectify, which in its turn is called after #stereoCalibrate. But if the stereo camera
11147     * was not calibrated, it is still possible to compute the rectification transformations directly from
11148     * the fundamental matrix using #stereoRectifyUncalibrated. For each camera, the function computes
11149     * homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
11150     * space. R can be computed from H as
11151     * \(\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\)
11152     * where cameraMatrix can be chosen arbitrarily.
11153     *
11154     * @param cameraMatrix Input camera matrix \(A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
11155     * @param distCoeffs Input vector of distortion coefficients
11156     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
11157     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
11158     * @param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2 ,
11159     * computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
     * is assumed. In cvInitUndistortMap, R is assumed to be an identity matrix.
11161     * @param newCameraMatrix New camera matrix \(A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\).
11162     * @param size Undistorted image size.
11163     * @param m1type Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
11164     * @param map1 The first output map.
11165     * @param map2 The second output map.
11166     */
11167    public static void initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat map1, Mat map2) {
11168        initUndistortRectifyMap_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, newCameraMatrix.nativeObj, size.width, size.height, m1type, map1.nativeObj, map2.nativeObj);
11169    }
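
    // Example: a minimal sketch (not generated) of building the undistortion maps once and then
    // remapping a frame with them; Imgproc.remap is referenced fully qualified because this class
    // does not import it. An empty R means the identity transformation.
    private static Mat exampleBuildMapsAndRemap(Mat src, Mat cameraMatrix, Mat distCoeffs) {
        Mat map1 = new Mat();
        Mat map2 = new Mat();
        initUndistortRectifyMap(cameraMatrix, distCoeffs, new Mat(), cameraMatrix,
                src.size(), org.opencv.core.CvType.CV_16SC2, map1, map2);
        Mat dst = new Mat();
        org.opencv.imgproc.Imgproc.remap(src, dst, map1, map2, org.opencv.imgproc.Imgproc.INTER_LINEAR);
        return dst;
    }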
11170
11171
11172    //
11173    // C++:  Mat cv::getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false)
11174    //
11175
11176    /**
11177     * Returns the default new camera matrix.
11178     *
11179     * The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
     * centerPrincipalPoint=false), or the modified one (when centerPrincipalPoint=true).
11181     *
11182     * In the latter case, the new camera matrix will be:
11183     *
11184     * \(\begin{bmatrix} f_x &amp;&amp; 0 &amp;&amp; ( \texttt{imgSize.width} -1)*0.5  \\ 0 &amp;&amp; f_y &amp;&amp; ( \texttt{imgSize.height} -1)*0.5  \\ 0 &amp;&amp; 0 &amp;&amp; 1 \end{bmatrix} ,\)
11185     *
11186     * where \(f_x\) and \(f_y\) are \((0,0)\) and \((1,1)\) elements of cameraMatrix, respectively.
11187     *
11188     * By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
11189     * move the principal point. However, when you work with stereo, it is important to move the principal
11190     * points in both views to the same y-coordinate (which is required by most of stereo correspondence
11191     * algorithms), and may be to the same x-coordinate too. So, you can form the new camera matrix for
11192     * each view where the principal points are located at the center.
11193     *
11194     * @param cameraMatrix Input camera matrix.
11195     * @param imgsize Camera view image size in pixels.
11196     * @param centerPrincipalPoint Location of the principal point in the new camera matrix. The
11197     * parameter indicates whether this location should be at the image center or not.
11198     * @return automatically generated
11199     */
11200    public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize, boolean centerPrincipalPoint) {
11201        return new Mat(getDefaultNewCameraMatrix_0(cameraMatrix.nativeObj, imgsize.width, imgsize.height, centerPrincipalPoint));
11202    }
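
    // Example: a minimal sketch (not generated) of recentering the principal point while keeping
    // the focal lengths; imageSize is assumed to be the camera view size in pixels.
    private static Mat exampleCenterPrincipalPoint(Mat cameraMatrix, Size imageSize) {
        return getDefaultNewCameraMatrix(cameraMatrix, imageSize, true);
    }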
11203
11204    /**
11205     * Returns the default new camera matrix.
11206     *
11207     * The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
     * centerPrincipalPoint=false), or the modified one (when centerPrincipalPoint=true).
11209     *
11210     * In the latter case, the new camera matrix will be:
11211     *
11212     * \(\begin{bmatrix} f_x &amp;&amp; 0 &amp;&amp; ( \texttt{imgSize.width} -1)*0.5  \\ 0 &amp;&amp; f_y &amp;&amp; ( \texttt{imgSize.height} -1)*0.5  \\ 0 &amp;&amp; 0 &amp;&amp; 1 \end{bmatrix} ,\)
11213     *
11214     * where \(f_x\) and \(f_y\) are \((0,0)\) and \((1,1)\) elements of cameraMatrix, respectively.
11215     *
11216     * By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
11217     * move the principal point. However, when you work with stereo, it is important to move the principal
11218     * points in both views to the same y-coordinate (which is required by most of stereo correspondence
11219     * algorithms), and may be to the same x-coordinate too. So, you can form the new camera matrix for
11220     * each view where the principal points are located at the center.
11221     *
11222     * @param cameraMatrix Input camera matrix.
11223     * @param imgsize Camera view image size in pixels.
11225     * @return automatically generated
11226     */
11227    public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize) {
11228        return new Mat(getDefaultNewCameraMatrix_1(cameraMatrix.nativeObj, imgsize.width, imgsize.height));
11229    }
11230
11231    /**
11232     * Returns the default new camera matrix.
11233     *
11234     * The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
     * centerPrincipalPoint=false), or the modified one (when centerPrincipalPoint=true).
11236     *
11237     * In the latter case, the new camera matrix will be:
11238     *
11239     * \(\begin{bmatrix} f_x &amp;&amp; 0 &amp;&amp; ( \texttt{imgSize.width} -1)*0.5  \\ 0 &amp;&amp; f_y &amp;&amp; ( \texttt{imgSize.height} -1)*0.5  \\ 0 &amp;&amp; 0 &amp;&amp; 1 \end{bmatrix} ,\)
11240     *
11241     * where \(f_x\) and \(f_y\) are \((0,0)\) and \((1,1)\) elements of cameraMatrix, respectively.
11242     *
11243     * By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
11244     * move the principal point. However, when you work with stereo, it is important to move the principal
11245     * points in both views to the same y-coordinate (which is required by most of stereo correspondence
11246     * algorithms), and may be to the same x-coordinate too. So, you can form the new camera matrix for
11247     * each view where the principal points are located at the center.
11248     *
11249     * @param cameraMatrix Input camera matrix.
11251     * @return automatically generated
11252     */
11253    public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix) {
11254        return new Mat(getDefaultNewCameraMatrix_2(cameraMatrix.nativeObj));
11255    }
11256
11257
11258    //
11259    // C++:  void cv::undistortPoints(vector_Point2f src, vector_Point2f& dst, Mat cameraMatrix, Mat distCoeffs, Mat R = Mat(), Mat P = Mat())
11260    //
11261
11262    /**
11263     * Computes the ideal point coordinates from the observed point coordinates.
11264     *
11265     * The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
11266     * sparse set of points instead of a raster image. Also the function performs a reverse transformation
11267     * to projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
11268     * planar object, it does, up to a translation vector, if the proper R is specified.
11269     *
11270     * For each observed point coordinate \((u, v)\) the function computes:
11271     * \(
11272     * \begin{array}{l}
11273     * x^{"}  \leftarrow (u - c_x)/f_x  \\
11274     * y^{"}  \leftarrow (v - c_y)/f_y  \\
11275     * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
11276     * {[X\,Y\,W]} ^T  \leftarrow R*[x' \, y' \, 1]^T  \\
11277     * x  \leftarrow X/W  \\
11278     * y  \leftarrow Y/W  \\
11279     * \text{only performed if P is specified:} \\
11280     * u'  \leftarrow x {f'}_x + {c'}_x  \\
11281     * v'  \leftarrow y {f'}_y + {c'}_y
11282     * \end{array}
11283     * \)
11284     *
11285     * where *undistort* is an approximate iterative algorithm that estimates the normalized original
11286     * point coordinates out of the normalized distorted point coordinates ("normalized" means that the
11287     * coordinates do not depend on the camera matrix).
11288     *
     * The function can be used for either a stereo camera head or a monocular camera (when R is empty).
11290     * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
11291     * vector&lt;Point2f&gt; ).
11292     * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector&lt;Point2f&gt; ) after undistortion and reverse perspective
11293     * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
11294     * @param cameraMatrix Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
11295     * @param distCoeffs Input vector of distortion coefficients
11296     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
11297     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
11298     * @param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
11299     * #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
11300     * @param P New camera matrix (3x3) or new projection matrix (3x4) \(\begin{bmatrix} {f'}_x &amp; 0 &amp; {c'}_x &amp; t_x \\ 0 &amp; {f'}_y &amp; {c'}_y &amp; t_y \\ 0 &amp; 0 &amp; 1 &amp; t_z \end{bmatrix}\). P1 or P2 computed by
11301     * #stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used.
11302     */
11303    public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P) {
11304        Mat src_mat = src;
11305        Mat dst_mat = dst;
11306        undistortPoints_0(src_mat.nativeObj, dst_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, P.nativeObj);
11307    }
11308
11309    /**
11310     * Computes the ideal point coordinates from the observed point coordinates.
11311     *
11312     * The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
11313     * sparse set of points instead of a raster image. Also the function performs a reverse transformation
11314     * to projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
11315     * planar object, it does, up to a translation vector, if the proper R is specified.
11316     *
11317     * For each observed point coordinate \((u, v)\) the function computes:
11318     * \(
11319     * \begin{array}{l}
11320     * x^{"}  \leftarrow (u - c_x)/f_x  \\
11321     * y^{"}  \leftarrow (v - c_y)/f_y  \\
11322     * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
11323     * {[X\,Y\,W]} ^T  \leftarrow R*[x' \, y' \, 1]^T  \\
11324     * x  \leftarrow X/W  \\
11325     * y  \leftarrow Y/W  \\
11326     * \text{only performed if P is specified:} \\
11327     * u'  \leftarrow x {f'}_x + {c'}_x  \\
11328     * v'  \leftarrow y {f'}_y + {c'}_y
11329     * \end{array}
11330     * \)
11331     *
11332     * where *undistort* is an approximate iterative algorithm that estimates the normalized original
11333     * point coordinates out of the normalized distorted point coordinates ("normalized" means that the
11334     * coordinates do not depend on the camera matrix).
11335     *
     * The function can be used for either a stereo camera head or a monocular camera (when R is empty).
11337     * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
11338     * vector&lt;Point2f&gt; ).
11339     * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector&lt;Point2f&gt; ) after undistortion and reverse perspective
11340     * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
11341     * @param cameraMatrix Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
11342     * @param distCoeffs Input vector of distortion coefficients
11343     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
11344     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
11345     * @param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
11346     * #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
11348     */
11349    public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs, Mat R) {
11350        Mat src_mat = src;
11351        Mat dst_mat = dst;
11352        undistortPoints_1(src_mat.nativeObj, dst_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj);
11353    }
11354
11355    /**
11356     * Computes the ideal point coordinates from the observed point coordinates.
11357     *
11358     * The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
11359     * sparse set of points instead of a raster image. Also the function performs a reverse transformation
11360     * to projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
11361     * planar object, it does, up to a translation vector, if the proper R is specified.
11362     *
11363     * For each observed point coordinate \((u, v)\) the function computes:
11364     * \(
11365     * \begin{array}{l}
11366     * x^{"}  \leftarrow (u - c_x)/f_x  \\
11367     * y^{"}  \leftarrow (v - c_y)/f_y  \\
11368     * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
11369     * {[X\,Y\,W]} ^T  \leftarrow R*[x' \, y' \, 1]^T  \\
11370     * x  \leftarrow X/W  \\
11371     * y  \leftarrow Y/W  \\
11372     * \text{only performed if P is specified:} \\
11373     * u'  \leftarrow x {f'}_x + {c'}_x  \\
11374     * v'  \leftarrow y {f'}_y + {c'}_y
11375     * \end{array}
11376     * \)
11377     *
11378     * where *undistort* is an approximate iterative algorithm that estimates the normalized original
11379     * point coordinates out of the normalized distorted point coordinates ("normalized" means that the
11380     * coordinates do not depend on the camera matrix).
11381     *
     * The function can be used for either a stereo camera head or a monocular camera (when R is empty).
11383     * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
11384     * vector&lt;Point2f&gt; ).
11385     * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector&lt;Point2f&gt; ) after undistortion and reverse perspective
11386     * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
11387     * @param cameraMatrix Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
11388     * @param distCoeffs Input vector of distortion coefficients
11389     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
11390     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
11393     */
11394    public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs) {
11395        Mat src_mat = src;
11396        Mat dst_mat = dst;
11397        undistortPoints_2(src_mat.nativeObj, dst_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj);
11398    }
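
    // Example: a minimal sketch (not generated) of converting observed pixel coordinates to
    // normalized, distortion-free coordinates; "observed" is assumed to hold measured pixel points.
    private static MatOfPoint2f exampleNormalizePoints(MatOfPoint2f observed, Mat cameraMatrix, Mat distCoeffs) {
        MatOfPoint2f normalized = new MatOfPoint2f();
        // With R and P omitted, the output is expressed in normalized camera coordinates.
        undistortPoints(observed, normalized, cameraMatrix, distCoeffs);
        return normalized;
    }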
11399
11400
11401    //
11402    // C++:  void cv::undistortPoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria)
11403    //
11404
11405    /**
11406     *
11407     *     <b>Note:</b> Default version of #undistortPoints does 5 iterations to compute undistorted points.
11408     * @param src automatically generated
11409     * @param dst automatically generated
11410     * @param cameraMatrix automatically generated
11411     * @param distCoeffs automatically generated
11412     * @param R automatically generated
11413     * @param P automatically generated
11414     * @param criteria automatically generated
11415     */
11416    public static void undistortPointsIter(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria) {
11417        undistortPointsIter_0(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, P.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon);
11418    }
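
    // Example: a minimal sketch (not generated) of requesting more undistortion iterations and a
    // tighter tolerance than the 5 iterations used by the default undistortPoints; empty R and P
    // leave the output in normalized coordinates.
    private static void exampleUndistortPointsWithCriteria(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs) {
        TermCriteria criteria = new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 40, 1e-8);
        undistortPointsIter(src, dst, cameraMatrix, distCoeffs, new Mat(), new Mat(), criteria);
    }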
11419
11420
11421    //
11422    // C++:  void cv::fisheye::projectPoints(Mat objectPoints, Mat& imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha = 0, Mat& jacobian = Mat())
11423    //
11424
11425    public static void fisheye_projectPoints(Mat objectPoints, Mat imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha, Mat jacobian) {
11426        fisheye_projectPoints_0(objectPoints.nativeObj, imagePoints.nativeObj, rvec.nativeObj, tvec.nativeObj, K.nativeObj, D.nativeObj, alpha, jacobian.nativeObj);
11427    }
11428
11429    public static void fisheye_projectPoints(Mat objectPoints, Mat imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha) {
11430        fisheye_projectPoints_1(objectPoints.nativeObj, imagePoints.nativeObj, rvec.nativeObj, tvec.nativeObj, K.nativeObj, D.nativeObj, alpha);
11431    }
11432
11433    public static void fisheye_projectPoints(Mat objectPoints, Mat imagePoints, Mat rvec, Mat tvec, Mat K, Mat D) {
11434        fisheye_projectPoints_2(objectPoints.nativeObj, imagePoints.nativeObj, rvec.nativeObj, tvec.nativeObj, K.nativeObj, D.nativeObj);
11435    }
11436
11437
11438    //
11439    // C++:  void cv::fisheye::distortPoints(Mat undistorted, Mat& distorted, Mat K, Mat D, double alpha = 0)
11440    //
11441
11442    /**
11443     * Distorts 2D points using fisheye model.
11444     *
11445     *     @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector&lt;Point2f&gt; ), where N is
11446     *     the number of points in the view.
     *     @param K Camera intrinsic matrix \(\cameramatrix{K}\).
11448     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11449     *     @param alpha The skew coefficient.
11450     *     @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector&lt;Point2f&gt; .
11451     *
11452     *     Note that the function assumes the camera intrinsic matrix of the undistorted points to be identity.
11453     *     This means if you want to transform back points undistorted with undistortPoints() you have to
11454     *     multiply them with \(P^{-1}\).
11455     */
11456    public static void fisheye_distortPoints(Mat undistorted, Mat distorted, Mat K, Mat D, double alpha) {
11457        fisheye_distortPoints_0(undistorted.nativeObj, distorted.nativeObj, K.nativeObj, D.nativeObj, alpha);
11458    }
11459
11460    /**
11461     * Distorts 2D points using fisheye model.
11462     *
11463     *     @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector&lt;Point2f&gt; ), where N is
11464     *     the number of points in the view.
     *     @param K Camera intrinsic matrix \(\cameramatrix{K}\).
11466     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11467     *     @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector&lt;Point2f&gt; .
11468     *
11469     *     Note that the function assumes the camera intrinsic matrix of the undistorted points to be identity.
11470     *     This means if you want to transform back points undistorted with undistortPoints() you have to
11471     *     multiply them with \(P^{-1}\).
11472     */
11473    public static void fisheye_distortPoints(Mat undistorted, Mat distorted, Mat K, Mat D) {
11474        fisheye_distortPoints_1(undistorted.nativeObj, distorted.nativeObj, K.nativeObj, D.nativeObj);
11475    }
11476
11477
11478    //
11479    // C++:  void cv::fisheye::undistortPoints(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat R = Mat(), Mat P = Mat())
11480    //
11481
11482    /**
11483     * Undistorts 2D points using fisheye model
11484     *
11485     *     @param distorted Array of object points, 1xN/Nx1 2-channel (or vector&lt;Point2f&gt; ), where N is the
11486     *     number of points in the view.
     *     @param K Camera intrinsic matrix \(\cameramatrix{K}\).
11488     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11489     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11490     *     1-channel or 1x1 3-channel
11491     *     @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
11492     *     @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector&lt;Point2f&gt; .
11493     */
11494    public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D, Mat R, Mat P) {
11495        fisheye_undistortPoints_0(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, R.nativeObj, P.nativeObj);
11496    }
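
    // Example: a minimal sketch (not generated) of undistorting fisheye points while keeping
    // pixel units, by reprojecting with the original intrinsic matrix K passed as P.
    private static void exampleFisheyeUndistortToPixels(Mat distorted, Mat undistorted, Mat K, Mat D) {
        fisheye_undistortPoints(distorted, undistorted, K, D, new Mat(), K);
    }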
11497
11498    /**
11499     * Undistorts 2D points using fisheye model
11500     *
11501     *     @param distorted Array of object points, 1xN/Nx1 2-channel (or vector&lt;Point2f&gt; ), where N is the
11502     *     number of points in the view.
     *     @param K Camera intrinsic matrix \(\cameramatrix{K}\).
11504     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11505     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11506     *     1-channel or 1x1 3-channel
11507     *     @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector&lt;Point2f&gt; .
11508     */
11509    public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D, Mat R) {
11510        fisheye_undistortPoints_1(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, R.nativeObj);
11511    }
11512
11513    /**
11514     * Undistorts 2D points using fisheye model
11515     *
11516     *     @param distorted Array of object points, 1xN/Nx1 2-channel (or vector&lt;Point2f&gt; ), where N is the
11517     *     number of points in the view.
     *     @param K Camera intrinsic matrix \(\cameramatrix{K}\).
11519     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11521     *     @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector&lt;Point2f&gt; .
11522     */
11523    public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D) {
11524        fisheye_undistortPoints_2(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj);
11525    }
11526
11527
11528    //
11529    // C++:  void cv::fisheye::initUndistortRectifyMap(Mat K, Mat D, Mat R, Mat P, Size size, int m1type, Mat& map1, Mat& map2)
11530    //
11531
11532    /**
     * Computes undistortion and rectification maps for image transform by cv::remap(). If D is empty, zero
     *     distortion is used; if R or P is empty, identity matrices are used.
11535     *
     *     @param K Camera intrinsic matrix \(\cameramatrix{K}\).
11537     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11538     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11539     *     1-channel or 1x1 3-channel
11540     *     @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
11541     *     @param size Undistorted image size.
11542     *     @param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See convertMaps()
11543     *     for details.
11544     *     @param map1 The first output map.
11545     *     @param map2 The second output map.
11546     */
11547    public static void fisheye_initUndistortRectifyMap(Mat K, Mat D, Mat R, Mat P, Size size, int m1type, Mat map1, Mat map2) {
11548        fisheye_initUndistortRectifyMap_0(K.nativeObj, D.nativeObj, R.nativeObj, P.nativeObj, size.width, size.height, m1type, map1.nativeObj, map2.nativeObj);
11549    }
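
    // Example: a minimal sketch (not generated) of building fisheye undistortion maps with an
    // identity R (empty Mat) and K reused as the new projection matrix.
    private static void exampleFisheyeBuildMaps(Mat K, Mat D, Size imageSize, Mat map1, Mat map2) {
        fisheye_initUndistortRectifyMap(K, D, new Mat(), K, imageSize,
                org.opencv.core.CvType.CV_16SC2, map1, map2);
    }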
11550
11551
11552    //
11553    // C++:  void cv::fisheye::undistortImage(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat Knew = cv::Mat(), Size new_size = Size())
11554    //
11555
11556    /**
11557     * Transforms an image to compensate for fisheye lens distortion.
11558     *
11559     *     @param distorted image with fisheye lens distortion.
11560     *     @param undistorted Output image with compensated fisheye lens distortion.
     *     @param K Camera intrinsic matrix \(\cameramatrix{K}\).
11562     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11563     *     @param Knew Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you
11564     *     may additionally scale and shift the result by using a different matrix.
11565     *     @param new_size the new size
11566     *
11567     *     The function transforms an image to compensate radial and tangential lens distortion.
11568     *
11569     *     The function is simply a combination of fisheye::initUndistortRectifyMap (with unity R ) and remap
11570     *     (with bilinear interpolation). See the former function for details of the transformation being
11571     *     performed.
11572     *
11573     *     See below the results of undistortImage.
11574     * <ul>
11575     *   <li>
11576     *           a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
11577     *             k_4, k_5, k_6) of distortion were optimized under calibration)
11578     *   <ul>
11579     *     <li>
11580     *            b\) result of fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
11581     *             k_3, k_4) of fisheye distortion were optimized under calibration)
11582     *     </li>
11583     *     <li>
11584     *            c\) original image was captured with fisheye lens
11585     *     </li>
11586     *   </ul>
11587     *
     *     Pictures a) and b) are almost the same. But if we consider points of the image located far from the
     *     center of the image, we can notice that in image a) these points are distorted.
11590     *   </li>
11591     * </ul>
11592     *
11593     *     ![image](pics/fisheye_undistorted.jpg)
11594     */
11595    public static void fisheye_undistortImage(Mat distorted, Mat undistorted, Mat K, Mat D, Mat Knew, Size new_size) {
11596        fisheye_undistortImage_0(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, Knew.nativeObj, new_size.width, new_size.height);
11597    }
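
    // Example: a minimal sketch (not generated) of compensating fisheye distortion while keeping
    // the original focal length and principal point by passing K as Knew.
    private static Mat exampleFisheyeUndistortImage(Mat distorted, Mat K, Mat D) {
        Mat undistorted = new Mat();
        fisheye_undistortImage(distorted, undistorted, K, D, K);
        return undistorted;
    }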
11598
11599    /**
11600     * Transforms an image to compensate for fisheye lens distortion.
11601     *
11602     *     @param distorted image with fisheye lens distortion.
11603     *     @param undistorted Output image with compensated fisheye lens distortion.
     *     @param K Camera intrinsic matrix \(\cameramatrix{K}\).
11605     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11606     *     @param Knew Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you
11607     *     may additionally scale and shift the result by using a different matrix.
11608     *
11609     *     The function transforms an image to compensate radial and tangential lens distortion.
11610     *
11611     *     The function is simply a combination of fisheye::initUndistortRectifyMap (with unity R ) and remap
11612     *     (with bilinear interpolation). See the former function for details of the transformation being
11613     *     performed.
11614     *
11615     *     See below the results of undistortImage.
11616     * <ul>
11617     *   <li>
11618     *           a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
11619     *             k_4, k_5, k_6) of distortion were optimized under calibration)
11620     *   <ul>
11621     *     <li>
11622     *            b\) result of fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
11623     *             k_3, k_4) of fisheye distortion were optimized under calibration)
11624     *     </li>
11625     *     <li>
11626     *            c\) original image was captured with fisheye lens
11627     *     </li>
11628     *   </ul>
11629     *
     *     Pictures a) and b) are almost the same. But if we consider points of the image located far from the
     *     center of the image, we can notice that in image a) these points are distorted.
11632     *   </li>
11633     * </ul>
11634     *
11635     *     ![image](pics/fisheye_undistorted.jpg)
11636     */
11637    public static void fisheye_undistortImage(Mat distorted, Mat undistorted, Mat K, Mat D, Mat Knew) {
11638        fisheye_undistortImage_1(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, Knew.nativeObj);
11639    }
11640
11641    /**
11642     * Transforms an image to compensate for fisheye lens distortion.
11643     *
11644     *     @param distorted image with fisheye lens distortion.
11645     *     @param undistorted Output image with compensated fisheye lens distortion.
     *     @param K Camera intrinsic matrix \(\cameramatrix{K}\).
11647     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11649     *
11650     *     The function transforms an image to compensate radial and tangential lens distortion.
11651     *
11652     *     The function is simply a combination of fisheye::initUndistortRectifyMap (with unity R ) and remap
11653     *     (with bilinear interpolation). See the former function for details of the transformation being
11654     *     performed.
11655     *
11656     *     See below the results of undistortImage.
11657     * <ul>
11658     *   <li>
11659     *           a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
11660     *             k_4, k_5, k_6) of distortion were optimized under calibration)
11661     *   <ul>
11662     *     <li>
11663     *            b\) result of fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
11664     *             k_3, k_4) of fisheye distortion were optimized under calibration)
11665     *     </li>
11666     *     <li>
11667     *            c\) original image was captured with fisheye lens
11668     *     </li>
11669     *   </ul>
11670     *
     *     Pictures a) and b) are almost the same. But if we consider points of the image located far from the
     *     center of the image, we can notice that in image a) these points are distorted.
11673     *   </li>
11674     * </ul>
11675     *
11676     *     ![image](pics/fisheye_undistorted.jpg)
11677     */
11678    public static void fisheye_undistortImage(Mat distorted, Mat undistorted, Mat K, Mat D) {
11679        fisheye_undistortImage_2(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj);
11680    }
11681
11682
11683    //
11684    // C++:  void cv::fisheye::estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat& P, double balance = 0.0, Size new_size = Size(), double fov_scale = 1.0)
11685    //
11686
11687    /**
11688     * Estimates new camera intrinsic matrix for undistortion or rectification.
11689     *
     *     @param K Camera intrinsic matrix \(\cameramatrix{K}\).
11691     *     @param image_size Size of the image
11692     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11693     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11694     *     1-channel or 1x1 3-channel
11695     *     @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
11696     *     @param balance Sets the new focal length in range between the min focal length and the max focal
11697     *     length. Balance is in range of [0, 1].
11698     *     @param new_size the new size
11699     *     @param fov_scale Divisor for new focal length.
11700     */
11701    public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P, double balance, Size new_size, double fov_scale) {
11702        fisheye_estimateNewCameraMatrixForUndistortRectify_0(K.nativeObj, D.nativeObj, image_size.width, image_size.height, R.nativeObj, P.nativeObj, balance, new_size.width, new_size.height, fov_scale);
11703    }
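
    // Example: a minimal sketch (not generated) of estimating a new camera matrix for fisheye
    // undistortion with an identity R (empty Mat); balance is chosen in [0, 1] as described above.
    private static Mat exampleFisheyeEstimateNewK(Mat K, Mat D, Size imageSize, double balance) {
        Mat newK = new Mat();
        fisheye_estimateNewCameraMatrixForUndistortRectify(K, D, imageSize, new Mat(), newK,
                balance, imageSize, 1.0);
        return newK;
    }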
11704
11705    /**
11706     * Estimates new camera intrinsic matrix for undistortion or rectification.
11707     *
     *     @param K Camera intrinsic matrix \(\cameramatrix{K}\).
11709     *     @param image_size Size of the image
11710     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11711     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11712     *     1-channel or 1x1 3-channel
11713     *     @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
11714     *     @param balance Sets the new focal length in range between the min focal length and the max focal
11715     *     length. Balance is in range of [0, 1].
11716     *     @param new_size the new size
11717     */
11718    public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P, double balance, Size new_size) {
11719        fisheye_estimateNewCameraMatrixForUndistortRectify_1(K.nativeObj, D.nativeObj, image_size.width, image_size.height, R.nativeObj, P.nativeObj, balance, new_size.width, new_size.height);
11720    }
11721
11722    /**
11723     * Estimates new camera intrinsic matrix for undistortion or rectification.
11724     *
     *     @param K Camera intrinsic matrix \(\cameramatrix{K}\).
11726     *     @param image_size Size of the image
11727     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11728     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11729     *     1-channel or 1x1 3-channel
11730     *     @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
11731     *     @param balance Sets the new focal length in range between the min focal length and the max focal
11732     *     length. Balance is in range of [0, 1].
11733     */
11734    public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P, double balance) {
11735        fisheye_estimateNewCameraMatrixForUndistortRectify_2(K.nativeObj, D.nativeObj, image_size.width, image_size.height, R.nativeObj, P.nativeObj, balance);
11736    }
11737
11738    /**
11739     * Estimates new camera intrinsic matrix for undistortion or rectification.
11740     *
     *     @param K Camera intrinsic matrix \(\cameramatrix{K}\).
11742     *     @param image_size Size of the image
11743     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11744     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11745     *     1-channel or 1x1 3-channel
11746     *     @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
11748     */
11749    public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P) {
11750        fisheye_estimateNewCameraMatrixForUndistortRectify_3(K.nativeObj, D.nativeObj, image_size.width, image_size.height, R.nativeObj, P.nativeObj);
11751    }
11752
11753
11754    //
11755    // C++:  double cv::fisheye::calibrate(vector_Mat objectPoints, vector_Mat imagePoints, Size image_size, Mat& K, Mat& D, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
11756    //
11757
11758    /**
     * Performs camera calibration
11760     *
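     *
     *     A minimal usage sketch; the flag choice, termination criteria and 1280x800 image size are
     *     illustrative assumptions, and each view's point Mats must be filled elsewhere (for example from a
     *     detected calibration pattern):
     * <pre>{@code
     * // One Mat of object points (CV_32FC3) and one Mat of image points (CV_32FC2) per view.
     * List<Mat> objectPoints = new ArrayList<>();
     * List<Mat> imagePoints = new ArrayList<>();
     * Mat K = new Mat();                 // output: 3x3 intrinsic matrix
     * Mat D = new Mat();                 // output: 4 distortion coefficients
     * List<Mat> rvecs = new ArrayList<>();
     * List<Mat> tvecs = new ArrayList<>();
     * int flags = Calib3d.fisheye_CALIB_RECOMPUTE_EXTRINSIC | Calib3d.fisheye_CALIB_FIX_SKEW;
     * TermCriteria criteria = new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 100, 1e-6);
     * double rms = Calib3d.fisheye_calibrate(objectPoints, imagePoints, new Size(1280, 800),
     *         K, D, rvecs, tvecs, flags, criteria);
     * }</pre>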
11761     *     @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
11762     *     coordinate space.
11763     *     @param imagePoints vector of vectors of the projections of calibration pattern points.
11764     *     imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to
11765     *     objectPoints[i].size() for each i.
11766     *     @param image_size Size of the image used only to initialize the camera intrinsic matrix.
11767     *     @param K Output 3x3 floating-point camera intrinsic matrix
11768     *     \(\cameramatrix{A}\) . If
11769     *     REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
11770     *     initialized before calling the function.
11771     *     @param D Output vector of distortion coefficients \(\distcoeffsfisheye\).
11772     *     @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
11773     *     That is, each k-th rotation vector together with the corresponding k-th translation vector (see
11774     *     the next output parameter description) brings the calibration pattern from the model coordinate
11775     *     space (in which object points are specified) to the world coordinate space, that is, a real
11776     *     position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
11777     *     @param tvecs Output vector of translation vectors estimated for each pattern view.
11778     *     @param flags Different flags that may be zero or a combination of the following values:
11779     * <ul>
11780     *   <li>
11781     *         REF: fisheye::CALIB_USE_INTRINSIC_GUESS  cameraMatrix contains valid initial values of
11782     *     fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
11783     *     center ( imageSize is used), and focal distances are computed in a least-squares fashion.
11784     *   </li>
11785     *   <li>
     *         REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsics will be recomputed after each iteration
     *     of intrinsic optimization.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_CHECK_COND  The functions will check the validity of the condition number.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_FIX_SKEW  Skew coefficient (alpha) is set to zero and stays zero.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
     *     are set to zero and stay zero.
11798     *   </li>
11799     *   <li>
11800     *         REF: fisheye::CALIB_FIX_PRINCIPAL_POINT  The principal point is not changed during the global
11801     * optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
11802     *   </li>
11803     *   <li>
11804     *         REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
11805     * optimization. It is the \(max(width,height)/\pi\) or the provided \(f_x\), \(f_y\) when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
     *   </li>
     * </ul>
     *     @param criteria Termination criteria for the iterative optimization algorithm.
11809     * @return automatically generated
11810     */
11811    public static double fisheye_calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs, int flags, TermCriteria criteria) {
11812        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
11813        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
11814        Mat rvecs_mat = new Mat();
11815        Mat tvecs_mat = new Mat();
11816        double retVal = fisheye_calibrate_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
11817        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
11818        rvecs_mat.release();
11819        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
11820        tvecs_mat.release();
11821        return retVal;
11822    }
11823
11824    /**
     * Performs camera calibration
11826     *
11827     *     @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
11828     *     coordinate space.
11829     *     @param imagePoints vector of vectors of the projections of calibration pattern points.
11830     *     imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to
11831     *     objectPoints[i].size() for each i.
11832     *     @param image_size Size of the image used only to initialize the camera intrinsic matrix.
11833     *     @param K Output 3x3 floating-point camera intrinsic matrix
11834     *     \(\cameramatrix{A}\) . If
11835     *     REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
11836     *     initialized before calling the function.
11837     *     @param D Output vector of distortion coefficients \(\distcoeffsfisheye\).
11838     *     @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
11839     *     That is, each k-th rotation vector together with the corresponding k-th translation vector (see
11840     *     the next output parameter description) brings the calibration pattern from the model coordinate
11841     *     space (in which object points are specified) to the world coordinate space, that is, a real
11842     *     position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
11843     *     @param tvecs Output vector of translation vectors estimated for each pattern view.
11844     *     @param flags Different flags that may be zero or a combination of the following values:
11845     * <ul>
11846     *   <li>
11847     *         REF: fisheye::CALIB_USE_INTRINSIC_GUESS  cameraMatrix contains valid initial values of
11848     *     fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
11849     *     center ( imageSize is used), and focal distances are computed in a least-squares fashion.
11850     *   </li>
11851     *   <li>
     *         REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsics will be recomputed after each iteration
     *     of intrinsic optimization.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_CHECK_COND  The functions will check the validity of the condition number.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_FIX_SKEW  Skew coefficient (alpha) is set to zero and stays zero.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
     *     are set to zero and stay zero.
11864     *   </li>
11865     *   <li>
11866     *         REF: fisheye::CALIB_FIX_PRINCIPAL_POINT  The principal point is not changed during the global
11867     * optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
11868     *   </li>
11869     *   <li>
11870     *         REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
11871     * optimization. It is the \(max(width,height)/\pi\) or the provided \(f_x\), \(f_y\) when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
11872     *   </li>
11873     * </ul>
11874     * @return automatically generated
11875     */
11876    public static double fisheye_calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs, int flags) {
11877        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
11878        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
11879        Mat rvecs_mat = new Mat();
11880        Mat tvecs_mat = new Mat();
11881        double retVal = fisheye_calibrate_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags);
11882        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
11883        rvecs_mat.release();
11884        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
11885        tvecs_mat.release();
11886        return retVal;
11887    }
11888
11889    /**
     * Performs camera calibration
11891     *
11892     *     @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
11893     *     coordinate space.
11894     *     @param imagePoints vector of vectors of the projections of calibration pattern points.
11895     *     imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to
11896     *     objectPoints[i].size() for each i.
11897     *     @param image_size Size of the image used only to initialize the camera intrinsic matrix.
11898     *     @param K Output 3x3 floating-point camera intrinsic matrix
11899     *     \(\cameramatrix{A}\) . If
11900     *     REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
11901     *     initialized before calling the function.
11902     *     @param D Output vector of distortion coefficients \(\distcoeffsfisheye\).
11903     *     @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
11904     *     That is, each k-th rotation vector together with the corresponding k-th translation vector (see
11905     *     the next output parameter description) brings the calibration pattern from the model coordinate
11906     *     space (in which object points are specified) to the world coordinate space, that is, a real
11907     *     position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
11908     *     @param tvecs Output vector of translation vectors estimated for each pattern view.
11909     * <ul>
11910     *   <li>
11911     *         REF: fisheye::CALIB_USE_INTRINSIC_GUESS  cameraMatrix contains valid initial values of
11912     *     fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
11913     *     center ( imageSize is used), and focal distances are computed in a least-squares fashion.
11914     *   </li>
11915     *   <li>
     *         REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsics will be recomputed after each iteration
     *     of intrinsic optimization.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_CHECK_COND  The functions will check the validity of the condition number.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_FIX_SKEW  Skew coefficient (alpha) is set to zero and stays zero.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
     *     are set to zero and stay zero.
11928     *   </li>
11929     *   <li>
11930     *         REF: fisheye::CALIB_FIX_PRINCIPAL_POINT  The principal point is not changed during the global
11931     * optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
11932     *   </li>
11933     *   <li>
11934     *         REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
11935     * optimization. It is the \(max(width,height)/\pi\) or the provided \(f_x\), \(f_y\) when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
11936     *   </li>
11937     * </ul>
11938     * @return automatically generated
11939     */
11940    public static double fisheye_calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs) {
11941        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
11942        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
11943        Mat rvecs_mat = new Mat();
11944        Mat tvecs_mat = new Mat();
11945        double retVal = fisheye_calibrate_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
11946        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
11947        rvecs_mat.release();
11948        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
11949        tvecs_mat.release();
11950        return retVal;
11951    }
11952
11953
11954    //
11955    // C++:  void cv::fisheye::stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags, Size newImageSize = Size(), double balance = 0.0, double fov_scale = 1.0)
11956    //
11957
11958    /**
11959     * Stereo rectification for fisheye camera model
11960     *
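     *
     *     A minimal usage sketch; the intrinsics, stereo extrinsics and the 1280x800 image size are
     *     illustrative assumptions (they would normally come from prior fisheye_calibrate and
     *     fisheye_stereoCalibrate runs):
     * <pre>{@code
     * // Intrinsics and stereo extrinsics, assumed filled elsewhere; empty placeholders here.
     * Mat K1 = new Mat(), D1 = new Mat(), K2 = new Mat(), D2 = new Mat();
     * Mat R = new Mat(), tvec = new Mat();
     * Size imageSize = new Size(1280, 800);
     * Mat R1 = new Mat(), R2 = new Mat();      // output rectification rotations
     * Mat P1 = new Mat(), P2 = new Mat();      // output projection matrices
     * Mat Q = new Mat();                       // output disparity-to-depth mapping
     * Calib3d.fisheye_stereoRectify(K1, D1, K2, D2, imageSize, R, tvec,
     *         R1, R2, P1, P2, Q, Calib3d.fisheye_CALIB_ZERO_DISPARITY,
     *         imageSize, 0.0, 1.0);
     * }</pre>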
11961     *     @param K1 First camera intrinsic matrix.
11962     *     @param D1 First camera distortion parameters.
11963     *     @param K2 Second camera intrinsic matrix.
11964     *     @param D2 Second camera distortion parameters.
11965     *     @param imageSize Size of the image used for stereo calibration.
11966     *     @param R Rotation matrix between the coordinate systems of the first and the second
11967     *     cameras.
11968     *     @param tvec Translation vector between coordinate systems of the cameras.
11969     *     @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
11970     *     @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
11971     *     @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
11972     *     camera.
11973     *     @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
11974     *     camera.
11975     *     @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).
11976     *     @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
11977     *     the function makes the principal points of each camera have the same pixel coordinates in the
11978     *     rectified views. And if the flag is not set, the function may still shift the images in the
11979     *     horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
11980     *     useful image area.
11981     *     @param newImageSize New image resolution after rectification. The same size should be passed to
11982     *     initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
     *     is passed (default), it is set to the original imageSize. Setting it to a larger value can help you
11984     *     preserve details in the original image, especially when there is a big radial distortion.
11985     *     @param balance Sets the new focal length in range between the min focal length and the max focal
11986     *     length. Balance is in range of [0, 1].
11987     *     @param fov_scale Divisor for new focal length.
11988     */
11989    public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, Size newImageSize, double balance, double fov_scale) {
11990        fisheye_stereoRectify_0(K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, tvec.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, newImageSize.width, newImageSize.height, balance, fov_scale);
11991    }
11992
11993    /**
11994     * Stereo rectification for fisheye camera model
11995     *
11996     *     @param K1 First camera intrinsic matrix.
11997     *     @param D1 First camera distortion parameters.
11998     *     @param K2 Second camera intrinsic matrix.
11999     *     @param D2 Second camera distortion parameters.
12000     *     @param imageSize Size of the image used for stereo calibration.
12001     *     @param R Rotation matrix between the coordinate systems of the first and the second
12002     *     cameras.
12003     *     @param tvec Translation vector between coordinate systems of the cameras.
12004     *     @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
12005     *     @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
12006     *     @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
12007     *     camera.
12008     *     @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
12009     *     camera.
12010     *     @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).
12011     *     @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
12012     *     the function makes the principal points of each camera have the same pixel coordinates in the
12013     *     rectified views. And if the flag is not set, the function may still shift the images in the
12014     *     horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
12015     *     useful image area.
12016     *     @param newImageSize New image resolution after rectification. The same size should be passed to
12017     *     initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
     *     is passed (default), it is set to the original imageSize. Setting it to a larger value can help you
12019     *     preserve details in the original image, especially when there is a big radial distortion.
12020     *     @param balance Sets the new focal length in range between the min focal length and the max focal
12021     *     length. Balance is in range of [0, 1].
12022     */
12023    public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, Size newImageSize, double balance) {
12024        fisheye_stereoRectify_1(K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, tvec.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, newImageSize.width, newImageSize.height, balance);
12025    }
12026
12027    /**
12028     * Stereo rectification for fisheye camera model
12029     *
12030     *     @param K1 First camera intrinsic matrix.
12031     *     @param D1 First camera distortion parameters.
12032     *     @param K2 Second camera intrinsic matrix.
12033     *     @param D2 Second camera distortion parameters.
12034     *     @param imageSize Size of the image used for stereo calibration.
12035     *     @param R Rotation matrix between the coordinate systems of the first and the second
12036     *     cameras.
12037     *     @param tvec Translation vector between coordinate systems of the cameras.
12038     *     @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
12039     *     @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
12040     *     @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
12041     *     camera.
12042     *     @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
12043     *     camera.
12044     *     @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).
12045     *     @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
12046     *     the function makes the principal points of each camera have the same pixel coordinates in the
12047     *     rectified views. And if the flag is not set, the function may still shift the images in the
12048     *     horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
12049     *     useful image area.
12050     *     @param newImageSize New image resolution after rectification. The same size should be passed to
12051     *     initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
     *     is passed (default), it is set to the original imageSize. Setting it to a larger value can help you
12053     *     preserve details in the original image, especially when there is a big radial distortion.
12055     */
12056    public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, Size newImageSize) {
12057        fisheye_stereoRectify_2(K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, tvec.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, newImageSize.width, newImageSize.height);
12058    }
12059
12060    /**
12061     * Stereo rectification for fisheye camera model
12062     *
12063     *     @param K1 First camera intrinsic matrix.
12064     *     @param D1 First camera distortion parameters.
12065     *     @param K2 Second camera intrinsic matrix.
12066     *     @param D2 Second camera distortion parameters.
12067     *     @param imageSize Size of the image used for stereo calibration.
12068     *     @param R Rotation matrix between the coordinate systems of the first and the second
12069     *     cameras.
12070     *     @param tvec Translation vector between coordinate systems of the cameras.
12071     *     @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
12072     *     @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
12073     *     @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
12074     *     camera.
12075     *     @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
12076     *     camera.
12077     *     @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).
12078     *     @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
12079     *     the function makes the principal points of each camera have the same pixel coordinates in the
12080     *     rectified views. And if the flag is not set, the function may still shift the images in the
12081     *     horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
12082     *     useful image area.
12087     */
12088    public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags) {
12089        fisheye_stereoRectify_3(K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, tvec.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags);
12090    }
12091
12092
12093    //
12094    // C++:  double cv::fisheye::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& K1, Mat& D1, Mat& K2, Mat& D2, Size imageSize, Mat& R, Mat& T, int flags = fisheye::CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
12095    //
12096
12097    /**
12098     * Performs stereo calibration
12099     *
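     *
     *     A minimal usage sketch; the per-view point Mats, the 1280x800 image size and the termination
     *     criteria are illustrative assumptions, and K1/D1/K2/D2 are assumed to already hold per-camera
     *     intrinsics (required here because CALIB_FIX_INTRINSIC keeps them fixed):
     * <pre>{@code
     * // One Mat of object points (CV_32FC3) and of image points (CV_32FC2) per view, per camera.
     * List<Mat> objectPoints = new ArrayList<>();
     * List<Mat> imagePoints1 = new ArrayList<>();
     * List<Mat> imagePoints2 = new ArrayList<>();
     * // Intrinsics from prior per-camera fisheye_calibrate runs; empty placeholders here.
     * Mat K1 = new Mat(), D1 = new Mat(), K2 = new Mat(), D2 = new Mat();
     * Mat R = new Mat(), T = new Mat();  // outputs: relative rotation and translation
     * double rms = Calib3d.fisheye_stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
     *         K1, D1, K2, D2, new Size(1280, 800), R, T,
     *         Calib3d.fisheye_CALIB_FIX_INTRINSIC,
     *         new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 100, 1e-6));
     * }</pre>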
12100     *     @param objectPoints Vector of vectors of the calibration pattern points.
12101     *     @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
12102     *     observed by the first camera.
12103     *     @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
12104     *     observed by the second camera.
12105     *     @param K1 Input/output first camera intrinsic matrix:
12106     *     \(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
12107     *     any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
12108     *     some or all of the matrix components must be initialized.
12109     *     @param D1 Input/output vector of distortion coefficients \(\distcoeffsfisheye\) of 4 elements.
12110     *     @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
12111     *     @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
12112     *     similar to D1 .
12113     *     @param imageSize Size of the image used only to initialize camera intrinsic matrix.
12114     *     @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
12115     *     @param T Output translation vector between the coordinate systems of the cameras.
12116     *     @param flags Different flags that may be zero or a combination of the following values:
12117     * <ul>
12118     *   <li>
     *         REF: fisheye::CALIB_FIX_INTRINSIC  Fix K1, K2 and D1, D2 so that only the R, T matrices
     *     are estimated.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_USE_INTRINSIC_GUESS  K1, K2 contain valid initial values of
     *     fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
     *     center (imageSize is used), and focal distances are computed in a least-squares fashion.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsics will be recomputed after each iteration
     *     of intrinsic optimization.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_CHECK_COND  The functions will check the validity of the condition number.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_FIX_SKEW  Skew coefficient (alpha) is set to zero and stays zero.
     *   </li>
     *   <li>
     *        REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zero and stay
     *     zero.
     *   </li>
     * </ul>
     *     @param criteria Termination criteria for the iterative optimization algorithm.
12143     * @return automatically generated
12144     */
12145    public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T, int flags, TermCriteria criteria) {
12146        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
12147        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
12148        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
12149        return fisheye_stereoCalibrate_0(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
12150    }
12151
12152    /**
12153     * Performs stereo calibration
12154     *
12155     *     @param objectPoints Vector of vectors of the calibration pattern points.
12156     *     @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
12157     *     observed by the first camera.
12158     *     @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
12159     *     observed by the second camera.
12160     *     @param K1 Input/output first camera intrinsic matrix:
12161     *     \(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
12162     *     any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
12163     *     some or all of the matrix components must be initialized.
12164     *     @param D1 Input/output vector of distortion coefficients \(\distcoeffsfisheye\) of 4 elements.
12165     *     @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
12166     *     @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
12167     *     similar to D1 .
12168     *     @param imageSize Size of the image used only to initialize camera intrinsic matrix.
12169     *     @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
12170     *     @param T Output translation vector between the coordinate systems of the cameras.
12171     *     @param flags Different flags that may be zero or a combination of the following values:
12172     * <ul>
12173     *   <li>
     *         REF: fisheye::CALIB_FIX_INTRINSIC  Fix K1, K2 and D1, D2 so that only the R, T matrices
     *     are estimated.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_USE_INTRINSIC_GUESS  K1, K2 contain valid initial values of
     *     fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
     *     center (imageSize is used), and focal distances are computed in a least-squares fashion.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsics will be recomputed after each iteration
     *     of intrinsic optimization.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_CHECK_COND  The functions will check the validity of the condition number.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_FIX_SKEW  Skew coefficient (alpha) is set to zero and stays zero.
     *   </li>
     *   <li>
     *        REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zero and stay
     *     zero.
12195     *   </li>
12196     * </ul>
12197     * @return automatically generated
12198     */
12199    public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T, int flags) {
12200        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
12201        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
12202        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
12203        return fisheye_stereoCalibrate_1(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, flags);
12204    }
12205
12206    /**
12207     * Performs stereo calibration
12208     *
12209     *     @param objectPoints Vector of vectors of the calibration pattern points.
12210     *     @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
12211     *     observed by the first camera.
12212     *     @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
12213     *     observed by the second camera.
12214     *     @param K1 Input/output first camera intrinsic matrix:
12215     *     \(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
12216     *     any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
12217     *     some or all of the matrix components must be initialized.
12218     *     @param D1 Input/output vector of distortion coefficients \(\distcoeffsfisheye\) of 4 elements.
12219     *     @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
12220     *     @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
12221     *     similar to D1 .
12222     *     @param imageSize Size of the image used only to initialize camera intrinsic matrix.
12223     *     @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
12224     *     @param T Output translation vector between the coordinate systems of the cameras.
12225     * <ul>
12226     *   <li>
     *         REF: fisheye::CALIB_FIX_INTRINSIC  Fix K1, K2 and D1, D2 so that only the R, T matrices
     *     are estimated.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_USE_INTRINSIC_GUESS  K1, K2 contain valid initial values of
     *     fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
     *     center (imageSize is used), and focal distances are computed in a least-squares fashion.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsics will be recomputed after each iteration
     *     of intrinsic optimization.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_CHECK_COND  The functions will check the validity of the condition number.
     *   </li>
     *   <li>
     *         REF: fisheye::CALIB_FIX_SKEW  Skew coefficient (alpha) is set to zero and stays zero.
     *   </li>
     *   <li>
     *        REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zero and stay
     *     zero.
12248     *   </li>
12249     * </ul>
12250     * @return automatically generated
12251     */
12252    public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T) {
12253        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
12254        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
12255        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
12256        return fisheye_stereoCalibrate_2(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj);
12257    }
12258
12259
12260
12261
12262    // C++:  void cv::Rodrigues(Mat src, Mat& dst, Mat& jacobian = Mat())
12263    private static native void Rodrigues_0(long src_nativeObj, long dst_nativeObj, long jacobian_nativeObj);
12264    private static native void Rodrigues_1(long src_nativeObj, long dst_nativeObj);
12265
12266    // C++:  Mat cv::findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, int method = 0, double ransacReprojThreshold = 3, Mat& mask = Mat(), int maxIters = 2000, double confidence = 0.995)
12267    private static native long findHomography_0(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold, long mask_nativeObj, int maxIters, double confidence);
12268    private static native long findHomography_1(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold, long mask_nativeObj, int maxIters);
12269    private static native long findHomography_2(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold, long mask_nativeObj);
12270    private static native long findHomography_3(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold);
12271    private static native long findHomography_4(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method);
12272    private static native long findHomography_5(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj);
12273
12274    // C++:  Mat cv::findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, Mat& mask, UsacParams params)
12275    private static native long findHomography_6(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, long mask_nativeObj, long params_nativeObj);
12276
12277    // C++:  Vec3d cv::RQDecomp3x3(Mat src, Mat& mtxR, Mat& mtxQ, Mat& Qx = Mat(), Mat& Qy = Mat(), Mat& Qz = Mat())
12278    private static native double[] RQDecomp3x3_0(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj, long Qx_nativeObj, long Qy_nativeObj, long Qz_nativeObj);
12279    private static native double[] RQDecomp3x3_1(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj, long Qx_nativeObj, long Qy_nativeObj);
12280    private static native double[] RQDecomp3x3_2(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj, long Qx_nativeObj);
12281    private static native double[] RQDecomp3x3_3(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj);
12282
12283    // C++:  void cv::decomposeProjectionMatrix(Mat projMatrix, Mat& cameraMatrix, Mat& rotMatrix, Mat& transVect, Mat& rotMatrixX = Mat(), Mat& rotMatrixY = Mat(), Mat& rotMatrixZ = Mat(), Mat& eulerAngles = Mat())
12284    private static native void decomposeProjectionMatrix_0(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj, long rotMatrixY_nativeObj, long rotMatrixZ_nativeObj, long eulerAngles_nativeObj);
12285    private static native void decomposeProjectionMatrix_1(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj, long rotMatrixY_nativeObj, long rotMatrixZ_nativeObj);
12286    private static native void decomposeProjectionMatrix_2(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj, long rotMatrixY_nativeObj);
12287    private static native void decomposeProjectionMatrix_3(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj);
12288    private static native void decomposeProjectionMatrix_4(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj);
12289
12290    // C++:  void cv::matMulDeriv(Mat A, Mat B, Mat& dABdA, Mat& dABdB)
12291    private static native void matMulDeriv_0(long A_nativeObj, long B_nativeObj, long dABdA_nativeObj, long dABdB_nativeObj);
12292
12293    // C++:  void cv::composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat& rvec3, Mat& tvec3, Mat& dr3dr1 = Mat(), Mat& dr3dt1 = Mat(), Mat& dr3dr2 = Mat(), Mat& dr3dt2 = Mat(), Mat& dt3dr1 = Mat(), Mat& dt3dt1 = Mat(), Mat& dt3dr2 = Mat(), Mat& dt3dt2 = Mat())
12294    private static native void composeRT_0(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj, long dt3dt1_nativeObj, long dt3dr2_nativeObj, long dt3dt2_nativeObj);
12295    private static native void composeRT_1(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj, long dt3dt1_nativeObj, long dt3dr2_nativeObj);
12296    private static native void composeRT_2(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj, long dt3dt1_nativeObj);
12297    private static native void composeRT_3(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj);
12298    private static native void composeRT_4(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj);
12299    private static native void composeRT_5(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj);
12300    private static native void composeRT_6(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj);
12301    private static native void composeRT_7(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj);
12302    private static native void composeRT_8(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj);
12303
12304    // C++:  void cv::projectPoints(vector_Point3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, vector_double distCoeffs, vector_Point2f& imagePoints, Mat& jacobian = Mat(), double aspectRatio = 0)
12305    private static native void projectPoints_0(long objectPoints_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long imagePoints_mat_nativeObj, long jacobian_nativeObj, double aspectRatio);
12306    private static native void projectPoints_1(long objectPoints_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long imagePoints_mat_nativeObj, long jacobian_nativeObj);
12307    private static native void projectPoints_2(long objectPoints_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long imagePoints_mat_nativeObj);
12308
12309    // C++:  bool cv::solvePnP(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE)
12310    private static native boolean solvePnP_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int flags);
12311    private static native boolean solvePnP_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess);
12312    private static native boolean solvePnP_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj);
12313
12314    // C++:  bool cv::solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int iterationsCount = 100, float reprojectionError = 8.0, double confidence = 0.99, Mat& inliers = Mat(), int flags = SOLVEPNP_ITERATIVE)
12315    private static native boolean solvePnPRansac_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, long inliers_nativeObj, int flags);
12316    private static native boolean solvePnPRansac_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, long inliers_nativeObj);
12317    private static native boolean solvePnPRansac_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence);
12318    private static native boolean solvePnPRansac_3(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError);
12319    private static native boolean solvePnPRansac_4(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount);
12320    private static native boolean solvePnPRansac_5(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess);
12321    private static native boolean solvePnPRansac_6(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj);
12322
12323    // C++:  bool cv::solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat& cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, Mat& inliers, UsacParams params = UsacParams())
12324    private static native boolean solvePnPRansac_7(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long inliers_nativeObj, long params_nativeObj);
12325    private static native boolean solvePnPRansac_8(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long inliers_nativeObj);
12326
12327    // C++:  int cv::solveP3P(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags)
12328    private static native int solveP3P_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags);
12329
12330    // C++:  void cv::solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON))
12331    private static native void solvePnPRefineLM_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12332    private static native void solvePnPRefineLM_1(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj);
12333
12334    // C++:  void cv::solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON), double VVSlambda = 1)
12335    private static native void solvePnPRefineVVS_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon, double VVSlambda);
12336    private static native void solvePnPRefineVVS_1(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12337    private static native void solvePnPRefineVVS_2(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj);
12338
12339    // C++:  int cv::solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE, Mat rvec = Mat(), Mat tvec = Mat(), Mat& reprojectionError = Mat())
12340    private static native int solvePnPGeneric_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess, int flags, long rvec_nativeObj, long tvec_nativeObj, long reprojectionError_nativeObj);
12341    private static native int solvePnPGeneric_1(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess, int flags, long rvec_nativeObj, long tvec_nativeObj);
12342    private static native int solvePnPGeneric_2(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess, int flags, long rvec_nativeObj);
12343    private static native int solvePnPGeneric_3(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess, int flags);
12344    private static native int solvePnPGeneric_4(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess);
12345    private static native int solvePnPGeneric_5(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj);
12346
12347    // C++:  Mat cv::initCameraMatrix2D(vector_vector_Point3f objectPoints, vector_vector_Point2f imagePoints, Size imageSize, double aspectRatio = 1.0)
12348    private static native long initCameraMatrix2D_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, double aspectRatio);
12349    private static native long initCameraMatrix2D_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height);
12350
12351    // C++:  bool cv::findChessboardCorners(Mat image, Size patternSize, vector_Point2f& corners, int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE)
12352    private static native boolean findChessboardCorners_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj, int flags);
12353    private static native boolean findChessboardCorners_1(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj);
12354
12355    // C++:  bool cv::checkChessboard(Mat img, Size size)
12356    private static native boolean checkChessboard_0(long img_nativeObj, double size_width, double size_height);
12357
12358    // C++:  bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags, Mat& meta)
12359    private static native boolean findChessboardCornersSBWithMeta_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, int flags, long meta_nativeObj);
12360
12361    // C++:  bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags = 0)
12362    private static native boolean findChessboardCornersSB_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, int flags);
12363    private static native boolean findChessboardCornersSB_1(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj);
12364
12365    // C++:  Scalar cv::estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance = 0.8F, bool vertical = false, Mat& sharpness = Mat())
12366    private static native double[] estimateChessboardSharpness_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, float rise_distance, boolean vertical, long sharpness_nativeObj);
12367    private static native double[] estimateChessboardSharpness_1(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, float rise_distance, boolean vertical);
12368    private static native double[] estimateChessboardSharpness_2(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, float rise_distance);
12369    private static native double[] estimateChessboardSharpness_3(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj);
12370
12371    // C++:  bool cv::find4QuadCornerSubpix(Mat img, Mat& corners, Size region_size)
12372    private static native boolean find4QuadCornerSubpix_0(long img_nativeObj, long corners_nativeObj, double region_size_width, double region_size_height);
12373
12374    // C++:  void cv::drawChessboardCorners(Mat& image, Size patternSize, vector_Point2f corners, bool patternWasFound)
12375    private static native void drawChessboardCorners_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj, boolean patternWasFound);
12376
12377    // C++:  void cv::drawFrameAxes(Mat& image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length, int thickness = 3)
12378    private static native void drawFrameAxes_0(long image_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, float length, int thickness);
12379    private static native void drawFrameAxes_1(long image_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, float length);
12380
12381    // C++:  bool cv::findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID, Ptr_FeatureDetector blobDetector = SimpleBlobDetector::create())
12382    private static native boolean findCirclesGrid_0(long image_nativeObj, double patternSize_width, double patternSize_height, long centers_nativeObj, int flags);
12383    private static native boolean findCirclesGrid_2(long image_nativeObj, double patternSize_width, double patternSize_height, long centers_nativeObj);

    // C++:  double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
    private static native double calibrateCameraExtended_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long perViewErrors_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
    private static native double calibrateCameraExtended_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long perViewErrors_nativeObj, int flags);
    private static native double calibrateCameraExtended_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long perViewErrors_nativeObj);

    // C++:  double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
    private static native double calibrateCamera_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
    private static native double calibrateCamera_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags);
    private static native double calibrateCamera_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj);

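    // The calibrateCamera_* natives above back the public Calib3d.calibrateCamera
    // wrappers. A minimal usage sketch; objectPoints/imagePoints are assumed to hold
    // per-view 3D board points and matching detected 2D corners, and all names below
    // are placeholders rather than part of the generated bindings:
    //
    //     Mat cameraMatrix = new Mat();
    //     Mat distCoeffs = new Mat();
    //     List<Mat> rvecs = new ArrayList<>();
    //     List<Mat> tvecs = new ArrayList<>();
    //     double rms = Calib3d.calibrateCamera(objectPoints, imagePoints, imageSize,
    //             cameraMatrix, distCoeffs, rvecs, tvecs);
    //     // rms is the overall RMS re-projection error; cameraMatrix and distCoeffs
    //     // receive the estimated intrinsics and distortion coefficients.
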
    // C++:  double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& stdDeviationsObjPoints, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
    private static native double calibrateCameraROExtended_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long stdDeviationsObjPoints_nativeObj, long perViewErrors_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
    private static native double calibrateCameraROExtended_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long stdDeviationsObjPoints_nativeObj, long perViewErrors_nativeObj, int flags);
    private static native double calibrateCameraROExtended_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long stdDeviationsObjPoints_nativeObj, long perViewErrors_nativeObj);

    // C++:  double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
    private static native double calibrateCameraRO_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
    private static native double calibrateCameraRO_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, int flags);
    private static native double calibrateCameraRO_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj);

    // C++:  void cv::calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength, Point2d& principalPoint, double& aspectRatio)
    private static native void calibrationMatrixValues_0(long cameraMatrix_nativeObj, double imageSize_width, double imageSize_height, double apertureWidth, double apertureHeight, double[] fovx_out, double[] fovy_out, double[] focalLength_out, double[] principalPoint_out, double[] aspectRatio_out);

    // C++:  double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, Mat& perViewErrors, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
    private static native double stereoCalibrateExtended_0(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, long perViewErrors_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
    private static native double stereoCalibrateExtended_1(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, long perViewErrors_nativeObj, int flags);
    private static native double stereoCalibrateExtended_2(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, long perViewErrors_nativeObj);

    // C++:  double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
    private static native double stereoCalibrate_0(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
    private static native double stereoCalibrate_1(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, int flags);
    private static native double stereoCalibrate_2(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj);

    // C++:  void cv::stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags = CALIB_ZERO_DISPARITY, double alpha = -1, Size newImageSize = Size(), Rect* validPixROI1 = 0, Rect* validPixROI2 = 0)
    private static native void stereoRectify_0(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha, double newImageSize_width, double newImageSize_height, double[] validPixROI1_out, double[] validPixROI2_out);
    private static native void stereoRectify_1(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha, double newImageSize_width, double newImageSize_height, double[] validPixROI1_out);
    private static native void stereoRectify_2(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha, double newImageSize_width, double newImageSize_height);
    private static native void stereoRectify_3(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha);
    private static native void stereoRectify_4(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags);
    private static native void stereoRectify_5(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj);

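    // The stereoCalibrate_* and stereoRectify_* natives above back the public
    // Calib3d.stereoCalibrate / Calib3d.stereoRectify wrappers, which are normally
    // chained. A sketch; the per-camera intrinsics and point lists are assumed to
    // come from a prior mono calibration, and all names are placeholders:
    //
    //     Mat R = new Mat(), T = new Mat(), E = new Mat(), F = new Mat();
    //     Calib3d.stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
    //             cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
    //             imageSize, R, T, E, F);
    //     Mat R1 = new Mat(), R2 = new Mat(), P1 = new Mat(), P2 = new Mat(), Q = new Mat();
    //     Calib3d.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
    //             imageSize, R, T, R1, R2, P1, P2, Q);
    //     // R1/R2 and P1/P2 feed initUndistortRectifyMap for each camera; Q is the
    //     // disparity-to-depth mapping used by reprojectImageTo3D.
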
    // C++:  bool cv::stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat& H1, Mat& H2, double threshold = 5)
    private static native boolean stereoRectifyUncalibrated_0(long points1_nativeObj, long points2_nativeObj, long F_nativeObj, double imgSize_width, double imgSize_height, long H1_nativeObj, long H2_nativeObj, double threshold);
    private static native boolean stereoRectifyUncalibrated_1(long points1_nativeObj, long points2_nativeObj, long F_nativeObj, double imgSize_width, double imgSize_height, long H1_nativeObj, long H2_nativeObj);

    // C++:  float cv::rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, vector_Mat imgpt1, vector_Mat imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q, double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags)
    private static native float rectify3Collinear_0(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long cameraMatrix3_nativeObj, long distCoeffs3_nativeObj, long imgpt1_mat_nativeObj, long imgpt3_mat_nativeObj, double imageSize_width, double imageSize_height, long R12_nativeObj, long T12_nativeObj, long R13_nativeObj, long T13_nativeObj, long R1_nativeObj, long R2_nativeObj, long R3_nativeObj, long P1_nativeObj, long P2_nativeObj, long P3_nativeObj, long Q_nativeObj, double alpha, double newImgSize_width, double newImgSize_height, double[] roi1_out, double[] roi2_out, int flags);

    // C++:  Mat cv::getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize = Size(), Rect* validPixROI = 0, bool centerPrincipalPoint = false)
    private static native long getOptimalNewCameraMatrix_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha, double newImgSize_width, double newImgSize_height, double[] validPixROI_out, boolean centerPrincipalPoint);
    private static native long getOptimalNewCameraMatrix_1(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha, double newImgSize_width, double newImgSize_height, double[] validPixROI_out);
    private static native long getOptimalNewCameraMatrix_2(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha, double newImgSize_width, double newImgSize_height);
    private static native long getOptimalNewCameraMatrix_3(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha);

    // C++:  void cv::calibrateHandEye(vector_Mat R_gripper2base, vector_Mat t_gripper2base, vector_Mat R_target2cam, vector_Mat t_target2cam, Mat& R_cam2gripper, Mat& t_cam2gripper, HandEyeCalibrationMethod method = CALIB_HAND_EYE_TSAI)
    private static native void calibrateHandEye_0(long R_gripper2base_mat_nativeObj, long t_gripper2base_mat_nativeObj, long R_target2cam_mat_nativeObj, long t_target2cam_mat_nativeObj, long R_cam2gripper_nativeObj, long t_cam2gripper_nativeObj, int method);
    private static native void calibrateHandEye_1(long R_gripper2base_mat_nativeObj, long t_gripper2base_mat_nativeObj, long R_target2cam_mat_nativeObj, long t_target2cam_mat_nativeObj, long R_cam2gripper_nativeObj, long t_cam2gripper_nativeObj);

    // C++:  void cv::calibrateRobotWorldHandEye(vector_Mat R_world2cam, vector_Mat t_world2cam, vector_Mat R_base2gripper, vector_Mat t_base2gripper, Mat& R_base2world, Mat& t_base2world, Mat& R_gripper2cam, Mat& t_gripper2cam, RobotWorldHandEyeCalibrationMethod method = CALIB_ROBOT_WORLD_HAND_EYE_SHAH)
    private static native void calibrateRobotWorldHandEye_0(long R_world2cam_mat_nativeObj, long t_world2cam_mat_nativeObj, long R_base2gripper_mat_nativeObj, long t_base2gripper_mat_nativeObj, long R_base2world_nativeObj, long t_base2world_nativeObj, long R_gripper2cam_nativeObj, long t_gripper2cam_nativeObj, int method);
    private static native void calibrateRobotWorldHandEye_1(long R_world2cam_mat_nativeObj, long t_world2cam_mat_nativeObj, long R_base2gripper_mat_nativeObj, long t_base2gripper_mat_nativeObj, long R_base2world_nativeObj, long t_base2world_nativeObj, long R_gripper2cam_nativeObj, long t_gripper2cam_nativeObj);

    // C++:  void cv::convertPointsToHomogeneous(Mat src, Mat& dst)
    private static native void convertPointsToHomogeneous_0(long src_nativeObj, long dst_nativeObj);

    // C++:  void cv::convertPointsFromHomogeneous(Mat src, Mat& dst)
    private static native void convertPointsFromHomogeneous_0(long src_nativeObj, long dst_nativeObj);

    // C++:  Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method, double ransacReprojThreshold, double confidence, int maxIters, Mat& mask = Mat())
    private static native long findFundamentalMat_0(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold, double confidence, int maxIters, long mask_nativeObj);
    private static native long findFundamentalMat_1(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold, double confidence, int maxIters);

    // C++:  Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method = FM_RANSAC, double ransacReprojThreshold = 3., double confidence = 0.99, Mat& mask = Mat())
    private static native long findFundamentalMat_2(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold, double confidence, long mask_nativeObj);
    private static native long findFundamentalMat_3(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold, double confidence);
    private static native long findFundamentalMat_4(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold);
    private static native long findFundamentalMat_5(long points1_mat_nativeObj, long points2_mat_nativeObj, int method);
    private static native long findFundamentalMat_6(long points1_mat_nativeObj, long points2_mat_nativeObj);

    // C++:  Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, Mat& mask, UsacParams params)
    private static native long findFundamentalMat_7(long points1_mat_nativeObj, long points2_mat_nativeObj, long mask_nativeObj, long params_nativeObj);

    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method = RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
    private static native long findEssentialMat_0(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method, double prob, double threshold, long mask_nativeObj);
    private static native long findEssentialMat_1(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method, double prob, double threshold);
    private static native long findEssentialMat_2(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method, double prob);
    private static native long findEssentialMat_3(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method);
    private static native long findEssentialMat_4(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj);

    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, double focal = 1.0, Point2d pp = Point2d(0, 0), int method = RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
    private static native long findEssentialMat_5(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method, double prob, double threshold, long mask_nativeObj);
    private static native long findEssentialMat_6(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method, double prob, double threshold);
    private static native long findEssentialMat_7(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method, double prob);
    private static native long findEssentialMat_8(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method);
    private static native long findEssentialMat_9(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y);
    private static native long findEssentialMat_10(long points1_nativeObj, long points2_nativeObj, double focal);
    private static native long findEssentialMat_11(long points1_nativeObj, long points2_nativeObj);

    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method = RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
    private static native long findEssentialMat_12(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, int method, double prob, double threshold, long mask_nativeObj);
    private static native long findEssentialMat_13(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, int method, double prob, double threshold);
    private static native long findEssentialMat_14(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, int method, double prob);
    private static native long findEssentialMat_15(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, int method);
    private static native long findEssentialMat_16(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj);

    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat cameraMatrix2, Mat dist_coeff1, Mat dist_coeff2, Mat& mask, UsacParams params)
    private static native long findEssentialMat_17(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long cameraMatrix2_nativeObj, long dist_coeff1_nativeObj, long dist_coeff2_nativeObj, long mask_nativeObj, long params_nativeObj);

    // C++:  void cv::decomposeEssentialMat(Mat E, Mat& R1, Mat& R2, Mat& t)
    private static native void decomposeEssentialMat_0(long E_nativeObj, long R1_nativeObj, long R2_nativeObj, long t_nativeObj);

    // C++:  int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, Mat& mask = Mat())
    private static native int recoverPose_0(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj, long mask_nativeObj);
    private static native int recoverPose_1(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj);

    // C++:  int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat& R, Mat& t, double focal = 1.0, Point2d pp = Point2d(0, 0), Mat& mask = Mat())
    private static native int recoverPose_2(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long R_nativeObj, long t_nativeObj, double focal, double pp_x, double pp_y, long mask_nativeObj);
    private static native int recoverPose_3(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long R_nativeObj, long t_nativeObj, double focal, double pp_x, double pp_y);
    private static native int recoverPose_4(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long R_nativeObj, long t_nativeObj, double focal);
    private static native int recoverPose_5(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long R_nativeObj, long t_nativeObj);

    // C++:  int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, double distanceThresh, Mat& mask = Mat(), Mat& triangulatedPoints = Mat())
    private static native int recoverPose_6(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj, double distanceThresh, long mask_nativeObj, long triangulatedPoints_nativeObj);
    private static native int recoverPose_7(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj, double distanceThresh, long mask_nativeObj);
    private static native int recoverPose_8(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj, double distanceThresh);

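    // The findEssentialMat_* and recoverPose_* natives above back the corresponding
    // public wrappers; chained together they recover relative camera motion from
    // matched points. A sketch; points1/points2 are assumed to be matched 2D point
    // Mats and cameraMatrix the 3x3 intrinsics, with placeholder names throughout:
    //
    //     Mat mask = new Mat();
    //     Mat E = Calib3d.findEssentialMat(points1, points2, cameraMatrix,
    //             Calib3d.RANSAC, 0.999, 1.0, mask);
    //     Mat R = new Mat(), t = new Mat();
    //     int inliers = Calib3d.recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
    //     // R and t hold the relative rotation and (unit-scale) translation of the
    //     // second camera; inliers counts correspondences passing the cheirality check.
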
    // C++:  void cv::computeCorrespondEpilines(Mat points, int whichImage, Mat F, Mat& lines)
    private static native void computeCorrespondEpilines_0(long points_nativeObj, int whichImage, long F_nativeObj, long lines_nativeObj);

    // C++:  void cv::triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat& points4D)
    private static native void triangulatePoints_0(long projMatr1_nativeObj, long projMatr2_nativeObj, long projPoints1_nativeObj, long projPoints2_nativeObj, long points4D_nativeObj);

    // C++:  void cv::correctMatches(Mat F, Mat points1, Mat points2, Mat& newPoints1, Mat& newPoints2)
    private static native void correctMatches_0(long F_nativeObj, long points1_nativeObj, long points2_nativeObj, long newPoints1_nativeObj, long newPoints2_nativeObj);

    // C++:  void cv::filterSpeckles(Mat& img, double newVal, int maxSpeckleSize, double maxDiff, Mat& buf = Mat())
    private static native void filterSpeckles_0(long img_nativeObj, double newVal, int maxSpeckleSize, double maxDiff, long buf_nativeObj);
    private static native void filterSpeckles_1(long img_nativeObj, double newVal, int maxSpeckleSize, double maxDiff);

    // C++:  Rect cv::getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int blockSize)
    private static native double[] getValidDisparityROI_0(int roi1_x, int roi1_y, int roi1_width, int roi1_height, int roi2_x, int roi2_y, int roi2_width, int roi2_height, int minDisparity, int numberOfDisparities, int blockSize);

    // C++:  void cv::validateDisparity(Mat& disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp = 1)
    private static native void validateDisparity_0(long disparity_nativeObj, long cost_nativeObj, int minDisparity, int numberOfDisparities, int disp12MaxDisp);
    private static native void validateDisparity_1(long disparity_nativeObj, long cost_nativeObj, int minDisparity, int numberOfDisparities);

    // C++:  void cv::reprojectImageTo3D(Mat disparity, Mat& _3dImage, Mat Q, bool handleMissingValues = false, int ddepth = -1)
    private static native void reprojectImageTo3D_0(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj, boolean handleMissingValues, int ddepth);
    private static native void reprojectImageTo3D_1(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj, boolean handleMissingValues);
    private static native void reprojectImageTo3D_2(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj);

    // C++:  double cv::sampsonDistance(Mat pt1, Mat pt2, Mat F)
    private static native double sampsonDistance_0(long pt1_nativeObj, long pt2_nativeObj, long F_nativeObj);

    // C++:  int cv::estimateAffine3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
    private static native int estimateAffine3D_0(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold, double confidence);
    private static native int estimateAffine3D_1(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold);
    private static native int estimateAffine3D_2(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj);

    // C++:  int cv::estimateTranslation3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
    private static native int estimateTranslation3D_0(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold, double confidence);
    private static native int estimateTranslation3D_1(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold);
    private static native int estimateTranslation3D_2(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj);

    // C++:  Mat cv::estimateAffine2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
    private static native long estimateAffine2D_0(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters);
    private static native long estimateAffine2D_1(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters, double confidence);
    private static native long estimateAffine2D_2(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters);
    private static native long estimateAffine2D_3(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold);
    private static native long estimateAffine2D_4(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method);
    private static native long estimateAffine2D_5(long from_nativeObj, long to_nativeObj, long inliers_nativeObj);
    private static native long estimateAffine2D_6(long from_nativeObj, long to_nativeObj);

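    // The estimateAffine2D_* natives above back Calib3d.estimateAffine2D, which fits
    // a 2x3 affine transform between two 2D point sets with robust outlier rejection.
    // A sketch; from/to are assumed to be matching 2D point Mats, and the names and
    // parameter values below are illustrative only:
    //
    //     Mat inliers = new Mat();
    //     Mat affine = Calib3d.estimateAffine2D(from, to, inliers,
    //             Calib3d.RANSAC, 3.0, 2000, 0.99, 10);
    //     // affine is a 2x3 CV_64F matrix (empty if estimation failed);
    //     // inliers marks the correspondences that supported it.
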
    // C++:  Mat cv::estimateAffine2D(Mat pts1, Mat pts2, Mat& inliers, UsacParams params)
    private static native long estimateAffine2D_7(long pts1_nativeObj, long pts2_nativeObj, long inliers_nativeObj, long params_nativeObj);

    // C++:  Mat cv::estimateAffinePartial2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
    private static native long estimateAffinePartial2D_0(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters);
    private static native long estimateAffinePartial2D_1(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters, double confidence);
    private static native long estimateAffinePartial2D_2(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters);
    private static native long estimateAffinePartial2D_3(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold);
    private static native long estimateAffinePartial2D_4(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method);
    private static native long estimateAffinePartial2D_5(long from_nativeObj, long to_nativeObj, long inliers_nativeObj);
    private static native long estimateAffinePartial2D_6(long from_nativeObj, long to_nativeObj);

    // C++:  int cv::decomposeHomographyMat(Mat H, Mat K, vector_Mat& rotations, vector_Mat& translations, vector_Mat& normals)
    private static native int decomposeHomographyMat_0(long H_nativeObj, long K_nativeObj, long rotations_mat_nativeObj, long translations_mat_nativeObj, long normals_mat_nativeObj);

    // C++:  void cv::filterHomographyDecompByVisibleRefpoints(vector_Mat rotations, vector_Mat normals, Mat beforePoints, Mat afterPoints, Mat& possibleSolutions, Mat pointsMask = Mat())
    private static native void filterHomographyDecompByVisibleRefpoints_0(long rotations_mat_nativeObj, long normals_mat_nativeObj, long beforePoints_nativeObj, long afterPoints_nativeObj, long possibleSolutions_nativeObj, long pointsMask_nativeObj);
    private static native void filterHomographyDecompByVisibleRefpoints_1(long rotations_mat_nativeObj, long normals_mat_nativeObj, long beforePoints_nativeObj, long afterPoints_nativeObj, long possibleSolutions_nativeObj);

    // C++:  void cv::undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat())
    private static native void undistort_0(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long newCameraMatrix_nativeObj);
    private static native void undistort_1(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj);

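    // The getOptimalNewCameraMatrix_* natives (further above) and the undistort_*
    // natives back a common undistortion pattern: pick a new camera matrix for the
    // desired trade-off between cropping and black borders, then undistort with it.
    // A sketch; cameraMatrix/distCoeffs are assumed to come from calibrateCamera,
    // and all names are placeholders:
    //
    //     Mat newCameraMatrix = Calib3d.getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1.0);
    //     Mat undistorted = new Mat();
    //     Calib3d.undistort(src, undistorted, cameraMatrix, distCoeffs, newCameraMatrix);
    //     // alpha = 1.0 keeps all source pixels (possibly with black borders);
    //     // alpha = 0.0 crops to only valid pixels.
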
    // C++:  void cv::initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
    private static native void initUndistortRectifyMap_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long newCameraMatrix_nativeObj, double size_width, double size_height, int m1type, long map1_nativeObj, long map2_nativeObj);

    // C++:  Mat cv::getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false)
    private static native long getDefaultNewCameraMatrix_0(long cameraMatrix_nativeObj, double imgsize_width, double imgsize_height, boolean centerPrincipalPoint);
    private static native long getDefaultNewCameraMatrix_1(long cameraMatrix_nativeObj, double imgsize_width, double imgsize_height);
    private static native long getDefaultNewCameraMatrix_2(long cameraMatrix_nativeObj);

    // C++:  void cv::undistortPoints(vector_Point2f src, vector_Point2f& dst, Mat cameraMatrix, Mat distCoeffs, Mat R = Mat(), Mat P = Mat())
    private static native void undistortPoints_0(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long P_nativeObj);
    private static native void undistortPoints_1(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj);
    private static native void undistortPoints_2(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj);

    // C++:  void cv::undistortPoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria)
    private static native void undistortPointsIter_0(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long P_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon);

    // C++:  void cv::fisheye::projectPoints(Mat objectPoints, Mat& imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha = 0, Mat& jacobian = Mat())
    private static native void fisheye_projectPoints_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long K_nativeObj, long D_nativeObj, double alpha, long jacobian_nativeObj);
    private static native void fisheye_projectPoints_1(long objectPoints_nativeObj, long imagePoints_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long K_nativeObj, long D_nativeObj, double alpha);
    private static native void fisheye_projectPoints_2(long objectPoints_nativeObj, long imagePoints_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long K_nativeObj, long D_nativeObj);

    // C++:  void cv::fisheye::distortPoints(Mat undistorted, Mat& distorted, Mat K, Mat D, double alpha = 0)
    private static native void fisheye_distortPoints_0(long undistorted_nativeObj, long distorted_nativeObj, long K_nativeObj, long D_nativeObj, double alpha);
    private static native void fisheye_distortPoints_1(long undistorted_nativeObj, long distorted_nativeObj, long K_nativeObj, long D_nativeObj);

    // C++:  void cv::fisheye::undistortPoints(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat R = Mat(), Mat P = Mat())
    private static native void fisheye_undistortPoints_0(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long R_nativeObj, long P_nativeObj);
    private static native void fisheye_undistortPoints_1(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long R_nativeObj);
    private static native void fisheye_undistortPoints_2(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj);

    // C++:  void cv::fisheye::initUndistortRectifyMap(Mat K, Mat D, Mat R, Mat P, Size size, int m1type, Mat& map1, Mat& map2)
    private static native void fisheye_initUndistortRectifyMap_0(long K_nativeObj, long D_nativeObj, long R_nativeObj, long P_nativeObj, double size_width, double size_height, int m1type, long map1_nativeObj, long map2_nativeObj);

    // C++:  void cv::fisheye::undistortImage(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat Knew = cv::Mat(), Size new_size = Size())
    private static native void fisheye_undistortImage_0(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long Knew_nativeObj, double new_size_width, double new_size_height);
    private static native void fisheye_undistortImage_1(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long Knew_nativeObj);
    private static native void fisheye_undistortImage_2(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj);

    // C++:  void cv::fisheye::estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat& P, double balance = 0.0, Size new_size = Size(), double fov_scale = 1.0)
    private static native void fisheye_estimateNewCameraMatrixForUndistortRectify_0(long K_nativeObj, long D_nativeObj, double image_size_width, double image_size_height, long R_nativeObj, long P_nativeObj, double balance, double new_size_width, double new_size_height, double fov_scale);
    private static native void fisheye_estimateNewCameraMatrixForUndistortRectify_1(long K_nativeObj, long D_nativeObj, double image_size_width, double image_size_height, long R_nativeObj, long P_nativeObj, double balance, double new_size_width, double new_size_height);
    private static native void fisheye_estimateNewCameraMatrixForUndistortRectify_2(long K_nativeObj, long D_nativeObj, double image_size_width, double image_size_height, long R_nativeObj, long P_nativeObj, double balance);
    private static native void fisheye_estimateNewCameraMatrixForUndistortRectify_3(long K_nativeObj, long D_nativeObj, double image_size_width, double image_size_height, long R_nativeObj, long P_nativeObj);

    // C++:  double cv::fisheye::calibrate(vector_Mat objectPoints, vector_Mat imagePoints, Size image_size, Mat& K, Mat& D, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
    private static native double fisheye_calibrate_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double image_size_width, double image_size_height, long K_nativeObj, long D_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
    private static native double fisheye_calibrate_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double image_size_width, double image_size_height, long K_nativeObj, long D_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags);
    private static native double fisheye_calibrate_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double image_size_width, double image_size_height, long K_nativeObj, long D_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj);

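    // The fisheye_calibrate_* natives above back the fisheye-model calibration
    // wrapper, exposed in Java with a fisheye_ prefix. A sketch, assuming the same
    // per-view object/image point lists as for the pinhole calibrateCamera and
    // using placeholder names:
    //
    //     Mat K = new Mat();
    //     Mat D = new Mat();
    //     List<Mat> rvecs = new ArrayList<>();
    //     List<Mat> tvecs = new ArrayList<>();
    //     double rms = Calib3d.fisheye_calibrate(objectPoints, imagePoints, imageSize,
    //             K, D, rvecs, tvecs,
    //             Calib3d.fisheye_CALIB_RECOMPUTE_EXTRINSIC | Calib3d.fisheye_CALIB_FIX_SKEW);
    //     // K receives the 3x3 intrinsics and D the four fisheye distortion coefficients.
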
    // C++:  void cv::fisheye::stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags, Size newImageSize = Size(), double balance = 0.0, double fov_scale = 1.0)
    private static native void fisheye_stereoRectify_0(long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long tvec_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double newImageSize_width, double newImageSize_height, double balance, double fov_scale);
    private static native void fisheye_stereoRectify_1(long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long tvec_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double newImageSize_width, double newImageSize_height, double balance);
    private static native void fisheye_stereoRectify_2(long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long tvec_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double newImageSize_width, double newImageSize_height);
    private static native void fisheye_stereoRectify_3(long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long tvec_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags);

    // C++:  double cv::fisheye::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& K1, Mat& D1, Mat& K2, Mat& D2, Size imageSize, Mat& R, Mat& T, int flags = fisheye::CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
    private static native double fisheye_stereoCalibrate_0(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
    private static native double fisheye_stereoCalibrate_1(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, int flags);
    private static native double fisheye_stereoCalibrate_2(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj);

}