001//
002// This file is auto-generated. Please don't modify it!
003//
004package org.opencv.imgproc;
005
006import java.util.ArrayList;
007import java.util.List;
008import org.opencv.core.Mat;
009import org.opencv.core.MatOfFloat;
010import org.opencv.core.MatOfInt;
011import org.opencv.core.MatOfInt4;
012import org.opencv.core.MatOfPoint;
013import org.opencv.core.MatOfPoint2f;
014import org.opencv.core.Point;
015import org.opencv.core.Rect;
016import org.opencv.core.RotatedRect;
017import org.opencv.core.Scalar;
018import org.opencv.core.Size;
019import org.opencv.core.TermCriteria;
020import org.opencv.imgproc.CLAHE;
021import org.opencv.imgproc.GeneralizedHoughBallard;
022import org.opencv.imgproc.GeneralizedHoughGuil;
023import org.opencv.imgproc.LineSegmentDetector;
024import org.opencv.utils.Converters;
025
026// C++: class Imgproc
027
028public class Imgproc {
029
030    private static final int
031            IPL_BORDER_CONSTANT = 0,
032            IPL_BORDER_REPLICATE = 1,
033            IPL_BORDER_REFLECT = 2,
034            IPL_BORDER_WRAP = 3,
035            IPL_BORDER_REFLECT_101 = 4,
036            IPL_BORDER_TRANSPARENT = 5,
037            CV_INTER_NN = 0,
038            CV_INTER_LINEAR = 1,
039            CV_INTER_CUBIC = 2,
040            CV_INTER_AREA = 3,
041            CV_INTER_LANCZOS4 = 4,
042            CV_MOP_ERODE = 0,
043            CV_MOP_DILATE = 1,
044            CV_MOP_OPEN = 2,
045            CV_MOP_CLOSE = 3,
046            CV_MOP_GRADIENT = 4,
047            CV_MOP_TOPHAT = 5,
048            CV_MOP_BLACKHAT = 6,
049            CV_RETR_EXTERNAL = 0,
050            CV_RETR_LIST = 1,
051            CV_RETR_CCOMP = 2,
052            CV_RETR_TREE = 3,
053            CV_RETR_FLOODFILL = 4,
054            CV_CHAIN_APPROX_NONE = 1,
055            CV_CHAIN_APPROX_SIMPLE = 2,
056            CV_CHAIN_APPROX_TC89_L1 = 3,
057            CV_CHAIN_APPROX_TC89_KCOS = 4,
058            CV_THRESH_BINARY = 0,
059            CV_THRESH_BINARY_INV = 1,
060            CV_THRESH_TRUNC = 2,
061            CV_THRESH_TOZERO = 3,
062            CV_THRESH_TOZERO_INV = 4,
063            CV_THRESH_MASK = 7,
064            CV_THRESH_OTSU = 8,
065            CV_THRESH_TRIANGLE = 16;
066
067
068    // C++: enum <unnamed>
069    public static final int
070            CV_GAUSSIAN_5x5 = 7,
071            CV_SCHARR = -1,
072            CV_MAX_SOBEL_KSIZE = 7,
073            CV_RGBA2mRGBA = 125,
074            CV_mRGBA2RGBA = 126,
075            CV_WARP_FILL_OUTLIERS = 8,
076            CV_WARP_INVERSE_MAP = 16,
077            CV_CHAIN_CODE = 0,
078            CV_LINK_RUNS = 5,
079            CV_POLY_APPROX_DP = 0,
080            CV_CONTOURS_MATCH_I1 = 1,
081            CV_CONTOURS_MATCH_I2 = 2,
082            CV_CONTOURS_MATCH_I3 = 3,
083            CV_CLOCKWISE = 1,
084            CV_COUNTER_CLOCKWISE = 2,
085            CV_COMP_CORREL = 0,
086            CV_COMP_CHISQR = 1,
087            CV_COMP_INTERSECT = 2,
088            CV_COMP_BHATTACHARYYA = 3,
089            CV_COMP_HELLINGER = CV_COMP_BHATTACHARYYA,
090            CV_COMP_CHISQR_ALT = 4,
091            CV_COMP_KL_DIV = 5,
092            CV_DIST_MASK_3 = 3,
093            CV_DIST_MASK_5 = 5,
094            CV_DIST_MASK_PRECISE = 0,
095            CV_DIST_LABEL_CCOMP = 0,
096            CV_DIST_LABEL_PIXEL = 1,
097            CV_DIST_USER = -1,
098            CV_DIST_L1 = 1,
099            CV_DIST_L2 = 2,
100            CV_DIST_C = 3,
101            CV_DIST_L12 = 4,
102            CV_DIST_FAIR = 5,
103            CV_DIST_WELSCH = 6,
104            CV_DIST_HUBER = 7,
105            CV_CANNY_L2_GRADIENT = (1 << 31),
106            CV_HOUGH_STANDARD = 0,
107            CV_HOUGH_PROBABILISTIC = 1,
108            CV_HOUGH_MULTI_SCALE = 2,
109            CV_HOUGH_GRADIENT = 3;
110
111
112    // C++: enum MorphShapes_c (MorphShapes_c)
113    public static final int
114            CV_SHAPE_RECT = 0,
115            CV_SHAPE_CROSS = 1,
116            CV_SHAPE_ELLIPSE = 2,
117            CV_SHAPE_CUSTOM = 100;
118
119
120    // C++: enum SmoothMethod_c (SmoothMethod_c)
121    public static final int
122            CV_BLUR_NO_SCALE = 0,
123            CV_BLUR = 1,
124            CV_GAUSSIAN = 2,
125            CV_MEDIAN = 3,
126            CV_BILATERAL = 4;
127
128
129    // C++: enum AdaptiveThresholdTypes (cv.AdaptiveThresholdTypes)
130    public static final int
131            ADAPTIVE_THRESH_MEAN_C = 0,
132            ADAPTIVE_THRESH_GAUSSIAN_C = 1;
133
134
135    // C++: enum ColorConversionCodes (cv.ColorConversionCodes)
136    public static final int
137            COLOR_BGR2BGRA = 0,
138            COLOR_RGB2RGBA = COLOR_BGR2BGRA,
139            COLOR_BGRA2BGR = 1,
140            COLOR_RGBA2RGB = COLOR_BGRA2BGR,
141            COLOR_BGR2RGBA = 2,
142            COLOR_RGB2BGRA = COLOR_BGR2RGBA,
143            COLOR_RGBA2BGR = 3,
144            COLOR_BGRA2RGB = COLOR_RGBA2BGR,
145            COLOR_BGR2RGB = 4,
146            COLOR_RGB2BGR = COLOR_BGR2RGB,
147            COLOR_BGRA2RGBA = 5,
148            COLOR_RGBA2BGRA = COLOR_BGRA2RGBA,
149            COLOR_BGR2GRAY = 6,
150            COLOR_RGB2GRAY = 7,
151            COLOR_GRAY2BGR = 8,
152            COLOR_GRAY2RGB = COLOR_GRAY2BGR,
153            COLOR_GRAY2BGRA = 9,
154            COLOR_GRAY2RGBA = COLOR_GRAY2BGRA,
155            COLOR_BGRA2GRAY = 10,
156            COLOR_RGBA2GRAY = 11,
157            COLOR_BGR2BGR565 = 12,
158            COLOR_RGB2BGR565 = 13,
159            COLOR_BGR5652BGR = 14,
160            COLOR_BGR5652RGB = 15,
161            COLOR_BGRA2BGR565 = 16,
162            COLOR_RGBA2BGR565 = 17,
163            COLOR_BGR5652BGRA = 18,
164            COLOR_BGR5652RGBA = 19,
165            COLOR_GRAY2BGR565 = 20,
166            COLOR_BGR5652GRAY = 21,
167            COLOR_BGR2BGR555 = 22,
168            COLOR_RGB2BGR555 = 23,
169            COLOR_BGR5552BGR = 24,
170            COLOR_BGR5552RGB = 25,
171            COLOR_BGRA2BGR555 = 26,
172            COLOR_RGBA2BGR555 = 27,
173            COLOR_BGR5552BGRA = 28,
174            COLOR_BGR5552RGBA = 29,
175            COLOR_GRAY2BGR555 = 30,
176            COLOR_BGR5552GRAY = 31,
177            COLOR_BGR2XYZ = 32,
178            COLOR_RGB2XYZ = 33,
179            COLOR_XYZ2BGR = 34,
180            COLOR_XYZ2RGB = 35,
181            COLOR_BGR2YCrCb = 36,
182            COLOR_RGB2YCrCb = 37,
183            COLOR_YCrCb2BGR = 38,
184            COLOR_YCrCb2RGB = 39,
185            COLOR_BGR2HSV = 40,
186            COLOR_RGB2HSV = 41,
187            COLOR_BGR2Lab = 44,
188            COLOR_RGB2Lab = 45,
189            COLOR_BGR2Luv = 50,
190            COLOR_RGB2Luv = 51,
191            COLOR_BGR2HLS = 52,
192            COLOR_RGB2HLS = 53,
193            COLOR_HSV2BGR = 54,
194            COLOR_HSV2RGB = 55,
195            COLOR_Lab2BGR = 56,
196            COLOR_Lab2RGB = 57,
197            COLOR_Luv2BGR = 58,
198            COLOR_Luv2RGB = 59,
199            COLOR_HLS2BGR = 60,
200            COLOR_HLS2RGB = 61,
201            COLOR_BGR2HSV_FULL = 66,
202            COLOR_RGB2HSV_FULL = 67,
203            COLOR_BGR2HLS_FULL = 68,
204            COLOR_RGB2HLS_FULL = 69,
205            COLOR_HSV2BGR_FULL = 70,
206            COLOR_HSV2RGB_FULL = 71,
207            COLOR_HLS2BGR_FULL = 72,
208            COLOR_HLS2RGB_FULL = 73,
209            COLOR_LBGR2Lab = 74,
210            COLOR_LRGB2Lab = 75,
211            COLOR_LBGR2Luv = 76,
212            COLOR_LRGB2Luv = 77,
213            COLOR_Lab2LBGR = 78,
214            COLOR_Lab2LRGB = 79,
215            COLOR_Luv2LBGR = 80,
216            COLOR_Luv2LRGB = 81,
217            COLOR_BGR2YUV = 82,
218            COLOR_RGB2YUV = 83,
219            COLOR_YUV2BGR = 84,
220            COLOR_YUV2RGB = 85,
221            COLOR_YUV2RGB_NV12 = 90,
222            COLOR_YUV2BGR_NV12 = 91,
223            COLOR_YUV2RGB_NV21 = 92,
224            COLOR_YUV2BGR_NV21 = 93,
225            COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21,
226            COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21,
227            COLOR_YUV2RGBA_NV12 = 94,
228            COLOR_YUV2BGRA_NV12 = 95,
229            COLOR_YUV2RGBA_NV21 = 96,
230            COLOR_YUV2BGRA_NV21 = 97,
231            COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21,
232            COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21,
233            COLOR_YUV2RGB_YV12 = 98,
234            COLOR_YUV2BGR_YV12 = 99,
235            COLOR_YUV2RGB_IYUV = 100,
236            COLOR_YUV2BGR_IYUV = 101,
237            COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV,
238            COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV,
239            COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12,
240            COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12,
241            COLOR_YUV2RGBA_YV12 = 102,
242            COLOR_YUV2BGRA_YV12 = 103,
243            COLOR_YUV2RGBA_IYUV = 104,
244            COLOR_YUV2BGRA_IYUV = 105,
245            COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV,
246            COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV,
247            COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12,
248            COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12,
249            COLOR_YUV2GRAY_420 = 106,
250            COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420,
251            COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420,
252            COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420,
253            COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420,
254            COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420,
255            COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420,
256            COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420,
257            COLOR_YUV2RGB_UYVY = 107,
258            COLOR_YUV2BGR_UYVY = 108,
259            COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY,
260            COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY,
261            COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY,
262            COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY,
263            COLOR_YUV2RGBA_UYVY = 111,
264            COLOR_YUV2BGRA_UYVY = 112,
265            COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY,
266            COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY,
267            COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY,
268            COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY,
269            COLOR_YUV2RGB_YUY2 = 115,
270            COLOR_YUV2BGR_YUY2 = 116,
271            COLOR_YUV2RGB_YVYU = 117,
272            COLOR_YUV2BGR_YVYU = 118,
273            COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2,
274            COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2,
275            COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2,
276            COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2,
277            COLOR_YUV2RGBA_YUY2 = 119,
278            COLOR_YUV2BGRA_YUY2 = 120,
279            COLOR_YUV2RGBA_YVYU = 121,
280            COLOR_YUV2BGRA_YVYU = 122,
281            COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2,
282            COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2,
283            COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2,
284            COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2,
285            COLOR_YUV2GRAY_UYVY = 123,
286            COLOR_YUV2GRAY_YUY2 = 124,
287            COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY,
288            COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY,
289            COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2,
290            COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2,
291            COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2,
292            COLOR_RGBA2mRGBA = 125,
293            COLOR_mRGBA2RGBA = 126,
294            COLOR_RGB2YUV_I420 = 127,
295            COLOR_BGR2YUV_I420 = 128,
296            COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420,
297            COLOR_BGR2YUV_IYUV = COLOR_BGR2YUV_I420,
298            COLOR_RGBA2YUV_I420 = 129,
299            COLOR_BGRA2YUV_I420 = 130,
300            COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420,
301            COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420,
302            COLOR_RGB2YUV_YV12 = 131,
303            COLOR_BGR2YUV_YV12 = 132,
304            COLOR_RGBA2YUV_YV12 = 133,
305            COLOR_BGRA2YUV_YV12 = 134,
306            COLOR_BayerBG2BGR = 46,
307            COLOR_BayerGB2BGR = 47,
308            COLOR_BayerRG2BGR = 48,
309            COLOR_BayerGR2BGR = 49,
310            COLOR_BayerBG2RGB = COLOR_BayerRG2BGR,
311            COLOR_BayerGB2RGB = COLOR_BayerGR2BGR,
312            COLOR_BayerRG2RGB = COLOR_BayerBG2BGR,
313            COLOR_BayerGR2RGB = COLOR_BayerGB2BGR,
314            COLOR_BayerBG2GRAY = 86,
315            COLOR_BayerGB2GRAY = 87,
316            COLOR_BayerRG2GRAY = 88,
317            COLOR_BayerGR2GRAY = 89,
318            COLOR_BayerBG2BGR_VNG = 62,
319            COLOR_BayerGB2BGR_VNG = 63,
320            COLOR_BayerRG2BGR_VNG = 64,
321            COLOR_BayerGR2BGR_VNG = 65,
322            COLOR_BayerBG2RGB_VNG = COLOR_BayerRG2BGR_VNG,
323            COLOR_BayerGB2RGB_VNG = COLOR_BayerGR2BGR_VNG,
324            COLOR_BayerRG2RGB_VNG = COLOR_BayerBG2BGR_VNG,
325            COLOR_BayerGR2RGB_VNG = COLOR_BayerGB2BGR_VNG,
326            COLOR_BayerBG2BGR_EA = 135,
327            COLOR_BayerGB2BGR_EA = 136,
328            COLOR_BayerRG2BGR_EA = 137,
329            COLOR_BayerGR2BGR_EA = 138,
330            COLOR_BayerBG2RGB_EA = COLOR_BayerRG2BGR_EA,
331            COLOR_BayerGB2RGB_EA = COLOR_BayerGR2BGR_EA,
332            COLOR_BayerRG2RGB_EA = COLOR_BayerBG2BGR_EA,
333            COLOR_BayerGR2RGB_EA = COLOR_BayerGB2BGR_EA,
334            COLOR_BayerBG2BGRA = 139,
335            COLOR_BayerGB2BGRA = 140,
336            COLOR_BayerRG2BGRA = 141,
337            COLOR_BayerGR2BGRA = 142,
338            COLOR_BayerBG2RGBA = COLOR_BayerRG2BGRA,
339            COLOR_BayerGB2RGBA = COLOR_BayerGR2BGRA,
340            COLOR_BayerRG2RGBA = COLOR_BayerBG2BGRA,
341            COLOR_BayerGR2RGBA = COLOR_BayerGB2BGRA,
342            COLOR_COLORCVT_MAX = 143;
343
344
345    // C++: enum ColormapTypes (cv.ColormapTypes)
346    public static final int
347            COLORMAP_AUTUMN = 0,
348            COLORMAP_BONE = 1,
349            COLORMAP_JET = 2,
350            COLORMAP_WINTER = 3,
351            COLORMAP_RAINBOW = 4,
352            COLORMAP_OCEAN = 5,
353            COLORMAP_SUMMER = 6,
354            COLORMAP_SPRING = 7,
355            COLORMAP_COOL = 8,
356            COLORMAP_HSV = 9,
357            COLORMAP_PINK = 10,
358            COLORMAP_HOT = 11,
359            COLORMAP_PARULA = 12,
360            COLORMAP_MAGMA = 13,
361            COLORMAP_INFERNO = 14,
362            COLORMAP_PLASMA = 15,
363            COLORMAP_VIRIDIS = 16,
364            COLORMAP_CIVIDIS = 17,
365            COLORMAP_TWILIGHT = 18,
366            COLORMAP_TWILIGHT_SHIFTED = 19,
367            COLORMAP_TURBO = 20,
368            COLORMAP_DEEPGREEN = 21;
369
370
371    // C++: enum ConnectedComponentsAlgorithmsTypes (cv.ConnectedComponentsAlgorithmsTypes)
372    public static final int
373            CCL_DEFAULT = -1,
374            CCL_WU = 0,
375            CCL_GRANA = 1,
376            CCL_BOLELLI = 2,
377            CCL_SAUF = 3,
378            CCL_BBDT = 4,
379            CCL_SPAGHETTI = 5;
380
381
382    // C++: enum ConnectedComponentsTypes (cv.ConnectedComponentsTypes)
383    public static final int
384            CC_STAT_LEFT = 0,
385            CC_STAT_TOP = 1,
386            CC_STAT_WIDTH = 2,
387            CC_STAT_HEIGHT = 3,
388            CC_STAT_AREA = 4,
389            CC_STAT_MAX = 5;
390
391
392    // C++: enum ContourApproximationModes (cv.ContourApproximationModes)
393    public static final int
394            CHAIN_APPROX_NONE = 1,
395            CHAIN_APPROX_SIMPLE = 2,
396            CHAIN_APPROX_TC89_L1 = 3,
397            CHAIN_APPROX_TC89_KCOS = 4;
398
399
400    // C++: enum DistanceTransformLabelTypes (cv.DistanceTransformLabelTypes)
401    public static final int
402            DIST_LABEL_CCOMP = 0,
403            DIST_LABEL_PIXEL = 1;
404
405
406    // C++: enum DistanceTransformMasks (cv.DistanceTransformMasks)
407    public static final int
408            DIST_MASK_3 = 3,
409            DIST_MASK_5 = 5,
410            DIST_MASK_PRECISE = 0;
411
412
413    // C++: enum DistanceTypes (cv.DistanceTypes)
414    public static final int
415            DIST_USER = -1,
416            DIST_L1 = 1,
417            DIST_L2 = 2,
418            DIST_C = 3,
419            DIST_L12 = 4,
420            DIST_FAIR = 5,
421            DIST_WELSCH = 6,
422            DIST_HUBER = 7;
423
424
425    // C++: enum FloodFillFlags (cv.FloodFillFlags)
426    public static final int
427            FLOODFILL_FIXED_RANGE = 1 << 16,
428            FLOODFILL_MASK_ONLY = 1 << 17;
429
430
431    // C++: enum GrabCutClasses (cv.GrabCutClasses)
432    public static final int
433            GC_BGD = 0,
434            GC_FGD = 1,
435            GC_PR_BGD = 2,
436            GC_PR_FGD = 3;
437
438
439    // C++: enum GrabCutModes (cv.GrabCutModes)
440    public static final int
441            GC_INIT_WITH_RECT = 0,
442            GC_INIT_WITH_MASK = 1,
443            GC_EVAL = 2,
444            GC_EVAL_FREEZE_MODEL = 3;
445
446
447    // C++: enum HersheyFonts (cv.HersheyFonts)
448    public static final int
449            FONT_HERSHEY_SIMPLEX = 0,
450            FONT_HERSHEY_PLAIN = 1,
451            FONT_HERSHEY_DUPLEX = 2,
452            FONT_HERSHEY_COMPLEX = 3,
453            FONT_HERSHEY_TRIPLEX = 4,
454            FONT_HERSHEY_COMPLEX_SMALL = 5,
455            FONT_HERSHEY_SCRIPT_SIMPLEX = 6,
456            FONT_HERSHEY_SCRIPT_COMPLEX = 7,
457            FONT_ITALIC = 16;
458
459
460    // C++: enum HistCompMethods (cv.HistCompMethods)
461    public static final int
462            HISTCMP_CORREL = 0,
463            HISTCMP_CHISQR = 1,
464            HISTCMP_INTERSECT = 2,
465            HISTCMP_BHATTACHARYYA = 3,
466            HISTCMP_HELLINGER = HISTCMP_BHATTACHARYYA,
467            HISTCMP_CHISQR_ALT = 4,
468            HISTCMP_KL_DIV = 5;
469
470
471    // C++: enum HoughModes (cv.HoughModes)
472    public static final int
473            HOUGH_STANDARD = 0,
474            HOUGH_PROBABILISTIC = 1,
475            HOUGH_MULTI_SCALE = 2,
476            HOUGH_GRADIENT = 3,
477            HOUGH_GRADIENT_ALT = 4;
478
479
480    // C++: enum InterpolationFlags (cv.InterpolationFlags)
481    public static final int
482            INTER_NEAREST = 0,
483            INTER_LINEAR = 1,
484            INTER_CUBIC = 2,
485            INTER_AREA = 3,
486            INTER_LANCZOS4 = 4,
487            INTER_LINEAR_EXACT = 5,
488            INTER_NEAREST_EXACT = 6,
489            INTER_MAX = 7,
490            WARP_FILL_OUTLIERS = 8,
491            WARP_INVERSE_MAP = 16;
492
493
494    // C++: enum InterpolationMasks (cv.InterpolationMasks)
495    public static final int
496            INTER_BITS = 5,
497            INTER_BITS2 = INTER_BITS * 2,
498            INTER_TAB_SIZE = 1 << INTER_BITS,
499            INTER_TAB_SIZE2 = INTER_TAB_SIZE * INTER_TAB_SIZE;
500
501
502    // C++: enum LineSegmentDetectorModes (cv.LineSegmentDetectorModes)
503    public static final int
504            LSD_REFINE_NONE = 0,
505            LSD_REFINE_STD = 1,
506            LSD_REFINE_ADV = 2;
507
508
509    // C++: enum LineTypes (cv.LineTypes)
510    public static final int
511            FILLED = -1,
512            LINE_4 = 4,
513            LINE_8 = 8,
514            LINE_AA = 16;
515
516
517    // C++: enum MarkerTypes (cv.MarkerTypes)
518    public static final int
519            MARKER_CROSS = 0,
520            MARKER_TILTED_CROSS = 1,
521            MARKER_STAR = 2,
522            MARKER_DIAMOND = 3,
523            MARKER_SQUARE = 4,
524            MARKER_TRIANGLE_UP = 5,
525            MARKER_TRIANGLE_DOWN = 6;
526
527
528    // C++: enum MorphShapes (cv.MorphShapes)
529    public static final int
530            MORPH_RECT = 0,
531            MORPH_CROSS = 1,
532            MORPH_ELLIPSE = 2;
533
534
535    // C++: enum MorphTypes (cv.MorphTypes)
536    public static final int
537            MORPH_ERODE = 0,
538            MORPH_DILATE = 1,
539            MORPH_OPEN = 2,
540            MORPH_CLOSE = 3,
541            MORPH_GRADIENT = 4,
542            MORPH_TOPHAT = 5,
543            MORPH_BLACKHAT = 6,
544            MORPH_HITMISS = 7;
545
546
547    // C++: enum RectanglesIntersectTypes (cv.RectanglesIntersectTypes)
548    public static final int
549            INTERSECT_NONE = 0,
550            INTERSECT_PARTIAL = 1,
551            INTERSECT_FULL = 2;
552
553
554    // C++: enum RetrievalModes (cv.RetrievalModes)
555    public static final int
556            RETR_EXTERNAL = 0,
557            RETR_LIST = 1,
558            RETR_CCOMP = 2,
559            RETR_TREE = 3,
560            RETR_FLOODFILL = 4;
561
562
563    // C++: enum ShapeMatchModes (cv.ShapeMatchModes)
564    public static final int
565            CONTOURS_MATCH_I1 = 1,
566            CONTOURS_MATCH_I2 = 2,
567            CONTOURS_MATCH_I3 = 3;
568
569
570    // C++: enum SpecialFilter (cv.SpecialFilter)
571    public static final int
572            FILTER_SCHARR = -1;
573
574
575    // C++: enum TemplateMatchModes (cv.TemplateMatchModes)
576    public static final int
577            TM_SQDIFF = 0,
578            TM_SQDIFF_NORMED = 1,
579            TM_CCORR = 2,
580            TM_CCORR_NORMED = 3,
581            TM_CCOEFF = 4,
582            TM_CCOEFF_NORMED = 5;
583
584
585    // C++: enum ThresholdTypes (cv.ThresholdTypes)
586    public static final int
587            THRESH_BINARY = 0,
588            THRESH_BINARY_INV = 1,
589            THRESH_TRUNC = 2,
590            THRESH_TOZERO = 3,
591            THRESH_TOZERO_INV = 4,
592            THRESH_MASK = 7,
593            THRESH_OTSU = 8,
594            THRESH_TRIANGLE = 16;
595
596
597    // C++: enum WarpPolarMode (cv.WarpPolarMode)
598    public static final int
599            WARP_POLAR_LINEAR = 0,
600            WARP_POLAR_LOG = 256;
601
602
603    //
604    // C++:  Ptr_LineSegmentDetector cv::createLineSegmentDetector(int _refine = LSD_REFINE_STD, double _scale = 0.8, double _sigma_scale = 0.6, double _quant = 2.0, double _ang_th = 22.5, double _log_eps = 0, double _density_th = 0.7, int _n_bins = 1024)
605    //
606
607    /**
608     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
609     *
610     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
611     * to edit those, as to tailor it for their own application.
612     *
613     * @param _refine The way found lines will be refined, see #LineSegmentDetectorModes
614     * @param _scale The scale of the image that will be used to find the lines. Range (0..1].
615     * @param _sigma_scale Sigma for Gaussian filter. It is computed as sigma = _sigma_scale/_scale.
616     * @param _quant Bound to the quantization error on the gradient norm.
617     * @param _ang_th Gradient angle tolerance in degrees.
618     * @param _log_eps Detection threshold: -log10(NFA) &gt; log_eps. Used only when advance refinement
619     * is chosen.
620     * @param _density_th Minimal density of aligned region points in the enclosing rectangle.
621     * @param _n_bins Number of bins in pseudo-ordering of gradient modulus.
622     *
623     * <b>Note:</b> Implementation has been removed due original code license conflict
624     * @return automatically generated
625     */
626    public static LineSegmentDetector createLineSegmentDetector(int _refine, double _scale, double _sigma_scale, double _quant, double _ang_th, double _log_eps, double _density_th, int _n_bins) {
627        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_0(_refine, _scale, _sigma_scale, _quant, _ang_th, _log_eps, _density_th, _n_bins));
628    }
629
630    /**
631     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
632     *
633     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
634     * to edit those, as to tailor it for their own application.
635     *
636     * @param _refine The way found lines will be refined, see #LineSegmentDetectorModes
637     * @param _scale The scale of the image that will be used to find the lines. Range (0..1].
638     * @param _sigma_scale Sigma for Gaussian filter. It is computed as sigma = _sigma_scale/_scale.
639     * @param _quant Bound to the quantization error on the gradient norm.
640     * @param _ang_th Gradient angle tolerance in degrees.
641     * @param _log_eps Detection threshold: -log10(NFA) &gt; log_eps. Used only when advance refinement
642     * is chosen.
643     * @param _density_th Minimal density of aligned region points in the enclosing rectangle.
644     *
645     * <b>Note:</b> Implementation has been removed due original code license conflict
646     * @return automatically generated
647     */
648    public static LineSegmentDetector createLineSegmentDetector(int _refine, double _scale, double _sigma_scale, double _quant, double _ang_th, double _log_eps, double _density_th) {
649        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_1(_refine, _scale, _sigma_scale, _quant, _ang_th, _log_eps, _density_th));
650    }
651
652    /**
653     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
654     *
655     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
656     * to edit those, as to tailor it for their own application.
657     *
658     * @param _refine The way found lines will be refined, see #LineSegmentDetectorModes
659     * @param _scale The scale of the image that will be used to find the lines. Range (0..1].
660     * @param _sigma_scale Sigma for Gaussian filter. It is computed as sigma = _sigma_scale/_scale.
661     * @param _quant Bound to the quantization error on the gradient norm.
662     * @param _ang_th Gradient angle tolerance in degrees.
663     * @param _log_eps Detection threshold: -log10(NFA) &gt; log_eps. Used only when advance refinement
664     * is chosen.
665     *
666     * <b>Note:</b> Implementation has been removed due original code license conflict
667     * @return automatically generated
668     */
669    public static LineSegmentDetector createLineSegmentDetector(int _refine, double _scale, double _sigma_scale, double _quant, double _ang_th, double _log_eps) {
670        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_2(_refine, _scale, _sigma_scale, _quant, _ang_th, _log_eps));
671    }
672
673    /**
674     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
675     *
676     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
677     * to edit those, as to tailor it for their own application.
678     *
679     * @param _refine The way found lines will be refined, see #LineSegmentDetectorModes
680     * @param _scale The scale of the image that will be used to find the lines. Range (0..1].
681     * @param _sigma_scale Sigma for Gaussian filter. It is computed as sigma = _sigma_scale/_scale.
682     * @param _quant Bound to the quantization error on the gradient norm.
683     * @param _ang_th Gradient angle tolerance in degrees.
684     * is chosen.
685     *
686     * <b>Note:</b> Implementation has been removed due original code license conflict
687     * @return automatically generated
688     */
689    public static LineSegmentDetector createLineSegmentDetector(int _refine, double _scale, double _sigma_scale, double _quant, double _ang_th) {
690        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_3(_refine, _scale, _sigma_scale, _quant, _ang_th));
691    }
692
693    /**
694     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
695     *
696     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
697     * to edit those, as to tailor it for their own application.
698     *
699     * @param _refine The way found lines will be refined, see #LineSegmentDetectorModes
700     * @param _scale The scale of the image that will be used to find the lines. Range (0..1].
701     * @param _sigma_scale Sigma for Gaussian filter. It is computed as sigma = _sigma_scale/_scale.
702     * @param _quant Bound to the quantization error on the gradient norm.
703     * is chosen.
704     *
705     * <b>Note:</b> Implementation has been removed due original code license conflict
706     * @return automatically generated
707     */
708    public static LineSegmentDetector createLineSegmentDetector(int _refine, double _scale, double _sigma_scale, double _quant) {
709        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_4(_refine, _scale, _sigma_scale, _quant));
710    }
711
712    /**
713     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
714     *
715     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
716     * to edit those, as to tailor it for their own application.
717     *
718     * @param _refine The way found lines will be refined, see #LineSegmentDetectorModes
719     * @param _scale The scale of the image that will be used to find the lines. Range (0..1].
720     * @param _sigma_scale Sigma for Gaussian filter. It is computed as sigma = _sigma_scale/_scale.
721     * is chosen.
722     *
723     * <b>Note:</b> Implementation has been removed due original code license conflict
724     * @return automatically generated
725     */
726    public static LineSegmentDetector createLineSegmentDetector(int _refine, double _scale, double _sigma_scale) {
727        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_5(_refine, _scale, _sigma_scale));
728    }
729
730    /**
731     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
732     *
733     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
734     * to edit those, as to tailor it for their own application.
735     *
736     * @param _refine The way found lines will be refined, see #LineSegmentDetectorModes
737     * @param _scale The scale of the image that will be used to find the lines. Range (0..1].
738     * is chosen.
739     *
740     * <b>Note:</b> Implementation has been removed due original code license conflict
741     * @return automatically generated
742     */
743    public static LineSegmentDetector createLineSegmentDetector(int _refine, double _scale) {
744        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_6(_refine, _scale));
745    }
746
747    /**
748     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
749     *
750     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
751     * to edit those, as to tailor it for their own application.
752     *
753     * @param _refine The way found lines will be refined, see #LineSegmentDetectorModes
754     * is chosen.
755     *
756     * <b>Note:</b> Implementation has been removed due original code license conflict
757     * @return automatically generated
758     */
759    public static LineSegmentDetector createLineSegmentDetector(int _refine) {
760        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_7(_refine));
761    }
762
763    /**
764     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
765     *
766     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
767     * to edit those, as to tailor it for their own application.
768     *
769     * is chosen.
770     *
771     * <b>Note:</b> Implementation has been removed due original code license conflict
772     * @return automatically generated
773     */
774    public static LineSegmentDetector createLineSegmentDetector() {
775        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_8());
776    }
777
778
779    //
780    // C++:  Mat cv::getGaussianKernel(int ksize, double sigma, int ktype = CV_64F)
781    //
782
783    /**
784     * Returns Gaussian filter coefficients.
785     *
786     * The function computes and returns the \(\texttt{ksize} \times 1\) matrix of Gaussian filter
787     * coefficients:
788     *
789     * \(G_i= \alpha *e^{-(i-( \texttt{ksize} -1)/2)^2/(2* \texttt{sigma}^2)},\)
790     *
791     * where \(i=0..\texttt{ksize}-1\) and \(\alpha\) is the scale factor chosen so that \(\sum_i G_i=1\).
792     *
793     * Two of such generated kernels can be passed to sepFilter2D. Those functions automatically recognize
794     * smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and handle them accordingly.
795     * You may also use the higher-level GaussianBlur.
796     * @param ksize Aperture size. It should be odd ( \(\texttt{ksize} \mod 2 = 1\) ) and positive.
797     * @param sigma Gaussian standard deviation. If it is non-positive, it is computed from ksize as
798     * {@code sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8}.
799     * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F .
800     * SEE:  sepFilter2D, getDerivKernels, getStructuringElement, GaussianBlur
801     * @return automatically generated
802     */
803    public static Mat getGaussianKernel(int ksize, double sigma, int ktype) {
804        return new Mat(getGaussianKernel_0(ksize, sigma, ktype));
805    }
806
807    /**
808     * Returns Gaussian filter coefficients.
809     *
810     * The function computes and returns the \(\texttt{ksize} \times 1\) matrix of Gaussian filter
811     * coefficients:
812     *
813     * \(G_i= \alpha *e^{-(i-( \texttt{ksize} -1)/2)^2/(2* \texttt{sigma}^2)},\)
814     *
815     * where \(i=0..\texttt{ksize}-1\) and \(\alpha\) is the scale factor chosen so that \(\sum_i G_i=1\).
816     *
817     * Two of such generated kernels can be passed to sepFilter2D. Those functions automatically recognize
818     * smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and handle them accordingly.
819     * You may also use the higher-level GaussianBlur.
820     * @param ksize Aperture size. It should be odd ( \(\texttt{ksize} \mod 2 = 1\) ) and positive.
821     * @param sigma Gaussian standard deviation. If it is non-positive, it is computed from ksize as
822     * {@code sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8}.
823     * SEE:  sepFilter2D, getDerivKernels, getStructuringElement, GaussianBlur
824     * @return automatically generated
825     */
826    public static Mat getGaussianKernel(int ksize, double sigma) {
827        return new Mat(getGaussianKernel_1(ksize, sigma));
828    }
829
830
831    //
832    // C++:  void cv::getDerivKernels(Mat& kx, Mat& ky, int dx, int dy, int ksize, bool normalize = false, int ktype = CV_32F)
833    //
834
835    /**
836     * Returns filter coefficients for computing spatial image derivatives.
837     *
838     * The function computes and returns the filter coefficients for spatial image derivatives. When
839     * {@code ksize=FILTER_SCHARR}, the Scharr \(3 \times 3\) kernels are generated (see #Scharr). Otherwise, Sobel
840     * kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D or to
841     *
842     * @param kx Output matrix of row filter coefficients. It has the type ktype .
843     * @param ky Output matrix of column filter coefficients. It has the type ktype .
844     * @param dx Derivative order in respect of x.
845     * @param dy Derivative order in respect of y.
846     * @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7.
847     * @param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not.
848     * Theoretically, the coefficients should have the denominator \(=2^{ksize*2-dx-dy-2}\). If you are
849     * going to filter floating-point images, you are likely to use the normalized kernels. But if you
850     * compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve
851     * all the fractional bits, you may want to set normalize=false .
852     * @param ktype Type of filter coefficients. It can be CV_32f or CV_64F .
853     */
854    public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize, boolean normalize, int ktype) {
855        getDerivKernels_0(kx.nativeObj, ky.nativeObj, dx, dy, ksize, normalize, ktype);
856    }
857
858    /**
859     * Returns filter coefficients for computing spatial image derivatives.
860     *
861     * The function computes and returns the filter coefficients for spatial image derivatives. When
862     * {@code ksize=FILTER_SCHARR}, the Scharr \(3 \times 3\) kernels are generated (see #Scharr). Otherwise, Sobel
863     * kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D or to
864     *
865     * @param kx Output matrix of row filter coefficients. It has the type ktype .
866     * @param ky Output matrix of column filter coefficients. It has the type ktype .
867     * @param dx Derivative order in respect of x.
868     * @param dy Derivative order in respect of y.
869     * @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7.
870     * @param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not.
871     * Theoretically, the coefficients should have the denominator \(=2^{ksize*2-dx-dy-2}\). If you are
872     * going to filter floating-point images, you are likely to use the normalized kernels. But if you
873     * compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve
874     * all the fractional bits, you may want to set normalize=false .
875     */
876    public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize, boolean normalize) {
877        getDerivKernels_1(kx.nativeObj, ky.nativeObj, dx, dy, ksize, normalize);
878    }
879
880    /**
881     * Returns filter coefficients for computing spatial image derivatives.
882     *
883     * The function computes and returns the filter coefficients for spatial image derivatives. When
884     * {@code ksize=FILTER_SCHARR}, the Scharr \(3 \times 3\) kernels are generated (see #Scharr). Otherwise, Sobel
885     * kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D or to
886     *
887     * @param kx Output matrix of row filter coefficients. It has the type ktype .
888     * @param ky Output matrix of column filter coefficients. It has the type ktype .
889     * @param dx Derivative order in respect of x.
890     * @param dy Derivative order in respect of y.
891     * @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7.
892     * Theoretically, the coefficients should have the denominator \(=2^{ksize*2-dx-dy-2}\). If you are
893     * going to filter floating-point images, you are likely to use the normalized kernels. But if you
894     * compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve
895     * all the fractional bits, you may want to set normalize=false .
896     */
897    public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize) {
898        getDerivKernels_2(kx.nativeObj, ky.nativeObj, dx, dy, ksize);
899    }
900
901
902    //
903    // C++:  Mat cv::getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi = CV_PI*0.5, int ktype = CV_64F)
904    //
905
906    /**
907     * Returns Gabor filter coefficients.
908     *
909     * For more details about gabor filter equations and parameters, see: [Gabor
910     * Filter](http://en.wikipedia.org/wiki/Gabor_filter).
911     *
912     * @param ksize Size of the filter returned.
913     * @param sigma Standard deviation of the gaussian envelope.
914     * @param theta Orientation of the normal to the parallel stripes of a Gabor function.
915     * @param lambd Wavelength of the sinusoidal factor.
916     * @param gamma Spatial aspect ratio.
917     * @param psi Phase offset.
918     * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F .
919     * @return automatically generated
920     */
921    public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi, int ktype) {
922        return new Mat(getGaborKernel_0(ksize.width, ksize.height, sigma, theta, lambd, gamma, psi, ktype));
923    }
924
925    /**
926     * Returns Gabor filter coefficients.
927     *
928     * For more details about gabor filter equations and parameters, see: [Gabor
929     * Filter](http://en.wikipedia.org/wiki/Gabor_filter).
930     *
931     * @param ksize Size of the filter returned.
932     * @param sigma Standard deviation of the gaussian envelope.
933     * @param theta Orientation of the normal to the parallel stripes of a Gabor function.
934     * @param lambd Wavelength of the sinusoidal factor.
935     * @param gamma Spatial aspect ratio.
936     * @param psi Phase offset.
937     * @return automatically generated
938     */
939    public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi) {
940        return new Mat(getGaborKernel_1(ksize.width, ksize.height, sigma, theta, lambd, gamma, psi));
941    }
942
943    /**
944     * Returns Gabor filter coefficients.
945     *
946     * For more details about gabor filter equations and parameters, see: [Gabor
947     * Filter](http://en.wikipedia.org/wiki/Gabor_filter).
948     *
949     * @param ksize Size of the filter returned.
950     * @param sigma Standard deviation of the gaussian envelope.
951     * @param theta Orientation of the normal to the parallel stripes of a Gabor function.
952     * @param lambd Wavelength of the sinusoidal factor.
953     * @param gamma Spatial aspect ratio.
954     * @return automatically generated
955     */
956    public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma) {
957        return new Mat(getGaborKernel_2(ksize.width, ksize.height, sigma, theta, lambd, gamma));
958    }
959
960
961    //
962    // C++:  Mat cv::getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1))
963    //
964
965    /**
966     * Returns a structuring element of the specified size and shape for morphological operations.
967     *
968     * The function constructs and returns the structuring element that can be further passed to #erode,
969     * #dilate or #morphologyEx. But you can also construct an arbitrary binary mask yourself and use it as
970     * the structuring element.
971     *
972     * @param shape Element shape that could be one of #MorphShapes
973     * @param ksize Size of the structuring element.
974     * @param anchor Anchor position within the element. The default value \((-1, -1)\) means that the
975     * anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor
976     * position. In other cases the anchor just regulates how much the result of the morphological
977     * operation is shifted.
978     * @return automatically generated
979     */
980    public static Mat getStructuringElement(int shape, Size ksize, Point anchor) {
981        return new Mat(getStructuringElement_0(shape, ksize.width, ksize.height, anchor.x, anchor.y));
982    }
983
984    /**
985     * Returns a structuring element of the specified size and shape for morphological operations.
986     *
987     * The function constructs and returns the structuring element that can be further passed to #erode,
988     * #dilate or #morphologyEx. But you can also construct an arbitrary binary mask yourself and use it as
989     * the structuring element.
990     *
991     * @param shape Element shape that could be one of #MorphShapes
992     * @param ksize Size of the structuring element.
993     * anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor
994     * position. In other cases the anchor just regulates how much the result of the morphological
995     * operation is shifted.
996     * @return automatically generated
997     */
998    public static Mat getStructuringElement(int shape, Size ksize) {
999        return new Mat(getStructuringElement_1(shape, ksize.width, ksize.height));
1000    }
1001
1002
1003    //
1004    // C++:  void cv::medianBlur(Mat src, Mat& dst, int ksize)
1005    //
1006
1007    /**
1008     * Blurs an image using the median filter.
1009     *
1010     * The function smoothes an image using the median filter with the \(\texttt{ksize} \times
1011     * \texttt{ksize}\) aperture. Each channel of a multi-channel image is processed independently.
1012     * In-place operation is supported.
1013     *
1014     * <b>Note:</b> The median filter uses #BORDER_REPLICATE internally to cope with border pixels, see #BorderTypes
1015     *
1016     * @param src input 1-, 3-, or 4-channel image; when ksize is 3 or 5, the image depth should be
1017     * CV_8U, CV_16U, or CV_32F, for larger aperture sizes, it can only be CV_8U.
1018     * @param dst destination array of the same size and type as src.
1019     * @param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ...
1020     * SEE:  bilateralFilter, blur, boxFilter, GaussianBlur
1021     */
1022    public static void medianBlur(Mat src, Mat dst, int ksize) {
1023        medianBlur_0(src.nativeObj, dst.nativeObj, ksize);
1024    }
1025
1026
1027    //
1028    // C++:  void cv::GaussianBlur(Mat src, Mat& dst, Size ksize, double sigmaX, double sigmaY = 0, int borderType = BORDER_DEFAULT)
1029    //
1030
1031    /**
1032     * Blurs an image using a Gaussian filter.
1033     *
1034     * The function convolves the source image with the specified Gaussian kernel. In-place filtering is
1035     * supported.
1036     *
1037     * @param src input image; the image can have any number of channels, which are processed
1038     * independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
1039     * @param dst output image of the same size and type as src.
1040     * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be
1041     * positive and odd. Or, they can be zero's and then they are computed from sigma.
1042     * @param sigmaX Gaussian kernel standard deviation in X direction.
1043     * @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be
1044     * equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height,
1045     * respectively (see #getGaussianKernel for details); to fully control the result regardless of
1046     * possible future modifications of all this semantics, it is recommended to specify all of ksize,
1047     * sigmaX, and sigmaY.
1048     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
1049     *
1050     * SEE:  sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur
1051     */
1052    public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX, double sigmaY, int borderType) {
1053        GaussianBlur_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX, sigmaY, borderType);
1054    }
1055
1056    /**
1057     * Blurs an image using a Gaussian filter.
1058     *
1059     * The function convolves the source image with the specified Gaussian kernel. In-place filtering is
1060     * supported.
1061     *
1062     * @param src input image; the image can have any number of channels, which are processed
1063     * independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
1064     * @param dst output image of the same size and type as src.
1065     * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be
1066     * positive and odd. Or, they can be zero's and then they are computed from sigma.
1067     * @param sigmaX Gaussian kernel standard deviation in X direction.
1068     * @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be
1069     * equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height,
1070     * respectively (see #getGaussianKernel for details); to fully control the result regardless of
1071     * possible future modifications of all this semantics, it is recommended to specify all of ksize,
1072     * sigmaX, and sigmaY.
1073     *
1074     * SEE:  sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur
1075     */
1076    public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX, double sigmaY) {
1077        GaussianBlur_1(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX, sigmaY);
1078    }
1079
1080    /**
1081     * Blurs an image using a Gaussian filter.
1082     *
1083     * The function convolves the source image with the specified Gaussian kernel. In-place filtering is
1084     * supported.
1085     *
1086     * @param src input image; the image can have any number of channels, which are processed
1087     * independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
1088     * @param dst output image of the same size and type as src.
1089     * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be
1090     * positive and odd. Or, they can be zero's and then they are computed from sigma.
1091     * @param sigmaX Gaussian kernel standard deviation in X direction.
1092     * equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height,
1093     * respectively (see #getGaussianKernel for details); to fully control the result regardless of
1094     * possible future modifications of all this semantics, it is recommended to specify all of ksize,
1095     * sigmaX, and sigmaY.
1096     *
1097     * SEE:  sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur
1098     */
1099    public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX) {
1100        GaussianBlur_2(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX);
1101    }
1102
1103
1104    //
1105    // C++:  void cv::bilateralFilter(Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT)
1106    //
1107
1108    /**
1109     * Applies the bilateral filter to an image.
1110     *
1111     * The function applies bilateral filtering to the input image, as described in
1112     * http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
1113     * bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is
1114     * very slow compared to most filters.
1115     *
1116     * _Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (&lt;
1117     * 10), the filter will not have much effect, whereas if they are large (&gt; 150), they will have a very
1118     * strong effect, making the image look "cartoonish".
1119     *
1120     * _Filter size_: Large filters (d &gt; 5) are very slow, so it is recommended to use d=5 for real-time
1121     * applications, and perhaps d=9 for offline applications that need heavy noise filtering.
1122     *
1123     * This filter does not work inplace.
1124     * @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
1125     * @param dst Destination image of the same size and type as src .
1126     * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
1127     * it is computed from sigmaSpace.
1128     * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
1129     * farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting
1130     * in larger areas of semi-equal color.
1131     * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
1132     * farther pixels will influence each other as long as their colors are close enough (see sigmaColor
1133     * ). When d&gt;0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is
1134     * proportional to sigmaSpace.
1135     * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes
1136     */
1137    public static void bilateralFilter(Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace, int borderType) {
1138        bilateralFilter_0(src.nativeObj, dst.nativeObj, d, sigmaColor, sigmaSpace, borderType);
1139    }
1140
1141    /**
1142     * Applies the bilateral filter to an image.
1143     *
1144     * The function applies bilateral filtering to the input image, as described in
1145     * http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
1146     * bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is
1147     * very slow compared to most filters.
1148     *
1149     * _Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (&lt;
1150     * 10), the filter will not have much effect, whereas if they are large (&gt; 150), they will have a very
1151     * strong effect, making the image look "cartoonish".
1152     *
1153     * _Filter size_: Large filters (d &gt; 5) are very slow, so it is recommended to use d=5 for real-time
1154     * applications, and perhaps d=9 for offline applications that need heavy noise filtering.
1155     *
1156     * This filter does not work inplace.
1157     * @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
1158     * @param dst Destination image of the same size and type as src .
1159     * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
1160     * it is computed from sigmaSpace.
1161     * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
1162     * farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting
1163     * in larger areas of semi-equal color.
1164     * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
1165     * farther pixels will influence each other as long as their colors are close enough (see sigmaColor
1166     * ). When d&gt;0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is
1167     * proportional to sigmaSpace.
1168     */
1169    public static void bilateralFilter(Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace) {
1170        bilateralFilter_1(src.nativeObj, dst.nativeObj, d, sigmaColor, sigmaSpace);
1171    }
1172
1173
1174    //
1175    // C++:  void cv::boxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), bool normalize = true, int borderType = BORDER_DEFAULT)
1176    //
1177
1178    /**
1179     * Blurs an image using the box filter.
1180     *
1181     * The function smooths an image using the kernel:
1182     *
1183     * \(\texttt{K} =  \alpha \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1 \end{bmatrix}\)
1184     *
1185     * where
1186     *
1187     * \(\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} &amp; \texttt{when } \texttt{normalize=true}  \\1 &amp; \texttt{otherwise}\end{cases}\)
1188     *
1189     * Unnormalized box filter is useful for computing various integral characteristics over each pixel
1190     * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
1191     * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.
1192     *
1193     * @param src input image.
1194     * @param dst output image of the same size and type as src.
1195     * @param ddepth the output image depth (-1 to use src.depth()).
1196     * @param ksize blurring kernel size.
1197     * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
1198     * center.
1199     * @param normalize flag, specifying whether the kernel is normalized by its area or not.
1200     * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
1201     * SEE:  blur, bilateralFilter, GaussianBlur, medianBlur, integral
1202     */
1203    public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize, int borderType) {
1204        boxFilter_0(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize, borderType);
1205    }
1206
1207    /**
1208     * Blurs an image using the box filter.
1209     *
1210     * The function smooths an image using the kernel:
1211     *
1212     * \(\texttt{K} =  \alpha \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1 \end{bmatrix}\)
1213     *
1214     * where
1215     *
1216     * \(\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} &amp; \texttt{when } \texttt{normalize=true}  \\1 &amp; \texttt{otherwise}\end{cases}\)
1217     *
1218     * Unnormalized box filter is useful for computing various integral characteristics over each pixel
1219     * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
1220     * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.
1221     *
1222     * @param src input image.
1223     * @param dst output image of the same size and type as src.
1224     * @param ddepth the output image depth (-1 to use src.depth()).
1225     * @param ksize blurring kernel size.
1226     * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
1227     * center.
1228     * @param normalize flag, specifying whether the kernel is normalized by its area or not.
1229     * SEE:  blur, bilateralFilter, GaussianBlur, medianBlur, integral
1230     */
1231    public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize) {
1232        boxFilter_1(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize);
1233    }
1234
1235    /**
1236     * Blurs an image using the box filter.
1237     *
1238     * The function smooths an image using the kernel:
1239     *
1240     * \(\texttt{K} =  \alpha \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1 \end{bmatrix}\)
1241     *
1242     * where
1243     *
1244     * \(\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} &amp; \texttt{when } \texttt{normalize=true}  \\1 &amp; \texttt{otherwise}\end{cases}\)
1245     *
1246     * Unnormalized box filter is useful for computing various integral characteristics over each pixel
1247     * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
1248     * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.
1249     *
1250     * @param src input image.
1251     * @param dst output image of the same size and type as src.
1252     * @param ddepth the output image depth (-1 to use src.depth()).
1253     * @param ksize blurring kernel size.
1254     * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
1255     * center.
1256     * SEE:  blur, bilateralFilter, GaussianBlur, medianBlur, integral
1257     */
1258    public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor) {
1259        boxFilter_2(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y);
1260    }
1261
1262    /**
1263     * Blurs an image using the box filter.
1264     *
1265     * The function smooths an image using the kernel:
1266     *
1267     * \(\texttt{K} =  \alpha \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1 \end{bmatrix}\)
1268     *
1269     * where
1270     *
1271     * \(\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} &amp; \texttt{when } \texttt{normalize=true}  \\1 &amp; \texttt{otherwise}\end{cases}\)
1272     *
1273     * Unnormalized box filter is useful for computing various integral characteristics over each pixel
1274     * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
1275     * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.
1276     *
1277     * @param src input image.
1278     * @param dst output image of the same size and type as src.
1279     * @param ddepth the output image depth (-1 to use src.depth()).
1280     * @param ksize blurring kernel size.
1281     * center.
1282     * SEE:  blur, bilateralFilter, GaussianBlur, medianBlur, integral
1283     */
1284    public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize) {
1285        boxFilter_3(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height);
1286    }
1287
1288
1289    //
1290    // C++:  void cv::sqrBoxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1, -1), bool normalize = true, int borderType = BORDER_DEFAULT)
1291    //
1292
1293    /**
1294     * Calculates the normalized sum of squares of the pixel values overlapping the filter.
1295     *
1296     * For every pixel \( (x, y) \) in the source image, the function calculates the sum of squares of those neighboring
1297     * pixel values which overlap the filter placed over the pixel \( (x, y) \).
1298     *
1299     * The unnormalized square box filter can be useful in computing local image statistics such as the the local
1300     * variance and standard deviation around the neighborhood of a pixel.
1301     *
1302     * @param src input image
1303     * @param dst output image of the same size and type as _src
1304     * @param ddepth the output image depth (-1 to use src.depth())
1305     * @param ksize kernel size
1306     * @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel
1307     * center.
1308     * @param normalize flag, specifying whether the kernel is to be normalized by it's area or not.
1309     * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
1310     * SEE: boxFilter
1311     */
1312    public static void sqrBoxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize, int borderType) {
1313        sqrBoxFilter_0(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize, borderType);
1314    }
1315
1316    /**
1317     * Calculates the normalized sum of squares of the pixel values overlapping the filter.
1318     *
1319     * For every pixel \( (x, y) \) in the source image, the function calculates the sum of squares of those neighboring
1320     * pixel values which overlap the filter placed over the pixel \( (x, y) \).
1321     *
1322     * The unnormalized square box filter can be useful in computing local image statistics such as the the local
1323     * variance and standard deviation around the neighborhood of a pixel.
1324     *
1325     * @param src input image
1326     * @param dst output image of the same size and type as _src
1327     * @param ddepth the output image depth (-1 to use src.depth())
1328     * @param ksize kernel size
1329     * @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel
1330     * center.
1331     * @param normalize flag, specifying whether the kernel is to be normalized by it's area or not.
1332     * SEE: boxFilter
1333     */
1334    public static void sqrBoxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize) {
1335        sqrBoxFilter_1(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize);
1336    }
1337
1338    /**
1339     * Calculates the normalized sum of squares of the pixel values overlapping the filter.
1340     *
1341     * For every pixel \( (x, y) \) in the source image, the function calculates the sum of squares of those neighboring
1342     * pixel values which overlap the filter placed over the pixel \( (x, y) \).
1343     *
1344     * The unnormalized square box filter can be useful in computing local image statistics such as the the local
1345     * variance and standard deviation around the neighborhood of a pixel.
1346     *
1347     * @param src input image
1348     * @param dst output image of the same size and type as _src
1349     * @param ddepth the output image depth (-1 to use src.depth())
1350     * @param ksize kernel size
1351     * @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel
1352     * center.
1353     * SEE: boxFilter
1354     */
1355    public static void sqrBoxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor) {
1356        sqrBoxFilter_2(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y);
1357    }
1358
1359    /**
1360     * Calculates the normalized sum of squares of the pixel values overlapping the filter.
1361     *
1362     * For every pixel \( (x, y) \) in the source image, the function calculates the sum of squares of those neighboring
1363     * pixel values which overlap the filter placed over the pixel \( (x, y) \).
1364     *
1365     * The unnormalized square box filter can be useful in computing local image statistics such as the the local
1366     * variance and standard deviation around the neighborhood of a pixel.
1367     *
1368     * @param src input image
1369     * @param dst output image of the same size and type as _src
1370     * @param ddepth the output image depth (-1 to use src.depth())
1371     * @param ksize kernel size
1372     * center.
1373     * SEE: boxFilter
1374     */
1375    public static void sqrBoxFilter(Mat src, Mat dst, int ddepth, Size ksize) {
1376        sqrBoxFilter_3(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height);
1377    }
1378
1379
1380    //
1381    // C++:  void cv::blur(Mat src, Mat& dst, Size ksize, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT)
1382    //
1383
1384    /**
1385     * Blurs an image using the normalized box filter.
1386     *
1387     * The function smooths an image using the kernel:
1388     *
1389     * \(\texttt{K} =  \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \end{bmatrix}\)
1390     *
1391     * The call {@code blur(src, dst, ksize, anchor, borderType)} is equivalent to `boxFilter(src, dst, src.type(), ksize,
1392     * anchor, true, borderType)`.
1393     *
1394     * @param src input image; it can have any number of channels, which are processed independently, but
1395     * the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
1396     * @param dst output image of the same size and type as src.
1397     * @param ksize blurring kernel size.
1398     * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
1399     * center.
1400     * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
1401     * SEE:  boxFilter, bilateralFilter, GaussianBlur, medianBlur
1402     */
1403    public static void blur(Mat src, Mat dst, Size ksize, Point anchor, int borderType) {
1404        blur_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, anchor.x, anchor.y, borderType);
1405    }
1406
1407    /**
1408     * Blurs an image using the normalized box filter.
1409     *
1410     * The function smooths an image using the kernel:
1411     *
1412     * \(\texttt{K} =  \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \end{bmatrix}\)
1413     *
1414     * The call {@code blur(src, dst, ksize, anchor, borderType)} is equivalent to `boxFilter(src, dst, src.type(), ksize,
1415     * anchor, true, borderType)`.
1416     *
1417     * @param src input image; it can have any number of channels, which are processed independently, but
1418     * the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
1419     * @param dst output image of the same size and type as src.
1420     * @param ksize blurring kernel size.
1421     * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
1422     * center.
1423     * SEE:  boxFilter, bilateralFilter, GaussianBlur, medianBlur
1424     */
1425    public static void blur(Mat src, Mat dst, Size ksize, Point anchor) {
1426        blur_1(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, anchor.x, anchor.y);
1427    }
1428
1429    /**
1430     * Blurs an image using the normalized box filter.
1431     *
1432     * The function smooths an image using the kernel:
1433     *
1434     * \(\texttt{K} =  \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \end{bmatrix}\)
1435     *
1436     * The call {@code blur(src, dst, ksize, anchor, borderType)} is equivalent to `boxFilter(src, dst, src.type(), ksize,
1437     * anchor, true, borderType)`.
1438     *
1439     * @param src input image; it can have any number of channels, which are processed independently, but
1440     * the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
1441     * @param dst output image of the same size and type as src.
1442     * @param ksize blurring kernel size.
1443     * center.
1444     * SEE:  boxFilter, bilateralFilter, GaussianBlur, medianBlur
1445     */
1446    public static void blur(Mat src, Mat dst, Size ksize) {
1447        blur_2(src.nativeObj, dst.nativeObj, ksize.width, ksize.height);
1448    }
1449
1450
1451    //
1452    // C++:  void cv::filter2D(Mat src, Mat& dst, int ddepth, Mat kernel, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT)
1453    //
1454
1455    /**
1456     * Convolves an image with the kernel.
1457     *
1458     * The function applies an arbitrary linear filter to an image. In-place operation is supported. When
1459     * the aperture is partially outside the image, the function interpolates outlier pixel values
1460     * according to the specified border mode.
1461     *
1462     * The function does actually compute correlation, not the convolution:
1463     *
1464     * \(\texttt{dst} (x,y) =  \sum _{ \substack{0\leq x' &lt; \texttt{kernel.cols}\\{0\leq y' &lt; \texttt{kernel.rows}}}}  \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\)
1465     *
1466     * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
1467     * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
1468     * anchor.y - 1)`.
1469     *
1470     * The function uses the DFT-based algorithm in case of sufficiently large kernels (~{@code 11 x 11} or
1471     * larger) and the direct algorithm for small kernels.
1472     *
1473     * @param src input image.
1474     * @param dst output image of the same size and the same number of channels as src.
1475     * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations"
1476     * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point
1477     * matrix; if you want to apply different kernels to different channels, split the image into
1478     * separate color planes using split and process them individually.
1479     * @param anchor anchor of the kernel that indicates the relative position of a filtered point within
1480     * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor
1481     * is at the kernel center.
1482     * @param delta optional value added to the filtered pixels before storing them in dst.
1483     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
1484     * SEE:  sepFilter2D, dft, matchTemplate
1485     */
1486    public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta, int borderType) {
1487        filter2D_0(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y, delta, borderType);
1488    }
1489
1490    /**
1491     * Convolves an image with the kernel.
1492     *
1493     * The function applies an arbitrary linear filter to an image. In-place operation is supported. When
1494     * the aperture is partially outside the image, the function interpolates outlier pixel values
1495     * according to the specified border mode.
1496     *
1497     * The function does actually compute correlation, not the convolution:
1498     *
1499     * \(\texttt{dst} (x,y) =  \sum _{ \substack{0\leq x' &lt; \texttt{kernel.cols}\\{0\leq y' &lt; \texttt{kernel.rows}}}}  \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\)
1500     *
1501     * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
1502     * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
1503     * anchor.y - 1)`.
1504     *
1505     * The function uses the DFT-based algorithm in case of sufficiently large kernels (~{@code 11 x 11} or
1506     * larger) and the direct algorithm for small kernels.
1507     *
1508     * @param src input image.
1509     * @param dst output image of the same size and the same number of channels as src.
1510     * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations"
1511     * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point
1512     * matrix; if you want to apply different kernels to different channels, split the image into
1513     * separate color planes using split and process them individually.
1514     * @param anchor anchor of the kernel that indicates the relative position of a filtered point within
1515     * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor
1516     * is at the kernel center.
1517     * @param delta optional value added to the filtered pixels before storing them in dst.
1518     * SEE:  sepFilter2D, dft, matchTemplate
1519     */
1520    public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta) {
1521        filter2D_1(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y, delta);
1522    }
1523
1524    /**
1525     * Convolves an image with the kernel.
1526     *
1527     * The function applies an arbitrary linear filter to an image. In-place operation is supported. When
1528     * the aperture is partially outside the image, the function interpolates outlier pixel values
1529     * according to the specified border mode.
1530     *
1531     * The function does actually compute correlation, not the convolution:
1532     *
1533     * \(\texttt{dst} (x,y) =  \sum _{ \substack{0\leq x' &lt; \texttt{kernel.cols}\\{0\leq y' &lt; \texttt{kernel.rows}}}}  \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\)
1534     *
1535     * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
1536     * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
1537     * anchor.y - 1)`.
1538     *
1539     * The function uses the DFT-based algorithm in case of sufficiently large kernels (~{@code 11 x 11} or
1540     * larger) and the direct algorithm for small kernels.
1541     *
1542     * @param src input image.
1543     * @param dst output image of the same size and the same number of channels as src.
1544     * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations"
1545     * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point
1546     * matrix; if you want to apply different kernels to different channels, split the image into
1547     * separate color planes using split and process them individually.
1548     * @param anchor anchor of the kernel that indicates the relative position of a filtered point within
1549     * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor
1550     * is at the kernel center.
1551     * SEE:  sepFilter2D, dft, matchTemplate
1552     */
1553    public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor) {
1554        filter2D_2(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y);
1555    }
1556
1557    /**
1558     * Convolves an image with the kernel.
1559     *
1560     * The function applies an arbitrary linear filter to an image. In-place operation is supported. When
1561     * the aperture is partially outside the image, the function interpolates outlier pixel values
1562     * according to the specified border mode.
1563     *
1564     * The function does actually compute correlation, not the convolution:
1565     *
1566     * \(\texttt{dst} (x,y) =  \sum _{ \substack{0\leq x' &lt; \texttt{kernel.cols}\\{0\leq y' &lt; \texttt{kernel.rows}}}}  \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\)
1567     *
1568     * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
1569     * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
1570     * anchor.y - 1)`.
1571     *
1572     * The function uses the DFT-based algorithm in case of sufficiently large kernels (~{@code 11 x 11} or
1573     * larger) and the direct algorithm for small kernels.
1574     *
1575     * @param src input image.
1576     * @param dst output image of the same size and the same number of channels as src.
1577     * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations"
1578     * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point
1579     * matrix; if you want to apply different kernels to different channels, split the image into
1580     * separate color planes using split and process them individually.
1581     * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor
1582     * is at the kernel center.
1583     * SEE:  sepFilter2D, dft, matchTemplate
1584     */
1585    public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel) {
1586        filter2D_3(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj);
1587    }
1588
1589
1590    //
1591    // C++:  void cv::sepFilter2D(Mat src, Mat& dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT)
1592    //
1593
1594    /**
1595     * Applies a separable linear filter to an image.
1596     *
1597     * The function applies a separable linear filter to the image. That is, first, every row of src is
1598     * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D
1599     * kernel kernelY. The final result shifted by delta is stored in dst .
1600     *
1601     * @param src Source image.
1602     * @param dst Destination image of the same size and the same number of channels as src .
1603     * @param ddepth Destination image depth, see REF: filter_depths "combinations"
1604     * @param kernelX Coefficients for filtering each row.
1605     * @param kernelY Coefficients for filtering each column.
1606     * @param anchor Anchor position within the kernel. The default value \((-1,-1)\) means that the anchor
1607     * is at the kernel center.
1608     * @param delta Value added to the filtered results before storing them.
1609     * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
1610     * SEE:  filter2D, Sobel, GaussianBlur, boxFilter, blur
1611     */
1612    public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta, int borderType) {
1613        sepFilter2D_0(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj, anchor.x, anchor.y, delta, borderType);
1614    }
1615
1616    /**
1617     * Applies a separable linear filter to an image.
1618     *
1619     * The function applies a separable linear filter to the image. That is, first, every row of src is
1620     * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D
1621     * kernel kernelY. The final result shifted by delta is stored in dst .
1622     *
1623     * @param src Source image.
1624     * @param dst Destination image of the same size and the same number of channels as src .
1625     * @param ddepth Destination image depth, see REF: filter_depths "combinations"
1626     * @param kernelX Coefficients for filtering each row.
1627     * @param kernelY Coefficients for filtering each column.
1628     * @param anchor Anchor position within the kernel. The default value \((-1,-1)\) means that the anchor
1629     * is at the kernel center.
1630     * @param delta Value added to the filtered results before storing them.
1631     * SEE:  filter2D, Sobel, GaussianBlur, boxFilter, blur
1632     */
1633    public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta) {
1634        sepFilter2D_1(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj, anchor.x, anchor.y, delta);
1635    }
1636
1637    /**
1638     * Applies a separable linear filter to an image.
1639     *
1640     * The function applies a separable linear filter to the image. That is, first, every row of src is
1641     * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D
1642     * kernel kernelY. The final result shifted by delta is stored in dst .
1643     *
1644     * @param src Source image.
1645     * @param dst Destination image of the same size and the same number of channels as src .
1646     * @param ddepth Destination image depth, see REF: filter_depths "combinations"
1647     * @param kernelX Coefficients for filtering each row.
1648     * @param kernelY Coefficients for filtering each column.
1649     * @param anchor Anchor position within the kernel. The default value \((-1,-1)\) means that the anchor
1650     * is at the kernel center.
1651     * SEE:  filter2D, Sobel, GaussianBlur, boxFilter, blur
1652     */
1653    public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor) {
1654        sepFilter2D_2(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj, anchor.x, anchor.y);
1655    }
1656
1657    /**
1658     * Applies a separable linear filter to an image.
1659     *
1660     * The function applies a separable linear filter to the image. That is, first, every row of src is
1661     * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D
1662     * kernel kernelY. The final result shifted by delta is stored in dst .
1663     *
1664     * @param src Source image.
1665     * @param dst Destination image of the same size and the same number of channels as src .
1666     * @param ddepth Destination image depth, see REF: filter_depths "combinations"
1667     * @param kernelX Coefficients for filtering each row.
1668     * @param kernelY Coefficients for filtering each column.
1669     * is at the kernel center.
1670     * SEE:  filter2D, Sobel, GaussianBlur, boxFilter, blur
1671     */
1672    public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY) {
1673        sepFilter2D_3(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj);
1674    }
1675
1676
1677    //
1678    // C++:  void cv::Sobel(Mat src, Mat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
1679    //
1680
1681    /**
1682     * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
1683     *
1684     * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to
1685     * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\)
1686     * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first
1687     * or the second x- or y- derivatives.
1688     *
1689     * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr
1690     * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is
1691     *
1692     * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\)
1693     *
1694     * for the x-derivative, or transposed for the y-derivative.
1695     *
1696     * The function calculates an image derivative by convolving the image with the appropriate kernel:
1697     *
1698     * \(\texttt{dst} =  \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\)
1699     *
1700     * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
1701     * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
1702     * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
1703     * case corresponds to a kernel of:
1704     *
1705     * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\)
1706     *
1707     * The second case corresponds to a kernel of:
1708     *
1709     * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\)
1710     *
1711     * @param src input image.
1712     * @param dst output image of the same size and the same number of channels as src .
1713     * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
1714     *     8-bit input images it will result in truncated derivatives.
1715     * @param dx order of the derivative x.
1716     * @param dy order of the derivative y.
1717     * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
1718     * @param scale optional scale factor for the computed derivative values; by default, no scaling is
1719     * applied (see #getDerivKernels for details).
1720     * @param delta optional delta value that is added to the results prior to storing them in dst.
1721     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
1722     * SEE:  Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
1723     */
1724    public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType) {
1725        Sobel_0(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale, delta, borderType);
1726    }
1727
1728    /**
1729     * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
1730     *
1731     * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to
1732     * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\)
1733     * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first
1734     * or the second x- or y- derivatives.
1735     *
1736     * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr
1737     * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is
1738     *
1739     * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\)
1740     *
1741     * for the x-derivative, or transposed for the y-derivative.
1742     *
1743     * The function calculates an image derivative by convolving the image with the appropriate kernel:
1744     *
1745     * \(\texttt{dst} =  \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\)
1746     *
1747     * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
1748     * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
1749     * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
1750     * case corresponds to a kernel of:
1751     *
1752     * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\)
1753     *
1754     * The second case corresponds to a kernel of:
1755     *
1756     * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\)
1757     *
1758     * @param src input image.
1759     * @param dst output image of the same size and the same number of channels as src .
1760     * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
1761     *     8-bit input images it will result in truncated derivatives.
1762     * @param dx order of the derivative x.
1763     * @param dy order of the derivative y.
1764     * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
1765     * @param scale optional scale factor for the computed derivative values; by default, no scaling is
1766     * applied (see #getDerivKernels for details).
1767     * @param delta optional delta value that is added to the results prior to storing them in dst.
1768     * SEE:  Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
1769     */
1770    public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta) {
1771        Sobel_1(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale, delta);
1772    }
1773
1774    /**
1775     * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
1776     *
1777     * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to
1778     * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\)
1779     * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first
1780     * or the second x- or y- derivatives.
1781     *
1782     * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr
1783     * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is
1784     *
1785     * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\)
1786     *
1787     * for the x-derivative, or transposed for the y-derivative.
1788     *
1789     * The function calculates an image derivative by convolving the image with the appropriate kernel:
1790     *
1791     * \(\texttt{dst} =  \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\)
1792     *
1793     * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
1794     * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
1795     * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
1796     * case corresponds to a kernel of:
1797     *
1798     * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\)
1799     *
1800     * The second case corresponds to a kernel of:
1801     *
1802     * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\)
1803     *
1804     * @param src input image.
1805     * @param dst output image of the same size and the same number of channels as src .
1806     * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
1807     *     8-bit input images it will result in truncated derivatives.
1808     * @param dx order of the derivative x.
1809     * @param dy order of the derivative y.
1810     * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
1811     * @param scale optional scale factor for the computed derivative values; by default, no scaling is
1812     * applied (see #getDerivKernels for details).
1813     * SEE:  Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
1814     */
1815    public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale) {
1816        Sobel_2(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale);
1817    }
1818
1819    /**
1820     * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
1821     *
1822     * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to
1823     * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\)
1824     * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first
1825     * or the second x- or y- derivatives.
1826     *
1827     * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr
1828     * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is
1829     *
1830     * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\)
1831     *
1832     * for the x-derivative, or transposed for the y-derivative.
1833     *
1834     * The function calculates an image derivative by convolving the image with the appropriate kernel:
1835     *
1836     * \(\texttt{dst} =  \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\)
1837     *
1838     * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
1839     * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
1840     * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
1841     * case corresponds to a kernel of:
1842     *
1843     * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\)
1844     *
1845     * The second case corresponds to a kernel of:
1846     *
1847     * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\)
1848     *
1849     * @param src input image.
1850     * @param dst output image of the same size and the same number of channels as src .
1851     * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
1852     *     8-bit input images it will result in truncated derivatives.
1853     * @param dx order of the derivative x.
1854     * @param dy order of the derivative y.
1855     * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
1856     * applied (see #getDerivKernels for details).
1857     * SEE:  Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
1858     */
1859    public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize) {
1860        Sobel_3(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize);
1861    }
1862
1863    /**
1864     * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
1865     *
1866     * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to
1867     * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\)
1868     * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first
1869     * or the second x- or y- derivatives.
1870     *
1871     * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr
1872     * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is
1873     *
1874     * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\)
1875     *
1876     * for the x-derivative, or transposed for the y-derivative.
1877     *
1878     * The function calculates an image derivative by convolving the image with the appropriate kernel:
1879     *
1880     * \(\texttt{dst} =  \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\)
1881     *
1882     * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
1883     * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
1884     * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
1885     * case corresponds to a kernel of:
1886     *
1887     * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\)
1888     *
1889     * The second case corresponds to a kernel of:
1890     *
1891     * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\)
1892     *
1893     * @param src input image.
1894     * @param dst output image of the same size and the same number of channels as src .
1895     * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
1896     *     8-bit input images it will result in truncated derivatives.
1897     * @param dx order of the derivative x.
1898     * @param dy order of the derivative y.
1899     * applied (see #getDerivKernels for details).
1900     * SEE:  Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
1901     */
1902    public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy) {
1903        Sobel_4(src.nativeObj, dst.nativeObj, ddepth, dx, dy);
1904    }
1905
1906
1907    //
1908    // C++:  void cv::spatialGradient(Mat src, Mat& dx, Mat& dy, int ksize = 3, int borderType = BORDER_DEFAULT)
1909    //
1910
1911    /**
1912     * Calculates the first order image derivative in both x and y using a Sobel operator
1913     *
1914     * Equivalent to calling:
1915     *
1916     * <code>
1917     * Sobel( src, dx, CV_16SC1, 1, 0, 3 );
1918     * Sobel( src, dy, CV_16SC1, 0, 1, 3 );
1919     * </code>
1920     *
1921     * @param src input image.
1922     * @param dx output image with first-order derivative in x.
1923     * @param dy output image with first-order derivative in y.
1924     * @param ksize size of Sobel kernel. It must be 3.
1925     * @param borderType pixel extrapolation method, see #BorderTypes.
1926     *                   Only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported.
1927     *
1928     * SEE: Sobel
1929     */
1930    public static void spatialGradient(Mat src, Mat dx, Mat dy, int ksize, int borderType) {
1931        spatialGradient_0(src.nativeObj, dx.nativeObj, dy.nativeObj, ksize, borderType);
1932    }
1933
1934    /**
1935     * Calculates the first order image derivative in both x and y using a Sobel operator
1936     *
1937     * Equivalent to calling:
1938     *
1939     * <code>
1940     * Sobel( src, dx, CV_16SC1, 1, 0, 3 );
1941     * Sobel( src, dy, CV_16SC1, 0, 1, 3 );
1942     * </code>
1943     *
1944     * @param src input image.
1945     * @param dx output image with first-order derivative in x.
1946     * @param dy output image with first-order derivative in y.
1947     * @param ksize size of Sobel kernel. It must be 3.
1948     *                   Only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported.
1949     *
1950     * SEE: Sobel
1951     */
1952    public static void spatialGradient(Mat src, Mat dx, Mat dy, int ksize) {
1953        spatialGradient_1(src.nativeObj, dx.nativeObj, dy.nativeObj, ksize);
1954    }
1955
1956    /**
1957     * Calculates the first order image derivative in both x and y using a Sobel operator
1958     *
1959     * Equivalent to calling:
1960     *
1961     * <code>
1962     * Sobel( src, dx, CV_16SC1, 1, 0, 3 );
1963     * Sobel( src, dy, CV_16SC1, 0, 1, 3 );
1964     * </code>
1965     *
1966     * @param src input image.
1967     * @param dx output image with first-order derivative in x.
1968     * @param dy output image with first-order derivative in y.
1969     *                   Only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported.
1970     *
1971     * SEE: Sobel
1972     */
1973    public static void spatialGradient(Mat src, Mat dx, Mat dy) {
1974        spatialGradient_2(src.nativeObj, dx.nativeObj, dy.nativeObj);
1975    }
1976
1977
1978    //
1979    // C++:  void cv::Scharr(Mat src, Mat& dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
1980    //
1981
1982    /**
1983     * Calculates the first x- or y- image derivative using Scharr operator.
1984     *
1985     * The function computes the first x- or y- spatial image derivative using the Scharr operator. The
1986     * call
1987     *
1988     * \(\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\)
1989     *
1990     * is equivalent to
1991     *
1992     * \(\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\)
1993     *
1994     * @param src input image.
1995     * @param dst output image of the same size and the same number of channels as src.
1996     * @param ddepth output image depth, see REF: filter_depths "combinations"
1997     * @param dx order of the derivative x.
1998     * @param dy order of the derivative y.
1999     * @param scale optional scale factor for the computed derivative values; by default, no scaling is
2000     * applied (see #getDerivKernels for details).
2001     * @param delta optional delta value that is added to the results prior to storing them in dst.
2002     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
2003     * SEE:  cartToPolar
2004     */
2005    public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale, double delta, int borderType) {
2006        Scharr_0(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale, delta, borderType);
2007    }
2008
2009    /**
2010     * Calculates the first x- or y- image derivative using Scharr operator.
2011     *
2012     * The function computes the first x- or y- spatial image derivative using the Scharr operator. The
2013     * call
2014     *
2015     * \(\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\)
2016     *
2017     * is equivalent to
2018     *
2019     * \(\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\)
2020     *
2021     * @param src input image.
2022     * @param dst output image of the same size and the same number of channels as src.
2023     * @param ddepth output image depth, see REF: filter_depths "combinations"
2024     * @param dx order of the derivative x.
2025     * @param dy order of the derivative y.
2026     * @param scale optional scale factor for the computed derivative values; by default, no scaling is
2027     * applied (see #getDerivKernels for details).
2028     * @param delta optional delta value that is added to the results prior to storing them in dst.
2029     * SEE:  cartToPolar
2030     */
2031    public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale, double delta) {
2032        Scharr_1(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale, delta);
2033    }
2034
2035    /**
2036     * Calculates the first x- or y- image derivative using Scharr operator.
2037     *
2038     * The function computes the first x- or y- spatial image derivative using the Scharr operator. The
2039     * call
2040     *
2041     * \(\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\)
2042     *
2043     * is equivalent to
2044     *
2045     * \(\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\)
2046     *
2047     * @param src input image.
2048     * @param dst output image of the same size and the same number of channels as src.
2049     * @param ddepth output image depth, see REF: filter_depths "combinations"
2050     * @param dx order of the derivative x.
2051     * @param dy order of the derivative y.
2052     * @param scale optional scale factor for the computed derivative values; by default, no scaling is
2053     * applied (see #getDerivKernels for details).
2054     * SEE:  cartToPolar
2055     */
2056    public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale) {
2057        Scharr_2(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale);
2058    }
2059
2060    /**
2061     * Calculates the first x- or y- image derivative using Scharr operator.
2062     *
2063     * The function computes the first x- or y- spatial image derivative using the Scharr operator. The
2064     * call
2065     *
2066     * \(\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\)
2067     *
2068     * is equivalent to
2069     *
2070     * \(\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\)
2071     *
2072     * @param src input image.
2073     * @param dst output image of the same size and the same number of channels as src.
2074     * @param ddepth output image depth, see REF: filter_depths "combinations"
2075     * @param dx order of the derivative x.
2076     * @param dy order of the derivative y.
2077     * applied (see #getDerivKernels for details).
2078     * SEE:  cartToPolar
2079     */
2080    public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy) {
2081        Scharr_3(src.nativeObj, dst.nativeObj, ddepth, dx, dy);
2082    }
2083
2084
2085    //
2086    // C++:  void cv::Laplacian(Mat src, Mat& dst, int ddepth, int ksize = 1, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
2087    //
2088
2089    /**
2090     * Calculates the Laplacian of an image.
2091     *
2092     * The function calculates the Laplacian of the source image by adding up the second x and y
2093     * derivatives calculated using the Sobel operator:
2094     *
2095     * \(\texttt{dst} =  \Delta \texttt{src} =  \frac{\partial^2 \texttt{src}}{\partial x^2} +  \frac{\partial^2 \texttt{src}}{\partial y^2}\)
2096     *
2097     * This is done when {@code ksize &gt; 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image
2098     * with the following \(3 \times 3\) aperture:
2099     *
2100     * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\)
2101     *
2102     * @param src Source image.
2103     * @param dst Destination image of the same size and the same number of channels as src .
2104     * @param ddepth Desired depth of the destination image.
2105     * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
2106     * details. The size must be positive and odd.
2107     * @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
2108     * applied. See #getDerivKernels for details.
2109     * @param delta Optional delta value that is added to the results prior to storing them in dst .
2110     * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
2111     * SEE:  Sobel, Scharr
2112     */
2113    public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale, double delta, int borderType) {
2114        Laplacian_0(src.nativeObj, dst.nativeObj, ddepth, ksize, scale, delta, borderType);
2115    }
2116
2117    /**
2118     * Calculates the Laplacian of an image.
2119     *
2120     * The function calculates the Laplacian of the source image by adding up the second x and y
2121     * derivatives calculated using the Sobel operator:
2122     *
2123     * \(\texttt{dst} =  \Delta \texttt{src} =  \frac{\partial^2 \texttt{src}}{\partial x^2} +  \frac{\partial^2 \texttt{src}}{\partial y^2}\)
2124     *
2125     * This is done when {@code ksize &gt; 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image
2126     * with the following \(3 \times 3\) aperture:
2127     *
2128     * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\)
2129     *
2130     * @param src Source image.
2131     * @param dst Destination image of the same size and the same number of channels as src .
2132     * @param ddepth Desired depth of the destination image.
2133     * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
2134     * details. The size must be positive and odd.
2135     * @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
2136     * applied. See #getDerivKernels for details.
2137     * @param delta Optional delta value that is added to the results prior to storing them in dst .
2138     * SEE:  Sobel, Scharr
2139     */
2140    public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale, double delta) {
2141        Laplacian_1(src.nativeObj, dst.nativeObj, ddepth, ksize, scale, delta);
2142    }
2143
2144    /**
2145     * Calculates the Laplacian of an image.
2146     *
2147     * The function calculates the Laplacian of the source image by adding up the second x and y
2148     * derivatives calculated using the Sobel operator:
2149     *
2150     * \(\texttt{dst} =  \Delta \texttt{src} =  \frac{\partial^2 \texttt{src}}{\partial x^2} +  \frac{\partial^2 \texttt{src}}{\partial y^2}\)
2151     *
2152     * This is done when {@code ksize &gt; 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image
2153     * with the following \(3 \times 3\) aperture:
2154     *
2155     * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\)
2156     *
2157     * @param src Source image.
2158     * @param dst Destination image of the same size and the same number of channels as src .
2159     * @param ddepth Desired depth of the destination image.
2160     * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
2161     * details. The size must be positive and odd.
2162     * @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
2163     * applied. See #getDerivKernels for details.
2164     * SEE:  Sobel, Scharr
2165     */
2166    public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale) {
2167        Laplacian_2(src.nativeObj, dst.nativeObj, ddepth, ksize, scale);
2168    }
2169
2170    /**
2171     * Calculates the Laplacian of an image.
2172     *
2173     * The function calculates the Laplacian of the source image by adding up the second x and y
2174     * derivatives calculated using the Sobel operator:
2175     *
2176     * \(\texttt{dst} =  \Delta \texttt{src} =  \frac{\partial^2 \texttt{src}}{\partial x^2} +  \frac{\partial^2 \texttt{src}}{\partial y^2}\)
2177     *
2178     * This is done when {@code ksize &gt; 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image
2179     * with the following \(3 \times 3\) aperture:
2180     *
2181     * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\)
2182     *
2183     * @param src Source image.
2184     * @param dst Destination image of the same size and the same number of channels as src .
2185     * @param ddepth Desired depth of the destination image.
2186     * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
2187     * details. The size must be positive and odd.
2188     * applied. See #getDerivKernels for details.
2189     * SEE:  Sobel, Scharr
2190     */
2191    public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize) {
2192        Laplacian_3(src.nativeObj, dst.nativeObj, ddepth, ksize);
2193    }
2194
2195    /**
2196     * Calculates the Laplacian of an image.
2197     *
2198     * The function calculates the Laplacian of the source image by adding up the second x and y
2199     * derivatives calculated using the Sobel operator:
2200     *
2201     * \(\texttt{dst} =  \Delta \texttt{src} =  \frac{\partial^2 \texttt{src}}{\partial x^2} +  \frac{\partial^2 \texttt{src}}{\partial y^2}\)
2202     *
2203     * This is done when {@code ksize &gt; 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image
2204     * with the following \(3 \times 3\) aperture:
2205     *
2206     * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\)
2207     *
2208     * @param src Source image.
2209     * @param dst Destination image of the same size and the same number of channels as src .
2210     * @param ddepth Desired depth of the destination image.
2211     * details. The size must be positive and odd.
2212     * applied. See #getDerivKernels for details.
2213     * SEE:  Sobel, Scharr
2214     */
2215    public static void Laplacian(Mat src, Mat dst, int ddepth) {
2216        Laplacian_4(src.nativeObj, dst.nativeObj, ddepth);
2217    }
2218
2219
2220    //
2221    // C++:  void cv::Canny(Mat image, Mat& edges, double threshold1, double threshold2, int apertureSize = 3, bool L2gradient = false)
2222    //
2223
2224    /**
2225     * Finds edges in an image using the Canny algorithm CITE: Canny86 .
2226     *
2227     * The function finds edges in the input image and marks them in the output map edges using the
2228     * Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
2229     * largest value is used to find initial segments of strong edges. See
2230     * &lt;http://en.wikipedia.org/wiki/Canny_edge_detector&gt;
2231     *
2232     * @param image 8-bit input image.
2233     * @param edges output edge map; single channels 8-bit image, which has the same size as image .
2234     * @param threshold1 first threshold for the hysteresis procedure.
2235     * @param threshold2 second threshold for the hysteresis procedure.
2236     * @param apertureSize aperture size for the Sobel operator.
2237     * @param L2gradient a flag, indicating whether a more accurate \(L_2\) norm
2238     * \(=\sqrt{(dI/dx)^2 + (dI/dy)^2}\) should be used to calculate the image gradient magnitude (
2239     * L2gradient=true ), or whether the default \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is enough (
2240     * L2gradient=false ).
2241     */
2242    public static void Canny(Mat image, Mat edges, double threshold1, double threshold2, int apertureSize, boolean L2gradient) {
2243        Canny_0(image.nativeObj, edges.nativeObj, threshold1, threshold2, apertureSize, L2gradient);
2244    }
2245
2246    /**
2247     * Finds edges in an image using the Canny algorithm CITE: Canny86 .
2248     *
2249     * The function finds edges in the input image and marks them in the output map edges using the
2250     * Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
2251     * largest value is used to find initial segments of strong edges. See
2252     * &lt;http://en.wikipedia.org/wiki/Canny_edge_detector&gt;
2253     *
2254     * @param image 8-bit input image.
2255     * @param edges output edge map; single channels 8-bit image, which has the same size as image .
2256     * @param threshold1 first threshold for the hysteresis procedure.
2257     * @param threshold2 second threshold for the hysteresis procedure.
2258     * @param apertureSize aperture size for the Sobel operator.
2259     * \(=\sqrt{(dI/dx)^2 + (dI/dy)^2}\) should be used to calculate the image gradient magnitude (
2260     * L2gradient=true ), or whether the default \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is enough (
2261     * L2gradient=false ).
2262     */
2263    public static void Canny(Mat image, Mat edges, double threshold1, double threshold2, int apertureSize) {
2264        Canny_1(image.nativeObj, edges.nativeObj, threshold1, threshold2, apertureSize);
2265    }
2266
2267    /**
2268     * Finds edges in an image using the Canny algorithm CITE: Canny86 .
2269     *
2270     * The function finds edges in the input image and marks them in the output map edges using the
2271     * Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
2272     * largest value is used to find initial segments of strong edges. See
2273     * &lt;http://en.wikipedia.org/wiki/Canny_edge_detector&gt;
2274     *
2275     * @param image 8-bit input image.
2276     * @param edges output edge map; single channels 8-bit image, which has the same size as image .
2277     * @param threshold1 first threshold for the hysteresis procedure.
2278     * @param threshold2 second threshold for the hysteresis procedure.
2279     * \(=\sqrt{(dI/dx)^2 + (dI/dy)^2}\) should be used to calculate the image gradient magnitude (
2280     * L2gradient=true ), or whether the default \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is enough (
2281     * L2gradient=false ).
2282     */
2283    public static void Canny(Mat image, Mat edges, double threshold1, double threshold2) {
2284        Canny_2(image.nativeObj, edges.nativeObj, threshold1, threshold2);
2285    }
2286
2287
2288    //
2289    // C++:  void cv::Canny(Mat dx, Mat dy, Mat& edges, double threshold1, double threshold2, bool L2gradient = false)
2290    //
2291
2292    /**
2293     * \overload
2294     *
2295     * Finds edges in an image using the Canny algorithm with custom image gradient.
2296     *
2297     * @param dx 16-bit x derivative of input image (CV_16SC1 or CV_16SC3).
2298     * @param dy 16-bit y derivative of input image (same type as dx).
2299     * @param edges output edge map; single channels 8-bit image, which has the same size as image .
2300     * @param threshold1 first threshold for the hysteresis procedure.
2301     * @param threshold2 second threshold for the hysteresis procedure.
2302     * @param L2gradient a flag, indicating whether a more accurate \(L_2\) norm
2303     * \(=\sqrt{(dI/dx)^2 + (dI/dy)^2}\) should be used to calculate the image gradient magnitude (
2304     * L2gradient=true ), or whether the default \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is enough (
2305     * L2gradient=false ).
2306     */
2307    public static void Canny(Mat dx, Mat dy, Mat edges, double threshold1, double threshold2, boolean L2gradient) {
2308        Canny_3(dx.nativeObj, dy.nativeObj, edges.nativeObj, threshold1, threshold2, L2gradient);
2309    }
2310
2311    /**
2312     * \overload
2313     *
2314     * Finds edges in an image using the Canny algorithm with custom image gradient.
2315     *
2316     * @param dx 16-bit x derivative of input image (CV_16SC1 or CV_16SC3).
2317     * @param dy 16-bit y derivative of input image (same type as dx).
2318     * @param edges output edge map; single channels 8-bit image, which has the same size as image .
2319     * @param threshold1 first threshold for the hysteresis procedure.
2320     * @param threshold2 second threshold for the hysteresis procedure.
2321     * \(=\sqrt{(dI/dx)^2 + (dI/dy)^2}\) should be used to calculate the image gradient magnitude (
2322     * L2gradient=true ), or whether the default \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is enough (
2323     * L2gradient=false ).
2324     */
2325    public static void Canny(Mat dx, Mat dy, Mat edges, double threshold1, double threshold2) {
2326        Canny_4(dx.nativeObj, dy.nativeObj, edges.nativeObj, threshold1, threshold2);
2327    }
2328
2329
2330    //
2331    // C++:  void cv::cornerMinEigenVal(Mat src, Mat& dst, int blockSize, int ksize = 3, int borderType = BORDER_DEFAULT)
2332    //
2333
2334    /**
2335     * Calculates the minimal eigenvalue of gradient matrices for corner detection.
2336     *
2337     * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal
2338     * eigenvalue of the covariance matrix of derivatives, that is, \(\min(\lambda_1, \lambda_2)\) in terms
2339     * of the formulae in the cornerEigenValsAndVecs description.
2340     *
2341     * @param src Input single-channel 8-bit or floating-point image.
2342     * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as
2343     * src .
2344     * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
2345     * @param ksize Aperture parameter for the Sobel operator.
2346     * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
2347     */
2348    public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize, int ksize, int borderType) {
2349        cornerMinEigenVal_0(src.nativeObj, dst.nativeObj, blockSize, ksize, borderType);
2350    }
2351
2352    /**
2353     * Calculates the minimal eigenvalue of gradient matrices for corner detection.
2354     *
2355     * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal
2356     * eigenvalue of the covariance matrix of derivatives, that is, \(\min(\lambda_1, \lambda_2)\) in terms
2357     * of the formulae in the cornerEigenValsAndVecs description.
2358     *
2359     * @param src Input single-channel 8-bit or floating-point image.
2360     * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as
2361     * src .
2362     * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
2363     * @param ksize Aperture parameter for the Sobel operator.
2364     */
2365    public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize, int ksize) {
2366        cornerMinEigenVal_1(src.nativeObj, dst.nativeObj, blockSize, ksize);
2367    }
2368
2369    /**
2370     * Calculates the minimal eigenvalue of gradient matrices for corner detection.
2371     *
2372     * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal
2373     * eigenvalue of the covariance matrix of derivatives, that is, \(\min(\lambda_1, \lambda_2)\) in terms
2374     * of the formulae in the cornerEigenValsAndVecs description.
2375     *
2376     * @param src Input single-channel 8-bit or floating-point image.
2377     * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as
2378     * src .
2379     * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
2380     */
2381    public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize) {
2382        cornerMinEigenVal_2(src.nativeObj, dst.nativeObj, blockSize);
2383    }
2384
2385
2386    //
2387    // C++:  void cv::cornerHarris(Mat src, Mat& dst, int blockSize, int ksize, double k, int borderType = BORDER_DEFAULT)
2388    //
2389
2390    /**
2391     * Harris corner detector.
2392     *
2393     * The function runs the Harris corner detector on the image. Similarly to cornerMinEigenVal and
2394     * cornerEigenValsAndVecs , for each pixel \((x, y)\) it calculates a \(2\times2\) gradient covariance
2395     * matrix \(M^{(x,y)}\) over a \(\texttt{blockSize} \times \texttt{blockSize}\) neighborhood. Then, it
2396     * computes the following characteristic:
2397     *
2398     * \(\texttt{dst} (x,y) =  \mathrm{det} M^{(x,y)} - k  \cdot \left ( \mathrm{tr} M^{(x,y)} \right )^2\)
2399     *
2400     * Corners in the image can be found as the local maxima of this response map.
2401     *
2402     * @param src Input single-channel 8-bit or floating-point image.
2403     * @param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same
2404     * size as src .
2405     * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
2406     * @param ksize Aperture parameter for the Sobel operator.
2407     * @param k Harris detector free parameter. See the formula above.
2408     * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
2409     */
2410    public static void cornerHarris(Mat src, Mat dst, int blockSize, int ksize, double k, int borderType) {
2411        cornerHarris_0(src.nativeObj, dst.nativeObj, blockSize, ksize, k, borderType);
2412    }
2413
2414    /**
2415     * Harris corner detector.
2416     *
2417     * The function runs the Harris corner detector on the image. Similarly to cornerMinEigenVal and
2418     * cornerEigenValsAndVecs , for each pixel \((x, y)\) it calculates a \(2\times2\) gradient covariance
2419     * matrix \(M^{(x,y)}\) over a \(\texttt{blockSize} \times \texttt{blockSize}\) neighborhood. Then, it
2420     * computes the following characteristic:
2421     *
2422     * \(\texttt{dst} (x,y) =  \mathrm{det} M^{(x,y)} - k  \cdot \left ( \mathrm{tr} M^{(x,y)} \right )^2\)
2423     *
2424     * Corners in the image can be found as the local maxima of this response map.
2425     *
2426     * @param src Input single-channel 8-bit or floating-point image.
2427     * @param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same
2428     * size as src .
2429     * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
2430     * @param ksize Aperture parameter for the Sobel operator.
2431     * @param k Harris detector free parameter. See the formula above.
2432     */
2433    public static void cornerHarris(Mat src, Mat dst, int blockSize, int ksize, double k) {
2434        cornerHarris_1(src.nativeObj, dst.nativeObj, blockSize, ksize, k);
2435    }
2436
2437
2438    //
2439    // C++:  void cv::cornerEigenValsAndVecs(Mat src, Mat& dst, int blockSize, int ksize, int borderType = BORDER_DEFAULT)
2440    //
2441
2442    /**
2443     * Calculates eigenvalues and eigenvectors of image blocks for corner detection.
2444     *
2445     * For every pixel \(p\) , the function cornerEigenValsAndVecs considers a blockSize \(\times\) blockSize
2446     * neighborhood \(S(p)\) . It calculates the covariation matrix of derivatives over the neighborhood as:
2447     *
2448     * \(M =  \begin{bmatrix} \sum _{S(p)}(dI/dx)^2 &amp;  \sum _{S(p)}dI/dx dI/dy  \\ \sum _{S(p)}dI/dx dI/dy &amp;  \sum _{S(p)}(dI/dy)^2 \end{bmatrix}\)
2449     *
2450     * where the derivatives are computed using the Sobel operator.
2451     *
2452     * After that, it finds eigenvectors and eigenvalues of \(M\) and stores them in the destination image as
2453     * \((\lambda_1, \lambda_2, x_1, y_1, x_2, y_2)\) where
2454     *
2455     * <ul>
2456     *   <li>
2457     *    \(\lambda_1, \lambda_2\) are the non-sorted eigenvalues of \(M\)
2458     *   </li>
2459     *   <li>
2460     *    \(x_1, y_1\) are the eigenvectors corresponding to \(\lambda_1\)
2461     *   </li>
2462     *   <li>
2463     *    \(x_2, y_2\) are the eigenvectors corresponding to \(\lambda_2\)
2464     *   </li>
2465     * </ul>
2466     *
2467     * The output of the function can be used for robust edge or corner detection.
2468     *
2469     * @param src Input single-channel 8-bit or floating-point image.
2470     * @param dst Image to store the results. It has the same size as src and the type CV_32FC(6) .
2471     * @param blockSize Neighborhood size (see details below).
2472     * @param ksize Aperture parameter for the Sobel operator.
2473     * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
2474     *
2475     * SEE:  cornerMinEigenVal, cornerHarris, preCornerDetect
2476     */
2477    public static void cornerEigenValsAndVecs(Mat src, Mat dst, int blockSize, int ksize, int borderType) {
2478        cornerEigenValsAndVecs_0(src.nativeObj, dst.nativeObj, blockSize, ksize, borderType);
2479    }
2480
2481    /**
2482     * Calculates eigenvalues and eigenvectors of image blocks for corner detection.
2483     *
2484     * For every pixel \(p\) , the function cornerEigenValsAndVecs considers a blockSize \(\times\) blockSize
2485     * neighborhood \(S(p)\) . It calculates the covariation matrix of derivatives over the neighborhood as:
2486     *
2487     * \(M =  \begin{bmatrix} \sum _{S(p)}(dI/dx)^2 &amp;  \sum _{S(p)}dI/dx dI/dy  \\ \sum _{S(p)}dI/dx dI/dy &amp;  \sum _{S(p)}(dI/dy)^2 \end{bmatrix}\)
2488     *
2489     * where the derivatives are computed using the Sobel operator.
2490     *
2491     * After that, it finds eigenvectors and eigenvalues of \(M\) and stores them in the destination image as
2492     * \((\lambda_1, \lambda_2, x_1, y_1, x_2, y_2)\) where
2493     *
2494     * <ul>
2495     *   <li>
2496     *    \(\lambda_1, \lambda_2\) are the non-sorted eigenvalues of \(M\)
2497     *   </li>
2498     *   <li>
2499     *    \(x_1, y_1\) are the eigenvectors corresponding to \(\lambda_1\)
2500     *   </li>
2501     *   <li>
2502     *    \(x_2, y_2\) are the eigenvectors corresponding to \(\lambda_2\)
2503     *   </li>
2504     * </ul>
2505     *
2506     * The output of the function can be used for robust edge or corner detection.
2507     *
2508     * @param src Input single-channel 8-bit or floating-point image.
2509     * @param dst Image to store the results. It has the same size as src and the type CV_32FC(6) .
2510     * @param blockSize Neighborhood size (see details below).
2511     * @param ksize Aperture parameter for the Sobel operator.
2512     *
2513     * SEE:  cornerMinEigenVal, cornerHarris, preCornerDetect
2514     */
2515    public static void cornerEigenValsAndVecs(Mat src, Mat dst, int blockSize, int ksize) {
2516        cornerEigenValsAndVecs_1(src.nativeObj, dst.nativeObj, blockSize, ksize);
2517    }
2518
2519
2520    //
2521    // C++:  void cv::preCornerDetect(Mat src, Mat& dst, int ksize, int borderType = BORDER_DEFAULT)
2522    //
2523
2524    /**
2525     * Calculates a feature map for corner detection.
2526     *
2527     * The function calculates the complex spatial derivative-based function of the source image
2528     *
2529     * \(\texttt{dst} = (D_x  \texttt{src} )^2  \cdot D_{yy}  \texttt{src} + (D_y  \texttt{src} )^2  \cdot D_{xx}  \texttt{src} - 2 D_x  \texttt{src} \cdot D_y  \texttt{src} \cdot D_{xy}  \texttt{src}\)
2530     *
2531     * where \(D_x\),\(D_y\) are the first image derivatives, \(D_{xx}\),\(D_{yy}\) are the second image
2532     * derivatives, and \(D_{xy}\) is the mixed derivative.
2533     *
2534     * The corners can be found as local maximums of the functions, as shown below:
2535     * <code>
2536     *     Mat corners, dilated_corners;
2537     *     preCornerDetect(image, corners, 3);
2538     *     // dilation with 3x3 rectangular structuring element
2539     *     dilate(corners, dilated_corners, Mat(), 1);
2540     *     Mat corner_mask = corners == dilated_corners;
2541     * </code>
2542     *
2543     * @param src Source single-channel 8-bit of floating-point image.
2544     * @param dst Output image that has the type CV_32F and the same size as src .
2545     * @param ksize %Aperture size of the Sobel .
2546     * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
2547     */
2548    public static void preCornerDetect(Mat src, Mat dst, int ksize, int borderType) {
2549        preCornerDetect_0(src.nativeObj, dst.nativeObj, ksize, borderType);
2550    }
2551
2552    /**
2553     * Calculates a feature map for corner detection.
2554     *
2555     * The function calculates the complex spatial derivative-based function of the source image
2556     *
2557     * \(\texttt{dst} = (D_x  \texttt{src} )^2  \cdot D_{yy}  \texttt{src} + (D_y  \texttt{src} )^2  \cdot D_{xx}  \texttt{src} - 2 D_x  \texttt{src} \cdot D_y  \texttt{src} \cdot D_{xy}  \texttt{src}\)
2558     *
2559     * where \(D_x\),\(D_y\) are the first image derivatives, \(D_{xx}\),\(D_{yy}\) are the second image
2560     * derivatives, and \(D_{xy}\) is the mixed derivative.
2561     *
2562     * The corners can be found as local maximums of the functions, as shown below:
2563     * <code>
2564     *     Mat corners, dilated_corners;
2565     *     preCornerDetect(image, corners, 3);
2566     *     // dilation with 3x3 rectangular structuring element
2567     *     dilate(corners, dilated_corners, Mat(), 1);
2568     *     Mat corner_mask = corners == dilated_corners;
2569     * </code>
2570     *
2571     * @param src Source single-channel 8-bit of floating-point image.
2572     * @param dst Output image that has the type CV_32F and the same size as src .
2573     * @param ksize %Aperture size of the Sobel .
2574     */
2575    public static void preCornerDetect(Mat src, Mat dst, int ksize) {
2576        preCornerDetect_1(src.nativeObj, dst.nativeObj, ksize);
2577    }
2578
2579
2580    //
2581    // C++:  void cv::cornerSubPix(Mat image, Mat& corners, Size winSize, Size zeroZone, TermCriteria criteria)
2582    //
2583
2584    /**
2585     * Refines the corner locations.
2586     *
2587     * The function iterates to find the sub-pixel accurate location of corners or radial saddle
2588     * points as described in CITE: forstner1987fast, and as shown on the figure below.
2589     *
2590     * ![image](pics/cornersubpix.png)
2591     *
2592     * Sub-pixel accurate corner locator is based on the observation that every vector from the center \(q\)
2593     * to a point \(p\) located within a neighborhood of \(q\) is orthogonal to the image gradient at \(p\)
2594     * subject to image and measurement noise. Consider the expression:
2595     *
2596     * \(\epsilon _i = {DI_{p_i}}^T  \cdot (q - p_i)\)
2597     *
2598     * where \({DI_{p_i}}\) is an image gradient at one of the points \(p_i\) in a neighborhood of \(q\) . The
2599     * value of \(q\) is to be found so that \(\epsilon_i\) is minimized. A system of equations may be set up
2600     * with \(\epsilon_i\) set to zero:
2601     *
2602     * \(\sum _i(DI_{p_i}  \cdot {DI_{p_i}}^T) \cdot q -  \sum _i(DI_{p_i}  \cdot {DI_{p_i}}^T  \cdot p_i)\)
2603     *
2604     * where the gradients are summed within a neighborhood ("search window") of \(q\) . Calling the first
2605     * gradient term \(G\) and the second gradient term \(b\) gives:
2606     *
2607     * \(q = G^{-1}  \cdot b\)
2608     *
2609     * The algorithm sets the center of the neighborhood window at this new center \(q\) and then iterates
2610     * until the center stays within a set threshold.
2611     *
2612     * @param image Input single-channel, 8-bit or float image.
2613     * @param corners Initial coordinates of the input corners and refined coordinates provided for
2614     * output.
2615     * @param winSize Half of the side length of the search window. For example, if winSize=Size(5,5) ,
2616     * then a \((5*2+1) \times (5*2+1) = 11 \times 11\) search window is used.
2617     * @param zeroZone Half of the size of the dead region in the middle of the search zone over which
2618     * the summation in the formula below is not done. It is used sometimes to avoid possible
2619     * singularities of the autocorrelation matrix. The value of (-1,-1) indicates that there is no such
2620     * a size.
2621     * @param criteria Criteria for termination of the iterative process of corner refinement. That is,
2622     * the process of corner position refinement stops either after criteria.maxCount iterations or when
2623     * the corner position moves by less than criteria.epsilon on some iteration.
2624     */
2625    public static void cornerSubPix(Mat image, Mat corners, Size winSize, Size zeroZone, TermCriteria criteria) {
2626        cornerSubPix_0(image.nativeObj, corners.nativeObj, winSize.width, winSize.height, zeroZone.width, zeroZone.height, criteria.type, criteria.maxCount, criteria.epsilon);
2627    }
2628
2629
2630    //
2631    // C++:  void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask = Mat(), int blockSize = 3, bool useHarrisDetector = false, double k = 0.04)
2632    //
2633
2634    /**
2635     * Determines strong corners on an image.
2636     *
2637     * The function finds the most prominent corners in the image or in the specified image region, as
2638     * described in CITE: Shi94
2639     *
2640     * <ul>
2641     *   <li>
2642     *    Function calculates the corner quality measure at every source image pixel using the
2643     *     #cornerMinEigenVal or #cornerHarris .
2644     *   </li>
2645     *   <li>
2646     *    Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are
2647     *     retained).
2648     *   </li>
2649     *   <li>
2650     *    The corners with the minimal eigenvalue less than
2651     *     \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected.
2652     *   </li>
2653     *   <li>
2654     *    The remaining corners are sorted by the quality measure in the descending order.
2655     *   </li>
2656     *   <li>
2657     *    Function throws away each corner for which there is a stronger corner at a distance less than
2658     *     maxDistance.
2659     *   </li>
2660     * </ul>
2661     *
2662     * The function can be used to initialize a point-based tracker of an object.
2663     *
2664     * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and
2665     * A &gt; B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
2666     * with qualityLevel=B .
2667     *
2668     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
2669     * @param corners Output vector of detected corners.
2670     * @param maxCorners Maximum number of corners to return. If there are more corners than are found,
2671     * the strongest of them is returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
2672     * and all detected corners are returned.
2673     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
2674     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
2675     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
2676     * quality measure less than the product are rejected. For example, if the best corner has the
2677     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
2678     * less than 15 are rejected.
2679     * @param minDistance Minimum possible Euclidean distance between the returned corners.
2680     * @param mask Optional region of interest. If the image is not empty (it needs to have the type
2681     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
2682     * @param blockSize Size of an average block for computing a derivative covariation matrix over each
2683     * pixel neighborhood. See cornerEigenValsAndVecs .
2684     * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
2685     * or #cornerMinEigenVal.
2686     * @param k Free parameter of the Harris detector.
2687     *
2688     * SEE:  cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform,
2689     */
2690    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, boolean useHarrisDetector, double k) {
2691        Mat corners_mat = corners;
2692        goodFeaturesToTrack_0(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, useHarrisDetector, k);
2693    }
2694
2695    /**
2696     * Determines strong corners on an image.
2697     *
2698     * The function finds the most prominent corners in the image or in the specified image region, as
2699     * described in CITE: Shi94
2700     *
2701     * <ul>
2702     *   <li>
2703     *    Function calculates the corner quality measure at every source image pixel using the
2704     *     #cornerMinEigenVal or #cornerHarris .
2705     *   </li>
2706     *   <li>
2707     *    Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are
2708     *     retained).
2709     *   </li>
2710     *   <li>
2711     *    The corners with the minimal eigenvalue less than
2712     *     \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected.
2713     *   </li>
2714     *   <li>
2715     *    The remaining corners are sorted by the quality measure in the descending order.
2716     *   </li>
2717     *   <li>
2718     *    Function throws away each corner for which there is a stronger corner at a distance less than
2719     *     maxDistance.
2720     *   </li>
2721     * </ul>
2722     *
2723     * The function can be used to initialize a point-based tracker of an object.
2724     *
2725     * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and
2726     * A &gt; B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
2727     * with qualityLevel=B .
2728     *
2729     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
2730     * @param corners Output vector of detected corners.
2731     * @param maxCorners Maximum number of corners to return. If there are more corners than are found,
2732     * the strongest of them is returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
2733     * and all detected corners are returned.
2734     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
2735     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
2736     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
2737     * quality measure less than the product are rejected. For example, if the best corner has the
2738     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
2739     * less than 15 are rejected.
2740     * @param minDistance Minimum possible Euclidean distance between the returned corners.
2741     * @param mask Optional region of interest. If the image is not empty (it needs to have the type
2742     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
2743     * @param blockSize Size of an average block for computing a derivative covariation matrix over each
2744     * pixel neighborhood. See cornerEigenValsAndVecs .
2745     * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
2746     * or #cornerMinEigenVal.
2747     *
2748     * SEE:  cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform,
2749     */
2750    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, boolean useHarrisDetector) {
2751        Mat corners_mat = corners;
2752        goodFeaturesToTrack_1(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, useHarrisDetector);
2753    }
2754
2755    /**
2756     * Determines strong corners on an image.
2757     *
2758     * The function finds the most prominent corners in the image or in the specified image region, as
2759     * described in CITE: Shi94
2760     *
2761     * <ul>
2762     *   <li>
2763     *    Function calculates the corner quality measure at every source image pixel using the
2764     *     #cornerMinEigenVal or #cornerHarris .
2765     *   </li>
2766     *   <li>
2767     *    Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are
2768     *     retained).
2769     *   </li>
2770     *   <li>
2771     *    The corners with the minimal eigenvalue less than
2772     *     \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected.
2773     *   </li>
2774     *   <li>
2775     *    The remaining corners are sorted by the quality measure in the descending order.
2776     *   </li>
2777     *   <li>
2778     *    Function throws away each corner for which there is a stronger corner at a distance less than
2779     *     maxDistance.
2780     *   </li>
2781     * </ul>
2782     *
2783     * The function can be used to initialize a point-based tracker of an object.
2784     *
2785     * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and
2786     * A &gt; B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
2787     * with qualityLevel=B .
2788     *
2789     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
2790     * @param corners Output vector of detected corners.
2791     * @param maxCorners Maximum number of corners to return. If there are more corners than are found,
2792     * the strongest of them is returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
2793     * and all detected corners are returned.
2794     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
2795     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
2796     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
2797     * quality measure less than the product are rejected. For example, if the best corner has the
2798     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
2799     * less than 15 are rejected.
2800     * @param minDistance Minimum possible Euclidean distance between the returned corners.
2801     * @param mask Optional region of interest. If the image is not empty (it needs to have the type
2802     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
2803     * @param blockSize Size of an average block for computing a derivative covariation matrix over each
2804     * pixel neighborhood. See cornerEigenValsAndVecs .
2805     * or #cornerMinEigenVal.
2806     *
2807     * SEE:  cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform,
2808     */
2809    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize) {
2810        Mat corners_mat = corners;
2811        goodFeaturesToTrack_2(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize);
2812    }
2813
2814    /**
2815     * Determines strong corners on an image.
2816     *
2817     * The function finds the most prominent corners in the image or in the specified image region, as
2818     * described in CITE: Shi94
2819     *
2820     * <ul>
2821     *   <li>
2822     *    Function calculates the corner quality measure at every source image pixel using the
2823     *     #cornerMinEigenVal or #cornerHarris .
2824     *   </li>
2825     *   <li>
2826     *    Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are
2827     *     retained).
2828     *   </li>
2829     *   <li>
2830     *    The corners with the minimal eigenvalue less than
2831     *     \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected.
2832     *   </li>
2833     *   <li>
2834     *    The remaining corners are sorted by the quality measure in the descending order.
2835     *   </li>
2836     *   <li>
2837     *    Function throws away each corner for which there is a stronger corner at a distance less than
2838     *     maxDistance.
2839     *   </li>
2840     * </ul>
2841     *
2842     * The function can be used to initialize a point-based tracker of an object.
2843     *
2844     * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and
2845     * A &gt; B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
2846     * with qualityLevel=B .
2847     *
2848     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
2849     * @param corners Output vector of detected corners.
2850     * @param maxCorners Maximum number of corners to return. If there are more corners than are found,
2851     * the strongest of them is returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
2852     * and all detected corners are returned.
2853     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
2854     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
2855     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
2856     * quality measure less than the product are rejected. For example, if the best corner has the
2857     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
2858     * less than 15 are rejected.
2859     * @param minDistance Minimum possible Euclidean distance between the returned corners.
2860     * @param mask Optional region of interest. If the image is not empty (it needs to have the type
2861     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
2862     * pixel neighborhood. See cornerEigenValsAndVecs .
2863     * or #cornerMinEigenVal.
2864     *
2865     * SEE:  cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform,
2866     */
2867    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask) {
2868        Mat corners_mat = corners;
2869        goodFeaturesToTrack_3(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj);
2870    }
2871
2872    /**
2873     * Determines strong corners on an image.
2874     *
2875     * The function finds the most prominent corners in the image or in the specified image region, as
2876     * described in CITE: Shi94
2877     *
2878     * <ul>
2879     *   <li>
2880     *    Function calculates the corner quality measure at every source image pixel using the
2881     *     #cornerMinEigenVal or #cornerHarris .
2882     *   </li>
2883     *   <li>
2884     *    Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are
2885     *     retained).
2886     *   </li>
2887     *   <li>
2888     *    The corners with the minimal eigenvalue less than
2889     *     \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected.
2890     *   </li>
2891     *   <li>
2892     *    The remaining corners are sorted by the quality measure in the descending order.
2893     *   </li>
2894     *   <li>
2895     *    Function throws away each corner for which there is a stronger corner at a distance less than
2896     *     maxDistance.
2897     *   </li>
2898     * </ul>
2899     *
2900     * The function can be used to initialize a point-based tracker of an object.
2901     *
2902     * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and
2903     * A &gt; B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
2904     * with qualityLevel=B .
2905     *
2906     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
2907     * @param corners Output vector of detected corners.
2908     * @param maxCorners Maximum number of corners to return. If there are more corners than are found,
2909     * the strongest of them is returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
2910     * and all detected corners are returned.
2911     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
2912     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
2913     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
2914     * quality measure less than the product are rejected. For example, if the best corner has the
2915     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
2916     * less than 15 are rejected.
2917     * @param minDistance Minimum possible Euclidean distance between the returned corners.
2918     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
2919     * pixel neighborhood. See cornerEigenValsAndVecs .
2920     * or #cornerMinEigenVal.
2921     *
2922     * SEE:  cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform,
2923     */
2924    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance) {
2925        Mat corners_mat = corners;
2926        goodFeaturesToTrack_4(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance);
2927    }
2928
2929
2930    //
2931    // C++:  void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize, bool useHarrisDetector = false, double k = 0.04)
2932    //
2933
2934    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize, boolean useHarrisDetector, double k) {
2935        Mat corners_mat = corners;
2936        goodFeaturesToTrack_5(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, gradientSize, useHarrisDetector, k);
2937    }
2938
2939    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize, boolean useHarrisDetector) {
2940        Mat corners_mat = corners;
2941        goodFeaturesToTrack_6(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, gradientSize, useHarrisDetector);
2942    }
2943
2944    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize) {
2945        Mat corners_mat = corners;
2946        goodFeaturesToTrack_7(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, gradientSize);
2947    }
2948
2949
2950    //
2951    // C++:  void cv::goodFeaturesToTrack(Mat image, Mat& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat& cornersQuality, int blockSize = 3, int gradientSize = 3, bool useHarrisDetector = false, double k = 0.04)
2952    //
2953
2954    /**
2955     * Same as above, but returns also quality measure of the detected corners.
2956     *
2957     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
2958     * @param corners Output vector of detected corners.
2959     * @param maxCorners Maximum number of corners to return. If there are more corners than are found,
2960     * the strongest of them is returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
2961     * and all detected corners are returned.
2962     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
2963     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
2964     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
2965     * quality measure less than the product are rejected. For example, if the best corner has the
2966     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
2967     * less than 15 are rejected.
2968     * @param minDistance Minimum possible Euclidean distance between the returned corners.
2969     * @param mask Region of interest. If the image is not empty (it needs to have the type
2970     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
2971     * @param cornersQuality Output vector of quality measure of the detected corners.
2972     * @param blockSize Size of an average block for computing a derivative covariation matrix over each
2973     * pixel neighborhood. See cornerEigenValsAndVecs .
2974     * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation.
2975     * See cornerEigenValsAndVecs .
2976     * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
2977     * or #cornerMinEigenVal.
2978     * @param k Free parameter of the Harris detector.
2979     */
2980    public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality, int blockSize, int gradientSize, boolean useHarrisDetector, double k) {
2981        goodFeaturesToTrackWithQuality_0(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj, blockSize, gradientSize, useHarrisDetector, k);
2982    }
2983
2984    /**
2985     * Same as above, but returns also quality measure of the detected corners.
2986     *
2987     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
2988     * @param corners Output vector of detected corners.
2989     * @param maxCorners Maximum number of corners to return. If there are more corners than are found,
2990     * the strongest of them is returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
2991     * and all detected corners are returned.
2992     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
2993     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
2994     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
2995     * quality measure less than the product are rejected. For example, if the best corner has the
2996     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
2997     * less than 15 are rejected.
2998     * @param minDistance Minimum possible Euclidean distance between the returned corners.
2999     * @param mask Region of interest. If the image is not empty (it needs to have the type
3000     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
3001     * @param cornersQuality Output vector of quality measure of the detected corners.
3002     * @param blockSize Size of an average block for computing a derivative covariation matrix over each
3003     * pixel neighborhood. See cornerEigenValsAndVecs .
3004     * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation.
3005     * See cornerEigenValsAndVecs .
3006     * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
3007     * or #cornerMinEigenVal.
3008     */
3009    public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality, int blockSize, int gradientSize, boolean useHarrisDetector) {
3010        goodFeaturesToTrackWithQuality_1(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj, blockSize, gradientSize, useHarrisDetector);
3011    }
3012
3013    /**
3014     * Same as above, but returns also quality measure of the detected corners.
3015     *
3016     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
3017     * @param corners Output vector of detected corners.
3018     * @param maxCorners Maximum number of corners to return. If there are more corners than are found,
3019     * the strongest of them is returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
3020     * and all detected corners are returned.
3021     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
3022     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
3023     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
3024     * quality measure less than the product are rejected. For example, if the best corner has the
3025     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
3026     * less than 15 are rejected.
3027     * @param minDistance Minimum possible Euclidean distance between the returned corners.
3028     * @param mask Region of interest. If the image is not empty (it needs to have the type
3029     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
3030     * @param cornersQuality Output vector of quality measure of the detected corners.
3031     * @param blockSize Size of an average block for computing a derivative covariation matrix over each
3032     * pixel neighborhood. See cornerEigenValsAndVecs .
3033     * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation.
3034     * See cornerEigenValsAndVecs .
3035     * or #cornerMinEigenVal.
3036     */
3037    public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality, int blockSize, int gradientSize) {
3038        goodFeaturesToTrackWithQuality_2(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj, blockSize, gradientSize);
3039    }
3040
3041    /**
3042     * Same as above, but returns also quality measure of the detected corners.
3043     *
3044     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
3045     * @param corners Output vector of detected corners.
3046     * @param maxCorners Maximum number of corners to return. If there are more corners than are found,
3047     * the strongest of them is returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
3048     * and all detected corners are returned.
3049     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
3050     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
3051     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
3052     * quality measure less than the product are rejected. For example, if the best corner has the
3053     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
3054     * less than 15 are rejected.
3055     * @param minDistance Minimum possible Euclidean distance between the returned corners.
3056     * @param mask Region of interest. If the image is not empty (it needs to have the type
3057     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
3058     * @param cornersQuality Output vector of quality measure of the detected corners.
3059     * @param blockSize Size of an average block for computing a derivative covariation matrix over each
3060     * pixel neighborhood. See cornerEigenValsAndVecs .
3061     * See cornerEigenValsAndVecs .
3062     * or #cornerMinEigenVal.
3063     */
3064    public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality, int blockSize) {
3065        goodFeaturesToTrackWithQuality_3(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj, blockSize);
3066    }
3067
3068    /**
3069     * Same as above, but returns also quality measure of the detected corners.
3070     *
3071     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
3072     * @param corners Output vector of detected corners.
3073     * @param maxCorners Maximum number of corners to return. If there are more corners than are found,
3074     * the strongest of them is returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
3075     * and all detected corners are returned.
3076     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
3077     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
3078     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
3079     * quality measure less than the product are rejected. For example, if the best corner has the
3080     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
3081     * less than 15 are rejected.
3082     * @param minDistance Minimum possible Euclidean distance between the returned corners.
3083     * @param mask Region of interest. If the image is not empty (it needs to have the type
3084     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
3085     * @param cornersQuality Output vector of quality measure of the detected corners.
3086     * pixel neighborhood. See cornerEigenValsAndVecs .
3087     * See cornerEigenValsAndVecs .
3088     * or #cornerMinEigenVal.
3089     */
3090    public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality) {
3091        goodFeaturesToTrackWithQuality_4(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj);
3092    }
3093
3094
3095    //
3096    // C++:  void cv::HoughLines(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI)
3097    //
3098
3099    /**
3100     * Finds lines in a binary image using the standard Hough transform.
3101     *
3102     * The function implements the standard or standard multi-scale Hough transform algorithm for line
3103     * detection. See &lt;http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm&gt; for a good explanation of Hough
3104     * transform.
3105     *
3106     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3107     * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
3108     * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\) . \(\rho\) is the distance from the coordinate origin \((0,0)\) (top-left corner of
3109     * the image). \(\theta\) is the line rotation angle in radians (
3110     * \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ).
3111     * \(\textrm{votes}\) is the value of accumulator.
3112     * @param rho Distance resolution of the accumulator in pixels.
3113     * @param theta Angle resolution of the accumulator in radians.
3114     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3115     * votes ( \(&gt;\texttt{threshold}\) ).
3116     * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho .
3117     * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
3118     * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these
3119     * parameters should be positive.
3120     * @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta.
3121     * @param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines.
3122     * Must fall between 0 and max_theta.
3123     * @param max_theta For standard and multi-scale Hough transform, maximum angle to check for lines.
3124     * Must fall between min_theta and CV_PI.
3125     */
3126    public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn, double min_theta, double max_theta) {
3127        HoughLines_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn, min_theta, max_theta);
3128    }
3129
3130    /**
3131     * Finds lines in a binary image using the standard Hough transform.
3132     *
3133     * The function implements the standard or standard multi-scale Hough transform algorithm for line
3134     * detection. See &lt;http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm&gt; for a good explanation of Hough
3135     * transform.
3136     *
3137     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3138     * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
3139     * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\) . \(\rho\) is the distance from the coordinate origin \((0,0)\) (top-left corner of
3140     * the image). \(\theta\) is the line rotation angle in radians (
3141     * \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ).
3142     * \(\textrm{votes}\) is the value of accumulator.
3143     * @param rho Distance resolution of the accumulator in pixels.
3144     * @param theta Angle resolution of the accumulator in radians.
3145     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3146     * votes ( \(&gt;\texttt{threshold}\) ).
3147     * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho .
3148     * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
3149     * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these
3150     * parameters should be positive.
3151     * @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta.
3152     * @param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines.
3153     * Must fall between 0 and max_theta.
3154     * Must fall between min_theta and CV_PI.
3155     */
3156    public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn, double min_theta) {
3157        HoughLines_1(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn, min_theta);
3158    }
3159
3160    /**
3161     * Finds lines in a binary image using the standard Hough transform.
3162     *
3163     * The function implements the standard or standard multi-scale Hough transform algorithm for line
3164     * detection. See &lt;http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm&gt; for a good explanation of Hough
3165     * transform.
3166     *
3167     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3168     * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
3169     * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\) . \(\rho\) is the distance from the coordinate origin \((0,0)\) (top-left corner of
3170     * the image). \(\theta\) is the line rotation angle in radians (
3171     * \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ).
3172     * \(\textrm{votes}\) is the value of accumulator.
3173     * @param rho Distance resolution of the accumulator in pixels.
3174     * @param theta Angle resolution of the accumulator in radians.
3175     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3176     * votes ( \(&gt;\texttt{threshold}\) ).
3177     * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho .
3178     * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
3179     * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these
3180     * parameters should be positive.
3181     * @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta.
3182     * Must fall between 0 and max_theta.
3183     * Must fall between min_theta and CV_PI.
3184     */
3185    public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn) {
3186        HoughLines_2(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn);
3187    }
3188
3189    /**
3190     * Finds lines in a binary image using the standard Hough transform.
3191     *
3192     * The function implements the standard or standard multi-scale Hough transform algorithm for line
3193     * detection. See &lt;http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm&gt; for a good explanation of Hough
3194     * transform.
3195     *
3196     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3197     * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
3198     * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\) . \(\rho\) is the distance from the coordinate origin \((0,0)\) (top-left corner of
3199     * the image). \(\theta\) is the line rotation angle in radians (
3200     * \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ).
3201     * \(\textrm{votes}\) is the value of accumulator.
3202     * @param rho Distance resolution of the accumulator in pixels.
3203     * @param theta Angle resolution of the accumulator in radians.
3204     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3205     * votes ( \(&gt;\texttt{threshold}\) ).
3206     * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho .
3207     * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
3208     * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these
3209     * parameters should be positive.
3210     * Must fall between 0 and max_theta.
3211     * Must fall between min_theta and CV_PI.
3212     */
3213    public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn) {
3214        HoughLines_3(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn);
3215    }
3216
3217    /**
3218     * Finds lines in a binary image using the standard Hough transform.
3219     *
3220     * The function implements the standard or standard multi-scale Hough transform algorithm for line
3221     * detection. See &lt;http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm&gt; for a good explanation of Hough
3222     * transform.
3223     *
3224     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3225     * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
3226     * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\) . \(\rho\) is the distance from the coordinate origin \((0,0)\) (top-left corner of
3227     * the image). \(\theta\) is the line rotation angle in radians (
3228     * \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ).
3229     * \(\textrm{votes}\) is the value of accumulator.
3230     * @param rho Distance resolution of the accumulator in pixels.
3231     * @param theta Angle resolution of the accumulator in radians.
3232     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3233     * votes ( \(&gt;\texttt{threshold}\) ).
3234     * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
3235     * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these
3236     * parameters should be positive.
3237     * Must fall between 0 and max_theta.
3238     * Must fall between min_theta and CV_PI.
3239     */
3240    public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold) {
3241        HoughLines_4(image.nativeObj, lines.nativeObj, rho, theta, threshold);
3242    }
3243
3244
3245    //
3246    // C++:  void cv::HoughLinesP(Mat image, Mat& lines, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0)
3247    //
3248
3249    /**
3250     * Finds line segments in a binary image using the probabilistic Hough transform.
3251     *
3252     * The function implements the probabilistic Hough transform algorithm for line detection, described
3253     * in CITE: Matas00
3254     *
3255     * See the line detection example below:
3256     * INCLUDE: snippets/imgproc_HoughLinesP.cpp
3257     * This is a sample picture the function parameters have been tuned for:
3258     *
3259     * ![image](pics/building.jpg)
3260     *
3261     * And this is the output of the above program in case of the probabilistic Hough transform:
3262     *
3263     * ![image](pics/houghp.png)
3264     *
3265     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3266     * @param lines Output vector of lines. Each line is represented by a 4-element vector
3267     * \((x_1, y_1, x_2, y_2)\) , where \((x_1,y_1)\) and \((x_2, y_2)\) are the ending points of each detected
3268     * line segment.
3269     * @param rho Distance resolution of the accumulator in pixels.
3270     * @param theta Angle resolution of the accumulator in radians.
3271     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3272     * votes ( \(&gt;\texttt{threshold}\) ).
3273     * @param minLineLength Minimum line length. Line segments shorter than that are rejected.
3274     * @param maxLineGap Maximum allowed gap between points on the same line to link them.
3275     *
3276     * SEE: LineSegmentDetector
3277     */
3278    public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold, double minLineLength, double maxLineGap) {
3279        HoughLinesP_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, minLineLength, maxLineGap);
3280    }
3281
3282    /**
3283     * Finds line segments in a binary image using the probabilistic Hough transform.
3284     *
3285     * The function implements the probabilistic Hough transform algorithm for line detection, described
3286     * in CITE: Matas00
3287     *
3288     * See the line detection example below:
3289     * INCLUDE: snippets/imgproc_HoughLinesP.cpp
3290     * This is a sample picture the function parameters have been tuned for:
3291     *
3292     * ![image](pics/building.jpg)
3293     *
3294     * And this is the output of the above program in case of the probabilistic Hough transform:
3295     *
3296     * ![image](pics/houghp.png)
3297     *
3298     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3299     * @param lines Output vector of lines. Each line is represented by a 4-element vector
3300     * \((x_1, y_1, x_2, y_2)\) , where \((x_1,y_1)\) and \((x_2, y_2)\) are the ending points of each detected
3301     * line segment.
3302     * @param rho Distance resolution of the accumulator in pixels.
3303     * @param theta Angle resolution of the accumulator in radians.
3304     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3305     * votes ( \(&gt;\texttt{threshold}\) ).
3306     * @param minLineLength Minimum line length. Line segments shorter than that are rejected.
3307     *
3308     * SEE: LineSegmentDetector
3309     */
3310    public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold, double minLineLength) {
3311        HoughLinesP_1(image.nativeObj, lines.nativeObj, rho, theta, threshold, minLineLength);
3312    }
3313
3314    /**
3315     * Finds line segments in a binary image using the probabilistic Hough transform.
3316     *
3317     * The function implements the probabilistic Hough transform algorithm for line detection, described
3318     * in CITE: Matas00
3319     *
3320     * See the line detection example below:
3321     * INCLUDE: snippets/imgproc_HoughLinesP.cpp
3322     * This is a sample picture the function parameters have been tuned for:
3323     *
3324     * ![image](pics/building.jpg)
3325     *
3326     * And this is the output of the above program in case of the probabilistic Hough transform:
3327     *
3328     * ![image](pics/houghp.png)
3329     *
3330     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3331     * @param lines Output vector of lines. Each line is represented by a 4-element vector
3332     * \((x_1, y_1, x_2, y_2)\) , where \((x_1,y_1)\) and \((x_2, y_2)\) are the ending points of each detected
3333     * line segment.
3334     * @param rho Distance resolution of the accumulator in pixels.
3335     * @param theta Angle resolution of the accumulator in radians.
3336     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3337     * votes ( \(&gt;\texttt{threshold}\) ).
3338     *
3339     * SEE: LineSegmentDetector
3340     */
3341    public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold) {
3342        HoughLinesP_2(image.nativeObj, lines.nativeObj, rho, theta, threshold);
3343    }
3344
3345
3346    //
3347    // C++:  void cv::HoughLinesPointSet(Mat _point, Mat& _lines, int lines_max, int threshold, double min_rho, double max_rho, double rho_step, double min_theta, double max_theta, double theta_step)
3348    //
3349
3350    /**
3351     * Finds lines in a set of points using the standard Hough transform.
3352     *
3353     * The function finds lines in a set of points using a modification of the Hough transform.
3354     * INCLUDE: snippets/imgproc_HoughLinesPointSet.cpp
3355     * @param _point Input vector of points. Each vector must be encoded as a Point vector \((x,y)\). Type must be CV_32FC2 or CV_32SC2.
3356     * @param _lines Output vector of found lines. Each vector is encoded as a vector&lt;Vec3d&gt; \((votes, rho, theta)\).
3357     * The larger the value of 'votes', the higher the reliability of the Hough line.
3358     * @param lines_max Max count of hough lines.
3359     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3360     * votes ( \(&gt;\texttt{threshold}\) )
3361     * @param min_rho Minimum Distance value of the accumulator in pixels.
3362     * @param max_rho Maximum Distance value of the accumulator in pixels.
3363     * @param rho_step Distance resolution of the accumulator in pixels.
3364     * @param min_theta Minimum angle value of the accumulator in radians.
3365     * @param max_theta Maximum angle value of the accumulator in radians.
3366     * @param theta_step Angle resolution of the accumulator in radians.
3367     */
3368    public static void HoughLinesPointSet(Mat _point, Mat _lines, int lines_max, int threshold, double min_rho, double max_rho, double rho_step, double min_theta, double max_theta, double theta_step) {
3369        HoughLinesPointSet_0(_point.nativeObj, _lines.nativeObj, lines_max, threshold, min_rho, max_rho, rho_step, min_theta, max_theta, theta_step);
3370    }
3371
3372
3373    //
3374    // C++:  void cv::HoughCircles(Mat image, Mat& circles, int method, double dp, double minDist, double param1 = 100, double param2 = 100, int minRadius = 0, int maxRadius = 0)
3375    //
3376
3377    /**
3378     * Finds circles in a grayscale image using the Hough transform.
3379     *
3380     * The function finds circles in a grayscale image using a modification of the Hough transform.
3381     *
3382     * Example: :
3383     * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
3384     *
3385     * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct
3386     * radii. You can assist to the function by specifying the radius range ( minRadius and maxRadius ) if
3387     * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number
3388     * to return centers only without radius search, and find the correct radius using an additional procedure.
3389     *
3390     * It also helps to smooth image a bit unless it's already soft. For example,
3391     * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
3392     *
3393     * @param image 8-bit, single-channel, grayscale input image.
3394     * @param circles Output vector of found circles. Each vector is encoded as  3 or 4 element
3395     * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\) .
3396     * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
3397     * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
3398     * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
3399     * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
3400     * unless some small very circles need to be detected.
3401     * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
3402     * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
3403     * too large, some circles may be missed.
3404     * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,
3405     * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller).
3406     * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value
3407     * shough normally be higher, such as 300 or normally exposed and contrasty images.
3408     * @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the
3409     * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more
3410     * false circles may be detected. Circles, corresponding to the larger accumulator values, will be
3411     * returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure.
3412     * The closer it to 1, the better shaped circles algorithm selects. In most cases 0.9 should be fine.
3413     * If you want get better detection of small circles, you may decrease it to 0.85, 0.8 or even less.
3414     * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles.
3415     * @param minRadius Minimum circle radius.
3416     * @param maxRadius Maximum circle radius. If &lt;= 0, uses the maximum image dimension. If &lt; 0, #HOUGH_GRADIENT returns
3417     * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radiuses.
3418     *
3419     * SEE: fitEllipse, minEnclosingCircle
3420     */
3421    public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1, double param2, int minRadius, int maxRadius) {
3422        HoughCircles_0(image.nativeObj, circles.nativeObj, method, dp, minDist, param1, param2, minRadius, maxRadius);
3423    }
3424
3425    /**
3426     * Finds circles in a grayscale image using the Hough transform.
3427     *
3428     * The function finds circles in a grayscale image using a modification of the Hough transform.
3429     *
3430     * Example: :
3431     * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
3432     *
3433     * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct
3434     * radii. You can assist to the function by specifying the radius range ( minRadius and maxRadius ) if
3435     * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number
3436     * to return centers only without radius search, and find the correct radius using an additional procedure.
3437     *
3438     * It also helps to smooth image a bit unless it's already soft. For example,
3439     * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
3440     *
3441     * @param image 8-bit, single-channel, grayscale input image.
3442     * @param circles Output vector of found circles. Each vector is encoded as  3 or 4 element
3443     * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\) .
3444     * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
3445     * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
3446     * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
3447     * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
3448     * unless some small very circles need to be detected.
3449     * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
3450     * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
3451     * too large, some circles may be missed.
3452     * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,
3453     * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller).
3454     * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value
3455     * shough normally be higher, such as 300 or normally exposed and contrasty images.
3456     * @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the
3457     * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more
3458     * false circles may be detected. Circles, corresponding to the larger accumulator values, will be
3459     * returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure.
3460     * The closer it to 1, the better shaped circles algorithm selects. In most cases 0.9 should be fine.
3461     * If you want get better detection of small circles, you may decrease it to 0.85, 0.8 or even less.
3462     * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles.
3463     * @param minRadius Minimum circle radius.
3464     * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radiuses.
3465     *
3466     * SEE: fitEllipse, minEnclosingCircle
3467     */
3468    public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1, double param2, int minRadius) {
3469        HoughCircles_1(image.nativeObj, circles.nativeObj, method, dp, minDist, param1, param2, minRadius);
3470    }
3471
3472    /**
3473     * Finds circles in a grayscale image using the Hough transform.
3474     *
3475     * The function finds circles in a grayscale image using a modification of the Hough transform.
3476     *
3477     * Example: :
3478     * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
3479     *
3480     * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct
3481     * radii. You can assist to the function by specifying the radius range ( minRadius and maxRadius ) if
3482     * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number
3483     * to return centers only without radius search, and find the correct radius using an additional procedure.
3484     *
3485     * It also helps to smooth image a bit unless it's already soft. For example,
3486     * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
3487     *
3488     * @param image 8-bit, single-channel, grayscale input image.
3489     * @param circles Output vector of found circles. Each vector is encoded as  3 or 4 element
3490     * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\) .
3491     * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
3492     * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
3493     * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
3494     * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
3495     * unless some small very circles need to be detected.
3496     * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
3497     * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
3498     * too large, some circles may be missed.
3499     * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,
3500     * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller).
3501     * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value
3502     * shough normally be higher, such as 300 or normally exposed and contrasty images.
3503     * @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the
3504     * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more
3505     * false circles may be detected. Circles, corresponding to the larger accumulator values, will be
3506     * returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure.
3507     * The closer it to 1, the better shaped circles algorithm selects. In most cases 0.9 should be fine.
3508     * If you want get better detection of small circles, you may decrease it to 0.85, 0.8 or even less.
3509     * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles.
3510     * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radiuses.
3511     *
3512     * SEE: fitEllipse, minEnclosingCircle
3513     */
3514    public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1, double param2) {
3515        HoughCircles_2(image.nativeObj, circles.nativeObj, method, dp, minDist, param1, param2);
3516    }
3517
3518    /**
3519     * Finds circles in a grayscale image using the Hough transform.
3520     *
3521     * The function finds circles in a grayscale image using a modification of the Hough transform.
3522     *
3523     * Example: :
3524     * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
3525     *
3526     * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct
3527     * radii. You can assist to the function by specifying the radius range ( minRadius and maxRadius ) if
3528     * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number
3529     * to return centers only without radius search, and find the correct radius using an additional procedure.
3530     *
3531     * It also helps to smooth image a bit unless it's already soft. For example,
3532     * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
3533     *
3534     * @param image 8-bit, single-channel, grayscale input image.
3535     * @param circles Output vector of found circles. Each vector is encoded as  3 or 4 element
3536     * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\) .
3537     * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
3538     * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
3539     * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
3540     * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
3541     * unless some small very circles need to be detected.
3542     * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
3543     * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
3544     * too large, some circles may be missed.
3545     * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,
3546     * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller).
3547     * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value
3548     * shough normally be higher, such as 300 or normally exposed and contrasty images.
3549     * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more
3550     * false circles may be detected. Circles, corresponding to the larger accumulator values, will be
3551     * returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure.
3552     * The closer it to 1, the better shaped circles algorithm selects. In most cases 0.9 should be fine.
3553     * If you want get better detection of small circles, you may decrease it to 0.85, 0.8 or even less.
3554     * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles.
3555     * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radiuses.
3556     *
3557     * SEE: fitEllipse, minEnclosingCircle
3558     */
3559    public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1) {
3560        HoughCircles_3(image.nativeObj, circles.nativeObj, method, dp, minDist, param1);
3561    }
3562
3563    /**
3564     * Finds circles in a grayscale image using the Hough transform.
3565     *
3566     * The function finds circles in a grayscale image using a modification of the Hough transform.
3567     *
3568     * Example: :
3569     * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
3570     *
3571     * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct
3572     * radii. You can assist to the function by specifying the radius range ( minRadius and maxRadius ) if
3573     * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number
3574     * to return centers only without radius search, and find the correct radius using an additional procedure.
3575     *
3576     * It also helps to smooth image a bit unless it's already soft. For example,
3577     * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
3578     *
3579     * @param image 8-bit, single-channel, grayscale input image.
3580     * @param circles Output vector of found circles. Each vector is encoded as  3 or 4 element
3581     * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\) .
3582     * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
3583     * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
3584     * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
3585     * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
3586     * unless some small very circles need to be detected.
3587     * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
3588     * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
3589     * too large, some circles may be missed.
3590     * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller).
3591     * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value
3592     * shough normally be higher, such as 300 or normally exposed and contrasty images.
3593     * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more
3594     * false circles may be detected. Circles, corresponding to the larger accumulator values, will be
3595     * returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure.
3596     * The closer it to 1, the better shaped circles algorithm selects. In most cases 0.9 should be fine.
3597     * If you want get better detection of small circles, you may decrease it to 0.85, 0.8 or even less.
3598     * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles.
3599     * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radiuses.
3600     *
3601     * SEE: fitEllipse, minEnclosingCircle
3602     */
3603    public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist) {
3604        HoughCircles_4(image.nativeObj, circles.nativeObj, method, dp, minDist);
3605    }
3606
3607
3608    //
3609    // C++:  void cv::erode(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
3610    //
3611
3612    /**
3613     * Erodes an image by using a specific structuring element.
3614     *
3615     * The function erodes the source image using the specified structuring element that determines the
3616     * shape of a pixel neighborhood over which the minimum is taken:
3617     *
3618     * \(\texttt{dst} (x,y) =  \min _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3619     *
3620     * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
3621     * case of multi-channel images, each channel is processed independently.
3622     *
3623     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3624     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3625     * @param dst output image of the same size and type as src.
3626     * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular
3627     * structuring element is used. Kernel can be created using #getStructuringElement.
3628     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3629     * anchor is at the element center.
3630     * @param iterations number of times erosion is applied.
3631     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
3632     * @param borderValue border value in case of a constant border
3633     * SEE:  dilate, morphologyEx, getStructuringElement
3634     */
3635    public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) {
3636        erode_0(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
3637    }
3638
3639    /**
3640     * Erodes an image by using a specific structuring element.
3641     *
3642     * The function erodes the source image using the specified structuring element that determines the
3643     * shape of a pixel neighborhood over which the minimum is taken:
3644     *
3645     * \(\texttt{dst} (x,y) =  \min _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3646     *
3647     * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
3648     * case of multi-channel images, each channel is processed independently.
3649     *
3650     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3651     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3652     * @param dst output image of the same size and type as src.
3653     * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular
3654     * structuring element is used. Kernel can be created using #getStructuringElement.
3655     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3656     * anchor is at the element center.
3657     * @param iterations number of times erosion is applied.
3658     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
3659     * SEE:  dilate, morphologyEx, getStructuringElement
3660     */
3661    public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType) {
3662        erode_1(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType);
3663    }
3664
3665    /**
3666     * Erodes an image by using a specific structuring element.
3667     *
3668     * The function erodes the source image using the specified structuring element that determines the
3669     * shape of a pixel neighborhood over which the minimum is taken:
3670     *
3671     * \(\texttt{dst} (x,y) =  \min _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3672     *
3673     * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
3674     * case of multi-channel images, each channel is processed independently.
3675     *
3676     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3677     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3678     * @param dst output image of the same size and type as src.
3679     * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular
3680     * structuring element is used. Kernel can be created using #getStructuringElement.
3681     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3682     * anchor is at the element center.
3683     * @param iterations number of times erosion is applied.
3684     * SEE:  dilate, morphologyEx, getStructuringElement
3685     */
3686    public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations) {
3687        erode_2(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations);
3688    }
3689
3690    /**
3691     * Erodes an image by using a specific structuring element.
3692     *
3693     * The function erodes the source image using the specified structuring element that determines the
3694     * shape of a pixel neighborhood over which the minimum is taken:
3695     *
3696     * \(\texttt{dst} (x,y) =  \min _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3697     *
3698     * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
3699     * case of multi-channel images, each channel is processed independently.
3700     *
3701     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3702     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3703     * @param dst output image of the same size and type as src.
3704     * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular
3705     * structuring element is used. Kernel can be created using #getStructuringElement.
3706     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3707     * anchor is at the element center.
3708     * SEE:  dilate, morphologyEx, getStructuringElement
3709     */
3710    public static void erode(Mat src, Mat dst, Mat kernel, Point anchor) {
3711        erode_3(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y);
3712    }
3713
3714    /**
3715     * Erodes an image by using a specific structuring element.
3716     *
3717     * The function erodes the source image using the specified structuring element that determines the
3718     * shape of a pixel neighborhood over which the minimum is taken:
3719     *
3720     * \(\texttt{dst} (x,y) =  \min _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3721     *
3722     * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
3723     * case of multi-channel images, each channel is processed independently.
3724     *
3725     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3726     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3727     * @param dst output image of the same size and type as src.
3728     * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular
3729     * structuring element is used. Kernel can be created using #getStructuringElement.
3730     * anchor is at the element center.
3731     * SEE:  dilate, morphologyEx, getStructuringElement
3732     */
3733    public static void erode(Mat src, Mat dst, Mat kernel) {
3734        erode_4(src.nativeObj, dst.nativeObj, kernel.nativeObj);
3735    }
3736
3737
3738    //
3739    // C++:  void cv::dilate(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
3740    //
3741
3742    /**
3743     * Dilates an image by using a specific structuring element.
3744     *
3745     * The function dilates the source image using the specified structuring element that determines the
3746     * shape of a pixel neighborhood over which the maximum is taken:
3747     * \(\texttt{dst} (x,y) =  \max _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3748     *
3749     * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
3750     * case of multi-channel images, each channel is processed independently.
3751     *
3752     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3753     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3754     * @param dst output image of the same size and type as src.
3755     * @param kernel structuring element used for dilation; if elemenat=Mat(), a 3 x 3 rectangular
3756     * structuring element is used. Kernel can be created using #getStructuringElement
3757     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3758     * anchor is at the element center.
3759     * @param iterations number of times dilation is applied.
3760     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not suported.
3761     * @param borderValue border value in case of a constant border
3762     * SEE:  erode, morphologyEx, getStructuringElement
3763     */
3764    public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) {
3765        dilate_0(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
3766    }
3767
3768    /**
3769     * Dilates an image by using a specific structuring element.
3770     *
3771     * The function dilates the source image using the specified structuring element that determines the
3772     * shape of a pixel neighborhood over which the maximum is taken:
3773     * \(\texttt{dst} (x,y) =  \max _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3774     *
3775     * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
3776     * case of multi-channel images, each channel is processed independently.
3777     *
3778     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3779     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3780     * @param dst output image of the same size and type as src.
3781     * @param kernel structuring element used for dilation; if elemenat=Mat(), a 3 x 3 rectangular
3782     * structuring element is used. Kernel can be created using #getStructuringElement
3783     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3784     * anchor is at the element center.
3785     * @param iterations number of times dilation is applied.
3786     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not suported.
3787     * SEE:  erode, morphologyEx, getStructuringElement
3788     */
3789    public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType) {
3790        dilate_1(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType);
3791    }
3792
3793    /**
3794     * Dilates an image by using a specific structuring element.
3795     *
3796     * The function dilates the source image using the specified structuring element that determines the
3797     * shape of a pixel neighborhood over which the maximum is taken:
3798     * \(\texttt{dst} (x,y) =  \max _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3799     *
3800     * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
3801     * case of multi-channel images, each channel is processed independently.
3802     *
3803     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3804     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3805     * @param dst output image of the same size and type as src.
3806     * @param kernel structuring element used for dilation; if elemenat=Mat(), a 3 x 3 rectangular
3807     * structuring element is used. Kernel can be created using #getStructuringElement
3808     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3809     * anchor is at the element center.
3810     * @param iterations number of times dilation is applied.
3811     * SEE:  erode, morphologyEx, getStructuringElement
3812     */
3813    public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations) {
3814        dilate_2(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations);
3815    }
3816
3817    /**
3818     * Dilates an image by using a specific structuring element.
3819     *
3820     * The function dilates the source image using the specified structuring element that determines the
3821     * shape of a pixel neighborhood over which the maximum is taken:
3822     * \(\texttt{dst} (x,y) =  \max _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3823     *
3824     * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
3825     * case of multi-channel images, each channel is processed independently.
3826     *
3827     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3828     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3829     * @param dst output image of the same size and type as src.
3830     * @param kernel structuring element used for dilation; if elemenat=Mat(), a 3 x 3 rectangular
3831     * structuring element is used. Kernel can be created using #getStructuringElement
3832     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3833     * anchor is at the element center.
3834     * SEE:  erode, morphologyEx, getStructuringElement
3835     */
3836    public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor) {
3837        dilate_3(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y);
3838    }
3839
3840    /**
3841     * Dilates an image by using a specific structuring element.
3842     *
3843     * The function dilates the source image using the specified structuring element that determines the
3844     * shape of a pixel neighborhood over which the maximum is taken:
3845     * \(\texttt{dst} (x,y) =  \max _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3846     *
3847     * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
3848     * case of multi-channel images, each channel is processed independently.
3849     *
3850     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3851     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3852     * @param dst output image of the same size and type as src.
3853     * @param kernel structuring element used for dilation; if elemenat=Mat(), a 3 x 3 rectangular
3854     * structuring element is used. Kernel can be created using #getStructuringElement
3855     * anchor is at the element center.
3856     * SEE:  erode, morphologyEx, getStructuringElement
3857     */
3858    public static void dilate(Mat src, Mat dst, Mat kernel) {
3859        dilate_4(src.nativeObj, dst.nativeObj, kernel.nativeObj);
3860    }
3861
3862
3863    //
3864    // C++:  void cv::morphologyEx(Mat src, Mat& dst, int op, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
3865    //
3866
3867    /**
3868     * Performs advanced morphological transformations.
3869     *
3870     * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as
3871     * basic operations.
3872     *
3873     * Any of the operations can be done in-place. In case of multi-channel images, each channel is
3874     * processed independently.
3875     *
3876     * @param src Source image. The number of channels can be arbitrary. The depth should be one of
3877     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3878     * @param dst Destination image of the same size and type as source image.
3879     * @param op Type of a morphological operation, see #MorphTypes
3880     * @param kernel Structuring element. It can be created using #getStructuringElement.
3881     * @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the
3882     * kernel center.
3883     * @param iterations Number of times erosion and dilation are applied.
3884     * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
3885     * @param borderValue Border value in case of a constant border. The default value has a special
3886     * meaning.
3887     * SEE:  dilate, erode, getStructuringElement
3888     * <b>Note:</b> The number of iterations is the number of times erosion or dilatation operation will be applied.
3889     * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply
3890     * successively: erode -&gt; erode -&gt; dilate -&gt; dilate (and not erode -&gt; dilate -&gt; erode -&gt; dilate).
3891     */
3892    public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) {
3893        morphologyEx_0(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
3894    }
3895
3896    /**
3897     * Performs advanced morphological transformations.
3898     *
3899     * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as
3900     * basic operations.
3901     *
3902     * Any of the operations can be done in-place. In case of multi-channel images, each channel is
3903     * processed independently.
3904     *
3905     * @param src Source image. The number of channels can be arbitrary. The depth should be one of
3906     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3907     * @param dst Destination image of the same size and type as source image.
3908     * @param op Type of a morphological operation, see #MorphTypes
3909     * @param kernel Structuring element. It can be created using #getStructuringElement.
3910     * @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the
3911     * kernel center.
3912     * @param iterations Number of times erosion and dilation are applied.
3913     * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
3914     * meaning.
3915     * SEE:  dilate, erode, getStructuringElement
3916     * <b>Note:</b> The number of iterations is the number of times erosion or dilatation operation will be applied.
3917     * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply
3918     * successively: erode -&gt; erode -&gt; dilate -&gt; dilate (and not erode -&gt; dilate -&gt; erode -&gt; dilate).
3919     */
3920    public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor, int iterations, int borderType) {
3921        morphologyEx_1(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType);
3922    }
3923
3924    /**
3925     * Performs advanced morphological transformations.
3926     *
3927     * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as
3928     * basic operations.
3929     *
3930     * Any of the operations can be done in-place. In case of multi-channel images, each channel is
3931     * processed independently.
3932     *
3933     * @param src Source image. The number of channels can be arbitrary. The depth should be one of
3934     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3935     * @param dst Destination image of the same size and type as source image.
3936     * @param op Type of a morphological operation, see #MorphTypes
3937     * @param kernel Structuring element. It can be created using #getStructuringElement.
3938     * @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the
3939     * kernel center.
3940     * @param iterations Number of times erosion and dilation are applied.
3941     * meaning.
3942     * SEE:  dilate, erode, getStructuringElement
3943     * <b>Note:</b> The number of iterations is the number of times erosion or dilatation operation will be applied.
3944     * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply
3945     * successively: erode -&gt; erode -&gt; dilate -&gt; dilate (and not erode -&gt; dilate -&gt; erode -&gt; dilate).
3946     */
3947    public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor, int iterations) {
3948        morphologyEx_2(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y, iterations);
3949    }
3950
3951    /**
3952     * Performs advanced morphological transformations.
3953     *
3954     * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as
3955     * basic operations.
3956     *
3957     * Any of the operations can be done in-place. In case of multi-channel images, each channel is
3958     * processed independently.
3959     *
3960     * @param src Source image. The number of channels can be arbitrary. The depth should be one of
3961     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3962     * @param dst Destination image of the same size and type as source image.
3963     * @param op Type of a morphological operation, see #MorphTypes
3964     * @param kernel Structuring element. It can be created using #getStructuringElement.
3965     * @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the
3966     * kernel center.
3967     * meaning.
3968     * SEE:  dilate, erode, getStructuringElement
3969     * <b>Note:</b> The number of iterations is the number of times erosion or dilatation operation will be applied.
3970     * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply
3971     * successively: erode -&gt; erode -&gt; dilate -&gt; dilate (and not erode -&gt; dilate -&gt; erode -&gt; dilate).
3972     */
3973    public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor) {
3974        morphologyEx_3(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y);
3975    }
3976
3977    /**
3978     * Performs advanced morphological transformations.
3979     *
3980     * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as
3981     * basic operations.
3982     *
3983     * Any of the operations can be done in-place. In case of multi-channel images, each channel is
3984     * processed independently.
3985     *
3986     * @param src Source image. The number of channels can be arbitrary. The depth should be one of
3987     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3988     * @param dst Destination image of the same size and type as source image.
3989     * @param op Type of a morphological operation, see #MorphTypes
3990     * @param kernel Structuring element. It can be created using #getStructuringElement.
3991     * kernel center.
3992     * meaning.
3993     * SEE:  dilate, erode, getStructuringElement
3994     * <b>Note:</b> The number of iterations is the number of times erosion or dilatation operation will be applied.
3995     * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply
3996     * successively: erode -&gt; erode -&gt; dilate -&gt; dilate (and not erode -&gt; dilate -&gt; erode -&gt; dilate).
3997     */
3998    public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel) {
3999        morphologyEx_4(src.nativeObj, dst.nativeObj, op, kernel.nativeObj);
4000    }
4001
4002
4003    //
4004    // C++:  void cv::resize(Mat src, Mat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR)
4005    //
4006
4007    /**
4008     * Resizes an image.
4009     *
4010     * The function resize resizes the image src down to or up to the specified size. Note that the
4011     * initial dst type or size are not taken into account. Instead, the size and type are derived from
4012     * the {@code src},{@code dsize},{@code fx}, and {@code fy}. If you want to resize src so that it fits the pre-created dst,
4013     * you may call the function as follows:
4014     * <code>
4015     *     // explicitly specify dsize=dst.size(); fx and fy will be computed from that.
4016     *     resize(src, dst, dst.size(), 0, 0, interpolation);
4017     * </code>
4018     * If you want to decimate the image by factor of 2 in each direction, you can call the function this
4019     * way:
4020     * <code>
4021     *     // specify fx and fy and let the function compute the destination image size.
4022     *     resize(src, dst, Size(), 0.5, 0.5, interpolation);
4023     * </code>
4024     * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to
4025     * enlarge an image, it will generally look best with c#INTER_CUBIC (slow) or #INTER_LINEAR
4026     * (faster but still looks OK).
4027     *
4028     * @param src input image.
4029     * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from
4030     * src.size(), fx, and fy; the type of dst is the same as of src.
4031     * @param dsize output image size; if it equals zero, it is computed as:
4032     *  \(\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\)
4033     *  Either dsize or both fx and fy must be non-zero.
4034     * @param fx scale factor along the horizontal axis; when it equals 0, it is computed as
4035     * \(\texttt{(double)dsize.width/src.cols}\)
4036     * @param fy scale factor along the vertical axis; when it equals 0, it is computed as
4037     * \(\texttt{(double)dsize.height/src.rows}\)
4038     * @param interpolation interpolation method, see #InterpolationFlags
4039     *
4040     * SEE:  warpAffine, warpPerspective, remap
4041     */
4042    public static void resize(Mat src, Mat dst, Size dsize, double fx, double fy, int interpolation) {
4043        resize_0(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, fx, fy, interpolation);
4044    }
4045
4046    /**
4047     * Resizes an image.
4048     *
4049     * The function resize resizes the image src down to or up to the specified size. Note that the
4050     * initial dst type or size are not taken into account. Instead, the size and type are derived from
4051     * the {@code src},{@code dsize},{@code fx}, and {@code fy}. If you want to resize src so that it fits the pre-created dst,
4052     * you may call the function as follows:
4053     * <code>
4054     *     // explicitly specify dsize=dst.size(); fx and fy will be computed from that.
4055     *     resize(src, dst, dst.size(), 0, 0, interpolation);
4056     * </code>
4057     * If you want to decimate the image by factor of 2 in each direction, you can call the function this
4058     * way:
4059     * <code>
4060     *     // specify fx and fy and let the function compute the destination image size.
4061     *     resize(src, dst, Size(), 0.5, 0.5, interpolation);
4062     * </code>
4063     * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to
4064     * enlarge an image, it will generally look best with c#INTER_CUBIC (slow) or #INTER_LINEAR
4065     * (faster but still looks OK).
4066     *
4067     * @param src input image.
4068     * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from
4069     * src.size(), fx, and fy; the type of dst is the same as of src.
4070     * @param dsize output image size; if it equals zero, it is computed as:
4071     *  \(\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\)
4072     *  Either dsize or both fx and fy must be non-zero.
4073     * @param fx scale factor along the horizontal axis; when it equals 0, it is computed as
4074     * \(\texttt{(double)dsize.width/src.cols}\)
4075     * @param fy scale factor along the vertical axis; when it equals 0, it is computed as
4076     * \(\texttt{(double)dsize.height/src.rows}\)
4077     *
4078     * SEE:  warpAffine, warpPerspective, remap
4079     */
4080    public static void resize(Mat src, Mat dst, Size dsize, double fx, double fy) {
4081        resize_1(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, fx, fy);
4082    }
4083
4084    /**
4085     * Resizes an image.
4086     *
4087     * The function resize resizes the image src down to or up to the specified size. Note that the
4088     * initial dst type or size are not taken into account. Instead, the size and type are derived from
4089     * the {@code src},{@code dsize},{@code fx}, and {@code fy}. If you want to resize src so that it fits the pre-created dst,
4090     * you may call the function as follows:
4091     * <code>
4092     *     // explicitly specify dsize=dst.size(); fx and fy will be computed from that.
4093     *     resize(src, dst, dst.size(), 0, 0, interpolation);
4094     * </code>
4095     * If you want to decimate the image by factor of 2 in each direction, you can call the function this
4096     * way:
4097     * <code>
4098     *     // specify fx and fy and let the function compute the destination image size.
4099     *     resize(src, dst, Size(), 0.5, 0.5, interpolation);
4100     * </code>
4101     * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to
4102     * enlarge an image, it will generally look best with c#INTER_CUBIC (slow) or #INTER_LINEAR
4103     * (faster but still looks OK).
4104     *
4105     * @param src input image.
4106     * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from
4107     * src.size(), fx, and fy; the type of dst is the same as of src.
4108     * @param dsize output image size; if it equals zero, it is computed as:
4109     *  \(\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\)
4110     *  Either dsize or both fx and fy must be non-zero.
4111     * @param fx scale factor along the horizontal axis; when it equals 0, it is computed as
4112     * \(\texttt{(double)dsize.width/src.cols}\)
4113     * \(\texttt{(double)dsize.height/src.rows}\)
4114     *
4115     * SEE:  warpAffine, warpPerspective, remap
4116     */
4117    public static void resize(Mat src, Mat dst, Size dsize, double fx) {
4118        resize_2(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, fx);
4119    }
4120
4121    /**
4122     * Resizes an image.
4123     *
4124     * The function resize resizes the image src down to or up to the specified size. Note that the
4125     * initial dst type or size are not taken into account. Instead, the size and type are derived from
4126     * the {@code src},{@code dsize},{@code fx}, and {@code fy}. If you want to resize src so that it fits the pre-created dst,
4127     * you may call the function as follows:
4128     * <code>
4129     *     // explicitly specify dsize=dst.size(); fx and fy will be computed from that.
4130     *     resize(src, dst, dst.size(), 0, 0, interpolation);
4131     * </code>
4132     * If you want to decimate the image by factor of 2 in each direction, you can call the function this
4133     * way:
4134     * <code>
4135     *     // specify fx and fy and let the function compute the destination image size.
4136     *     resize(src, dst, Size(), 0.5, 0.5, interpolation);
4137     * </code>
4138     * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to
4139     * enlarge an image, it will generally look best with c#INTER_CUBIC (slow) or #INTER_LINEAR
4140     * (faster but still looks OK).
4141     *
4142     * @param src input image.
4143     * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from
4144     * src.size(), fx, and fy; the type of dst is the same as of src.
4145     * @param dsize output image size; if it equals zero, it is computed as:
4146     *  \(\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\)
4147     *  Either dsize or both fx and fy must be non-zero.
4148     * \(\texttt{(double)dsize.width/src.cols}\)
4149     * \(\texttt{(double)dsize.height/src.rows}\)
4150     *
4151     * SEE:  warpAffine, warpPerspective, remap
4152     */
4153    public static void resize(Mat src, Mat dst, Size dsize) {
4154        resize_3(src.nativeObj, dst.nativeObj, dsize.width, dsize.height);
4155    }
4156
4157
4158    //
4159    // C++:  void cv::warpAffine(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
4160    //
4161
4162    /**
4163     * Applies an affine transformation to an image.
4164     *
4165     * The function warpAffine transforms the source image using the specified matrix:
4166     *
4167     * \(\texttt{dst} (x,y) =  \texttt{src} ( \texttt{M} _{11} x +  \texttt{M} _{12} y +  \texttt{M} _{13}, \texttt{M} _{21} x +  \texttt{M} _{22} y +  \texttt{M} _{23})\)
4168     *
4169     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
4170     * with #invertAffineTransform and then put in the formula above instead of M. The function cannot
4171     * operate in-place.
4172     *
4173     * @param src input image.
4174     * @param dst output image that has the size dsize and the same type as src .
4175     * @param M \(2\times 3\) transformation matrix.
4176     * @param dsize size of the output image.
4177     * @param flags combination of interpolation methods (see #InterpolationFlags) and the optional
4178     * flag #WARP_INVERSE_MAP that means that M is the inverse transformation (
4179     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4180     * @param borderMode pixel extrapolation method (see #BorderTypes); when
4181     * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to
4182     * the "outliers" in the source image are not modified by the function.
4183     * @param borderValue value used in case of a constant border; by default, it is 0.
4184     *
4185     * SEE:  warpPerspective, resize, remap, getRectSubPix, transform
4186     */
4187    public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode, Scalar borderValue) {
4188        warpAffine_0(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
4189    }
4190
4191    /**
4192     * Applies an affine transformation to an image.
4193     *
4194     * The function warpAffine transforms the source image using the specified matrix:
4195     *
4196     * \(\texttt{dst} (x,y) =  \texttt{src} ( \texttt{M} _{11} x +  \texttt{M} _{12} y +  \texttt{M} _{13}, \texttt{M} _{21} x +  \texttt{M} _{22} y +  \texttt{M} _{23})\)
4197     *
4198     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
4199     * with #invertAffineTransform and then put in the formula above instead of M. The function cannot
4200     * operate in-place.
4201     *
4202     * @param src input image.
4203     * @param dst output image that has the size dsize and the same type as src .
4204     * @param M \(2\times 3\) transformation matrix.
4205     * @param dsize size of the output image.
4206     * @param flags combination of interpolation methods (see #InterpolationFlags) and the optional
4207     * flag #WARP_INVERSE_MAP that means that M is the inverse transformation (
4208     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4209     * @param borderMode pixel extrapolation method (see #BorderTypes); when
4210     * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to
4211     * the "outliers" in the source image are not modified by the function.
4212     *
4213     * SEE:  warpPerspective, resize, remap, getRectSubPix, transform
4214     */
4215    public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode) {
4216        warpAffine_1(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode);
4217    }
4218
4219    /**
4220     * Applies an affine transformation to an image.
4221     *
4222     * The function warpAffine transforms the source image using the specified matrix:
4223     *
4224     * \(\texttt{dst} (x,y) =  \texttt{src} ( \texttt{M} _{11} x +  \texttt{M} _{12} y +  \texttt{M} _{13}, \texttt{M} _{21} x +  \texttt{M} _{22} y +  \texttt{M} _{23})\)
4225     *
4226     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
4227     * with #invertAffineTransform and then put in the formula above instead of M. The function cannot
4228     * operate in-place.
4229     *
4230     * @param src input image.
4231     * @param dst output image that has the size dsize and the same type as src .
4232     * @param M \(2\times 3\) transformation matrix.
4233     * @param dsize size of the output image.
4234     * @param flags combination of interpolation methods (see #InterpolationFlags) and the optional
4235     * flag #WARP_INVERSE_MAP that means that M is the inverse transformation (
4236     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4237     * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to
4238     * the "outliers" in the source image are not modified by the function.
4239     *
4240     * SEE:  warpPerspective, resize, remap, getRectSubPix, transform
4241     */
4242    public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags) {
4243        warpAffine_2(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags);
4244    }
4245
4246    /**
4247     * Applies an affine transformation to an image.
4248     *
4249     * The function warpAffine transforms the source image using the specified matrix:
4250     *
4251     * \(\texttt{dst} (x,y) =  \texttt{src} ( \texttt{M} _{11} x +  \texttt{M} _{12} y +  \texttt{M} _{13}, \texttt{M} _{21} x +  \texttt{M} _{22} y +  \texttt{M} _{23})\)
4252     *
4253     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
4254     * with #invertAffineTransform and then put in the formula above instead of M. The function cannot
4255     * operate in-place.
4256     *
4257     * @param src input image.
4258     * @param dst output image that has the size dsize and the same type as src .
4259     * @param M \(2\times 3\) transformation matrix.
4260     * @param dsize size of the output image.
4261     * flag #WARP_INVERSE_MAP that means that M is the inverse transformation (
4262     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4263     * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to
4264     * the "outliers" in the source image are not modified by the function.
4265     *
4266     * SEE:  warpPerspective, resize, remap, getRectSubPix, transform
4267     */
4268    public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize) {
4269        warpAffine_3(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height);
4270    }
4271
4272
4273    //
4274    // C++:  void cv::warpPerspective(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
4275    //
4276
4277    /**
4278     * Applies a perspective transformation to an image.
4279     *
4280     * The function warpPerspective transforms the source image using the specified matrix:
4281     *
4282     * \(\texttt{dst} (x,y) =  \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
4283     *      \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\)
4284     *
4285     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
4286     * and then put in the formula above instead of M. The function cannot operate in-place.
4287     *
4288     * @param src input image.
4289     * @param dst output image that has the size dsize and the same type as src .
4290     * @param M \(3\times 3\) transformation matrix.
4291     * @param dsize size of the output image.
4292     * @param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the
4293     * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation (
4294     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4295     * @param borderMode pixel extrapolation method (#BORDER_CONSTANT or #BORDER_REPLICATE).
4296     * @param borderValue value used in case of a constant border; by default, it equals 0.
4297     *
4298     * SEE:  warpAffine, resize, remap, getRectSubPix, perspectiveTransform
4299     */
4300    public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode, Scalar borderValue) {
4301        warpPerspective_0(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
4302    }
4303
4304    /**
4305     * Applies a perspective transformation to an image.
4306     *
4307     * The function warpPerspective transforms the source image using the specified matrix:
4308     *
4309     * \(\texttt{dst} (x,y) =  \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
4310     *      \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\)
4311     *
4312     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
4313     * and then put in the formula above instead of M. The function cannot operate in-place.
4314     *
4315     * @param src input image.
4316     * @param dst output image that has the size dsize and the same type as src .
4317     * @param M \(3\times 3\) transformation matrix.
4318     * @param dsize size of the output image.
4319     * @param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the
4320     * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation (
4321     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4322     * @param borderMode pixel extrapolation method (#BORDER_CONSTANT or #BORDER_REPLICATE).
4323     *
4324     * SEE:  warpAffine, resize, remap, getRectSubPix, perspectiveTransform
4325     */
4326    public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode) {
4327        warpPerspective_1(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode);
4328    }
4329
4330    /**
4331     * Applies a perspective transformation to an image.
4332     *
4333     * The function warpPerspective transforms the source image using the specified matrix:
4334     *
4335     * \(\texttt{dst} (x,y) =  \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
4336     *      \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\)
4337     *
4338     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
4339     * and then put in the formula above instead of M. The function cannot operate in-place.
4340     *
4341     * @param src input image.
4342     * @param dst output image that has the size dsize and the same type as src .
4343     * @param M \(3\times 3\) transformation matrix.
4344     * @param dsize size of the output image.
4345     * @param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the
4346     * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation (
4347     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4348     *
4349     * SEE:  warpAffine, resize, remap, getRectSubPix, perspectiveTransform
4350     */
4351    public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags) {
4352        warpPerspective_2(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags);
4353    }
4354
4355    /**
4356     * Applies a perspective transformation to an image.
4357     *
4358     * The function warpPerspective transforms the source image using the specified matrix:
4359     *
4360     * \(\texttt{dst} (x,y) =  \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
4361     *      \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\)
4362     *
4363     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
4364     * and then put in the formula above instead of M. The function cannot operate in-place.
4365     *
4366     * @param src input image.
4367     * @param dst output image that has the size dsize and the same type as src .
4368     * @param M \(3\times 3\) transformation matrix.
4369     * @param dsize size of the output image.
4370     * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation (
4371     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4372     *
4373     * SEE:  warpAffine, resize, remap, getRectSubPix, perspectiveTransform
4374     */
4375    public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize) {
4376        warpPerspective_3(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height);
4377    }
4378
4379
4380    //
4381    // C++:  void cv::remap(Mat src, Mat& dst, Mat map1, Mat map2, int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
4382    //
4383
4384    /**
4385     * Applies a generic geometrical transformation to an image.
4386     *
4387     * The function remap transforms the source image using the specified map:
4388     *
4389     * \(\texttt{dst} (x,y) =  \texttt{src} (map_x(x,y),map_y(x,y))\)
4390     *
4391     * where values of pixels with non-integer coordinates are computed using one of available
4392     * interpolation methods. \(map_x\) and \(map_y\) can be encoded as separate floating-point maps
4393     * in \(map_1\) and \(map_2\) respectively, or interleaved floating-point maps of \((x,y)\) in
4394     * \(map_1\), or fixed-point maps created by using convertMaps. The reason you might want to
4395     * convert from floating to fixed-point representations of a map is that they can yield much faster
4396     * (\~2x) remapping operations. In the converted case, \(map_1\) contains pairs (cvFloor(x),
4397     * cvFloor(y)) and \(map_2\) contains indices in a table of interpolation coefficients.
4398     *
4399     * This function cannot operate in-place.
4400     *
4401     * @param src Source image.
4402     * @param dst Destination image. It has the same size as map1 and the same type as src .
4403     * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 ,
4404     * CV_32FC1, or CV_32FC2. See convertMaps for details on converting a floating point
4405     * representation to fixed-point for speed.
4406     * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
4407     * if map1 is (x,y) points), respectively.
4408     * @param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA
4409     * and #INTER_LINEAR_EXACT are not supported by this function.
4410     * @param borderMode Pixel extrapolation method (see #BorderTypes). When
4411     * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that
4412     * corresponds to the "outliers" in the source image are not modified by the function.
4413     * @param borderValue Value used in case of a constant border. By default, it is 0.
4414     * <b>Note:</b>
4415     * Due to current implementation limitations the size of an input and output images should be less than 32767x32767.
4416     */
4417    public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode, Scalar borderValue) {
4418        remap_0(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
4419    }
4420
4421    /**
4422     * Applies a generic geometrical transformation to an image.
4423     *
4424     * The function remap transforms the source image using the specified map:
4425     *
4426     * \(\texttt{dst} (x,y) =  \texttt{src} (map_x(x,y),map_y(x,y))\)
4427     *
4428     * where values of pixels with non-integer coordinates are computed using one of available
4429     * interpolation methods. \(map_x\) and \(map_y\) can be encoded as separate floating-point maps
4430     * in \(map_1\) and \(map_2\) respectively, or interleaved floating-point maps of \((x,y)\) in
4431     * \(map_1\), or fixed-point maps created by using convertMaps. The reason you might want to
4432     * convert from floating to fixed-point representations of a map is that they can yield much faster
4433     * (\~2x) remapping operations. In the converted case, \(map_1\) contains pairs (cvFloor(x),
4434     * cvFloor(y)) and \(map_2\) contains indices in a table of interpolation coefficients.
4435     *
4436     * This function cannot operate in-place.
4437     *
4438     * @param src Source image.
4439     * @param dst Destination image. It has the same size as map1 and the same type as src .
4440     * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 ,
4441     * CV_32FC1, or CV_32FC2. See convertMaps for details on converting a floating point
4442     * representation to fixed-point for speed.
4443     * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
4444     * if map1 is (x,y) points), respectively.
4445     * @param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA
4446     * and #INTER_LINEAR_EXACT are not supported by this function.
4447     * @param borderMode Pixel extrapolation method (see #BorderTypes). When
4448     * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that
4449     * corresponds to the "outliers" in the source image are not modified by the function.
4450     * <b>Note:</b>
4451     * Due to current implementation limitations the size of an input and output images should be less than 32767x32767.
4452     */
4453    public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode) {
4454        remap_1(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation, borderMode);
4455    }
4456
4457    /**
4458     * Applies a generic geometrical transformation to an image.
4459     *
4460     * The function remap transforms the source image using the specified map:
4461     *
4462     * \(\texttt{dst} (x,y) =  \texttt{src} (map_x(x,y),map_y(x,y))\)
4463     *
4464     * where values of pixels with non-integer coordinates are computed using one of available
4465     * interpolation methods. \(map_x\) and \(map_y\) can be encoded as separate floating-point maps
4466     * in \(map_1\) and \(map_2\) respectively, or interleaved floating-point maps of \((x,y)\) in
4467     * \(map_1\), or fixed-point maps created by using convertMaps. The reason you might want to
4468     * convert from floating to fixed-point representations of a map is that they can yield much faster
4469     * (\~2x) remapping operations. In the converted case, \(map_1\) contains pairs (cvFloor(x),
4470     * cvFloor(y)) and \(map_2\) contains indices in a table of interpolation coefficients.
4471     *
4472     * This function cannot operate in-place.
4473     *
4474     * @param src Source image.
4475     * @param dst Destination image. It has the same size as map1 and the same type as src .
4476     * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 ,
4477     * CV_32FC1, or CV_32FC2. See convertMaps for details on converting a floating point
4478     * representation to fixed-point for speed.
4479     * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
4480     * if map1 is (x,y) points), respectively.
4481     * @param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA
4482     * and #INTER_LINEAR_EXACT are not supported by this function.
4483     * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that
4484     * corresponds to the "outliers" in the source image are not modified by the function.
4485     * <b>Note:</b>
4486     * Due to current implementation limitations the size of an input and output images should be less than 32767x32767.
4487     */
4488    public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation) {
4489        remap_2(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation);
4490    }
4491
4492
4493    //
4494    // C++:  void cv::convertMaps(Mat map1, Mat map2, Mat& dstmap1, Mat& dstmap2, int dstmap1type, bool nninterpolation = false)
4495    //
4496
4497    /**
4498     * Converts image transformation maps from one representation to another.
4499     *
4500     * The function converts a pair of maps for remap from one representation to another. The following
4501     * options ( (map1.type(), map2.type()) \(\rightarrow\) (dstmap1.type(), dstmap2.type()) ) are
4502     * supported:
4503     *
4504     * <ul>
4505     *   <li>
4506     *  \(\texttt{(CV_32FC1, CV_32FC1)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\). This is the
4507     * most frequently used conversion operation, in which the original floating-point maps (see remap )
4508     * are converted to a more compact and much faster fixed-point representation. The first output array
4509     * contains the rounded coordinates and the second array (created only when nninterpolation=false )
4510     * contains indices in the interpolation tables.
4511     *   </li>
4512     * </ul>
4513     *
4514     * <ul>
4515     *   <li>
4516     *  \(\texttt{(CV_32FC2)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\). The same as above but
4517     * the original maps are stored in one 2-channel matrix.
4518     *   </li>
4519     * </ul>
4520     *
4521     * <ul>
4522     *   <li>
4523     *  Reverse conversion. Obviously, the reconstructed floating-point maps will not be exactly the same
4524     * as the originals.
4525     *   </li>
4526     * </ul>
4527     *
4528     * @param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2 .
4529     * @param map2 The second input map of type CV_16UC1, CV_32FC1, or none (empty matrix),
4530     * respectively.
4531     * @param dstmap1 The first output map that has the type dstmap1type and the same size as src .
4532     * @param dstmap2 The second output map.
4533     * @param dstmap1type Type of the first output map that should be CV_16SC2, CV_32FC1, or
4534     * CV_32FC2 .
4535     * @param nninterpolation Flag indicating whether the fixed-point maps are used for the
4536     * nearest-neighbor or for a more complex interpolation.
4537     *
4538     * SEE:  remap, undistort, initUndistortRectifyMap
4539     */
4540    public static void convertMaps(Mat map1, Mat map2, Mat dstmap1, Mat dstmap2, int dstmap1type, boolean nninterpolation) {
4541        convertMaps_0(map1.nativeObj, map2.nativeObj, dstmap1.nativeObj, dstmap2.nativeObj, dstmap1type, nninterpolation);
4542    }
4543
4544    /**
4545     * Converts image transformation maps from one representation to another.
4546     *
4547     * The function converts a pair of maps for remap from one representation to another. The following
4548     * options ( (map1.type(), map2.type()) \(\rightarrow\) (dstmap1.type(), dstmap2.type()) ) are
4549     * supported:
4550     *
4551     * <ul>
4552     *   <li>
4553     *  \(\texttt{(CV_32FC1, CV_32FC1)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\). This is the
4554     * most frequently used conversion operation, in which the original floating-point maps (see remap )
4555     * are converted to a more compact and much faster fixed-point representation. The first output array
4556     * contains the rounded coordinates and the second array (created only when nninterpolation=false )
4557     * contains indices in the interpolation tables.
4558     *   </li>
4559     * </ul>
4560     *
4561     * <ul>
4562     *   <li>
4563     *  \(\texttt{(CV_32FC2)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\). The same as above but
4564     * the original maps are stored in one 2-channel matrix.
4565     *   </li>
4566     * </ul>
4567     *
4568     * <ul>
4569     *   <li>
4570     *  Reverse conversion. Obviously, the reconstructed floating-point maps will not be exactly the same
4571     * as the originals.
4572     *   </li>
4573     * </ul>
4574     *
4575     * @param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2 .
4576     * @param map2 The second input map of type CV_16UC1, CV_32FC1, or none (empty matrix),
4577     * respectively.
4578     * @param dstmap1 The first output map that has the type dstmap1type and the same size as src .
4579     * @param dstmap2 The second output map.
4580     * @param dstmap1type Type of the first output map that should be CV_16SC2, CV_32FC1, or
4581     * CV_32FC2 .
4582     * nearest-neighbor or for a more complex interpolation.
4583     *
4584     * SEE:  remap, undistort, initUndistortRectifyMap
4585     */
4586    public static void convertMaps(Mat map1, Mat map2, Mat dstmap1, Mat dstmap2, int dstmap1type) {
4587        convertMaps_1(map1.nativeObj, map2.nativeObj, dstmap1.nativeObj, dstmap2.nativeObj, dstmap1type);
4588    }
4589
4590
4591    //
4592    // C++:  Mat cv::getRotationMatrix2D(Point2f center, double angle, double scale)
4593    //
4594
4595    /**
4596     * Calculates an affine matrix of 2D rotation.
4597     *
4598     * The function calculates the following matrix:
4599     *
4600     * \(\begin{bmatrix} \alpha &amp;  \beta &amp; (1- \alpha )  \cdot \texttt{center.x} -  \beta \cdot \texttt{center.y} \\ - \beta &amp;  \alpha &amp;  \beta \cdot \texttt{center.x} + (1- \alpha )  \cdot \texttt{center.y} \end{bmatrix}\)
4601     *
4602     * where
4603     *
4604     * \(\begin{array}{l} \alpha =  \texttt{scale} \cdot \cos \texttt{angle} , \\ \beta =  \texttt{scale} \cdot \sin \texttt{angle} \end{array}\)
4605     *
4606     * The transformation maps the rotation center to itself. If this is not the target, adjust the shift.
4607     *
4608     * @param center Center of the rotation in the source image.
4609     * @param angle Rotation angle in degrees. Positive values mean counter-clockwise rotation (the
4610     * coordinate origin is assumed to be the top-left corner).
4611     * @param scale Isotropic scale factor.
4612     *
4613     * SEE:  getAffineTransform, warpAffine, transform
4614     * @return automatically generated
4615     */
4616    public static Mat getRotationMatrix2D(Point center, double angle, double scale) {
4617        return new Mat(getRotationMatrix2D_0(center.x, center.y, angle, scale));
4618    }
4619
4620
4621    //
4622    // C++:  void cv::invertAffineTransform(Mat M, Mat& iM)
4623    //
4624
4625    /**
4626     * Inverts an affine transformation.
4627     *
4628     * The function computes an inverse affine transformation represented by \(2 \times 3\) matrix M:
4629     *
4630     * \(\begin{bmatrix} a_{11} &amp; a_{12} &amp; b_1  \\ a_{21} &amp; a_{22} &amp; b_2 \end{bmatrix}\)
4631     *
4632     * The result is also a \(2 \times 3\) matrix of the same type as M.
4633     *
4634     * @param M Original affine transformation.
4635     * @param iM Output reverse affine transformation.
4636     */
4637    public static void invertAffineTransform(Mat M, Mat iM) {
4638        invertAffineTransform_0(M.nativeObj, iM.nativeObj);
4639    }
4640
4641
4642    //
4643    // C++:  Mat cv::getPerspectiveTransform(Mat src, Mat dst, int solveMethod = DECOMP_LU)
4644    //
4645
4646    /**
4647     * Calculates a perspective transform from four pairs of the corresponding points.
4648     *
4649     * The function calculates the \(3 \times 3\) matrix of a perspective transform so that:
4650     *
4651     * \(\begin{bmatrix} t_i x'_i \\ t_i y'_i \\ t_i \end{bmatrix} = \texttt{map_matrix} \cdot \begin{bmatrix} x_i \\ y_i \\ 1 \end{bmatrix}\)
4652     *
4653     * where
4654     *
4655     * \(dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2,3\)
4656     *
4657     * @param src Coordinates of quadrangle vertices in the source image.
4658     * @param dst Coordinates of the corresponding quadrangle vertices in the destination image.
4659     * @param solveMethod method passed to cv::solve (#DecompTypes)
4660     *
4661     * SEE:  findHomography, warpPerspective, perspectiveTransform
4662     * @return automatically generated
4663     */
4664    public static Mat getPerspectiveTransform(Mat src, Mat dst, int solveMethod) {
4665        return new Mat(getPerspectiveTransform_0(src.nativeObj, dst.nativeObj, solveMethod));
4666    }
4667
4668    /**
4669     * Calculates a perspective transform from four pairs of the corresponding points.
4670     *
4671     * The function calculates the \(3 \times 3\) matrix of a perspective transform so that:
4672     *
4673     * \(\begin{bmatrix} t_i x'_i \\ t_i y'_i \\ t_i \end{bmatrix} = \texttt{map_matrix} \cdot \begin{bmatrix} x_i \\ y_i \\ 1 \end{bmatrix}\)
4674     *
4675     * where
4676     *
4677     * \(dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2,3\)
4678     *
4679     * @param src Coordinates of quadrangle vertices in the source image.
4680     * @param dst Coordinates of the corresponding quadrangle vertices in the destination image.
4681     *
4682     * SEE:  findHomography, warpPerspective, perspectiveTransform
4683     * @return automatically generated
4684     */
4685    public static Mat getPerspectiveTransform(Mat src, Mat dst) {
4686        return new Mat(getPerspectiveTransform_1(src.nativeObj, dst.nativeObj));
4687    }
4688
4689
4690    //
4691    // C++:  Mat cv::getAffineTransform(vector_Point2f src, vector_Point2f dst)
4692    //
4693
4694    public static Mat getAffineTransform(MatOfPoint2f src, MatOfPoint2f dst) {
4695        Mat src_mat = src;
4696        Mat dst_mat = dst;
4697        return new Mat(getAffineTransform_0(src_mat.nativeObj, dst_mat.nativeObj));
4698    }
4699
4700
4701    //
4702    // C++:  void cv::getRectSubPix(Mat image, Size patchSize, Point2f center, Mat& patch, int patchType = -1)
4703    //
4704
4705    /**
4706     * Retrieves a pixel rectangle from an image with sub-pixel accuracy.
4707     *
4708     * The function getRectSubPix extracts pixels from src:
4709     *
4710     * \(patch(x, y) = src(x +  \texttt{center.x} - ( \texttt{dst.cols} -1)*0.5, y +  \texttt{center.y} - ( \texttt{dst.rows} -1)*0.5)\)
4711     *
4712     * where the values of the pixels at non-integer coordinates are retrieved using bilinear
4713     * interpolation. Every channel of multi-channel images is processed independently. Also
4714     * the image should be a single channel or three channel image. While the center of the
4715     * rectangle must be inside the image, parts of the rectangle may be outside.
4716     *
4717     * @param image Source image.
4718     * @param patchSize Size of the extracted patch.
4719     * @param center Floating point coordinates of the center of the extracted rectangle within the
4720     * source image. The center must be inside the image.
4721     * @param patch Extracted patch that has the size patchSize and the same number of channels as src .
4722     * @param patchType Depth of the extracted pixels. By default, they have the same depth as src .
4723     *
4724     * SEE:  warpAffine, warpPerspective
4725     */
4726    public static void getRectSubPix(Mat image, Size patchSize, Point center, Mat patch, int patchType) {
4727        getRectSubPix_0(image.nativeObj, patchSize.width, patchSize.height, center.x, center.y, patch.nativeObj, patchType);
4728    }
4729
4730    /**
4731     * Retrieves a pixel rectangle from an image with sub-pixel accuracy.
4732     *
4733     * The function getRectSubPix extracts pixels from src:
4734     *
4735     * \(patch(x, y) = src(x +  \texttt{center.x} - ( \texttt{dst.cols} -1)*0.5, y +  \texttt{center.y} - ( \texttt{dst.rows} -1)*0.5)\)
4736     *
4737     * where the values of the pixels at non-integer coordinates are retrieved using bilinear
4738     * interpolation. Every channel of multi-channel images is processed independently. Also
4739     * the image should be a single channel or three channel image. While the center of the
4740     * rectangle must be inside the image, parts of the rectangle may be outside.
4741     *
4742     * @param image Source image.
4743     * @param patchSize Size of the extracted patch.
4744     * @param center Floating point coordinates of the center of the extracted rectangle within the
4745     * source image. The center must be inside the image.
4746     * @param patch Extracted patch that has the size patchSize and the same number of channels as src .
4747     *
4748     * SEE:  warpAffine, warpPerspective
4749     */
4750    public static void getRectSubPix(Mat image, Size patchSize, Point center, Mat patch) {
4751        getRectSubPix_1(image.nativeObj, patchSize.width, patchSize.height, center.x, center.y, patch.nativeObj);
4752    }
4753
4754
4755    //
4756    // C++:  void cv::logPolar(Mat src, Mat& dst, Point2f center, double M, int flags)
4757    //
4758
4759    /**
4760     * Remaps an image to semilog-polar coordinates space.
4761     *
4762     * @deprecated This function produces same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags+WARP_POLAR_LOG);
4763     *
4764     *
4765     * Transform the source image using the following transformation (See REF: polar_remaps_reference_image "Polar remaps reference image d)"):
4766     * \(\begin{array}{l}
4767     *   dst( \rho , \phi ) = src(x,y) \\
4768     *   dst.size() \leftarrow src.size()
4769     * \end{array}\)
4770     *
4771     * where
4772     * \(\begin{array}{l}
4773     *   I = (dx,dy) = (x - center.x,y - center.y) \\
4774     *   \rho = M \cdot log_e(\texttt{magnitude} (I)) ,\\
4775     *   \phi = Kangle \cdot \texttt{angle} (I) \\
4776     * \end{array}\)
4777     *
4778     * and
4779     * \(\begin{array}{l}
4780     *   M = src.cols / log_e(maxRadius) \\
4781     *   Kangle = src.rows / 2\Pi \\
4782     * \end{array}\)
4783     *
4784     * The function emulates the human "foveal" vision and can be used for fast scale and
4785     * rotation-invariant template matching, for object tracking and so forth.
4786     * @param src Source image
4787     * @param dst Destination image. It will have same size and type as src.
4788     * @param center The transformation center; where the output precision is maximal
4789     * @param M Magnitude scale parameter. It determines the radius of the bounding circle to transform too.
4790     * @param flags A combination of interpolation methods, see #InterpolationFlags
4791     *
4792     * <b>Note:</b>
4793     * <ul>
4794     *   <li>
4795     *    The function can not operate in-place.
4796     *   </li>
4797     *   <li>
4798     *    To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees.
4799     *   </li>
4800     * </ul>
4801     *
4802     * SEE: cv::linearPolar
4803     */
4804    @Deprecated
4805    public static void logPolar(Mat src, Mat dst, Point center, double M, int flags) {
4806        logPolar_0(src.nativeObj, dst.nativeObj, center.x, center.y, M, flags);
4807    }
4808
4809
4810    //
4811    // C++:  void cv::linearPolar(Mat src, Mat& dst, Point2f center, double maxRadius, int flags)
4812    //
4813
4814    /**
4815     * Remaps an image to polar coordinates space.
4816     *
4817     * @deprecated This function produces same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags)
4818     *
4819     *
4820     * Transform the source image using the following transformation (See REF: polar_remaps_reference_image "Polar remaps reference image c)"):
4821     * \(\begin{array}{l}
4822     *   dst( \rho , \phi ) = src(x,y) \\
4823     *   dst.size() \leftarrow src.size()
4824     * \end{array}\)
4825     *
4826     * where
4827     * \(\begin{array}{l}
4828     *   I = (dx,dy) = (x - center.x,y - center.y) \\
4829     *   \rho = Kmag \cdot \texttt{magnitude} (I) ,\\
4830     *   \phi = angle \cdot \texttt{angle} (I)
4831     * \end{array}\)
4832     *
4833     * and
4834     * \(\begin{array}{l}
4835     *   Kx = src.cols / maxRadius \\
4836     *   Ky = src.rows / 2\Pi
4837     * \end{array}\)
4838     *
4839     *
4840     * @param src Source image
4841     * @param dst Destination image. It will have same size and type as src.
4842     * @param center The transformation center;
4843     * @param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude scale parameter too.
4844     * @param flags A combination of interpolation methods, see #InterpolationFlags
4845     *
4846     * <b>Note:</b>
4847     * <ul>
4848     *   <li>
4849     *    The function can not operate in-place.
4850     *   </li>
4851     *   <li>
4852     *    To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees.
4853     *   </li>
4854     * </ul>
4855     *
4856     * SEE: cv::logPolar
4857     */
4858    @Deprecated
4859    public static void linearPolar(Mat src, Mat dst, Point center, double maxRadius, int flags) {
4860        linearPolar_0(src.nativeObj, dst.nativeObj, center.x, center.y, maxRadius, flags);
4861    }
4862
4863
4864    //
4865    // C++:  void cv::warpPolar(Mat src, Mat& dst, Size dsize, Point2f center, double maxRadius, int flags)
4866    //
4867
4868    /**
4869     * Remaps an image to polar or semilog-polar coordinates space
4870     *
4871     *  polar_remaps_reference_image
4872     * ![Polar remaps reference](pics/polar_remap_doc.png)
4873     *
4874     * Transform the source image using the following transformation:
4875     * \(
4876     * dst(\rho , \phi ) = src(x,y)
4877     * \)
4878     *
4879     * where
4880     * \(
4881     * \begin{array}{l}
4882     * \vec{I} = (x - center.x, \;y - center.y) \\
4883     * \phi = Kangle \cdot \texttt{angle} (\vec{I}) \\
4884     * \rho = \left\{\begin{matrix}
4885     * Klin \cdot \texttt{magnitude} (\vec{I}) &amp; default \\
4886     * Klog \cdot log_e(\texttt{magnitude} (\vec{I})) &amp; if \; semilog \\
4887     * \end{matrix}\right.
4888     * \end{array}
4889     * \)
4890     *
4891     * and
4892     * \(
4893     * \begin{array}{l}
4894     * Kangle = dsize.height / 2\Pi \\
4895     * Klin = dsize.width / maxRadius \\
4896     * Klog = dsize.width / log_e(maxRadius) \\
4897     * \end{array}
4898     * \)
4899     *
4900     *
4901     * \par Linear vs semilog mapping
4902     *
4903     * Polar mapping can be linear or semi-log. Add one of #WarpPolarMode to {@code flags} to specify the polar mapping mode.
4904     *
4905     * Linear is the default mode.
4906     *
4907     * The semilog mapping emulates the human "foveal" vision that permit very high acuity on the line of sight (central vision)
4908     * in contrast to peripheral vision where acuity is minor.
4909     *
4910     * \par Option on {@code dsize}:
4911     *
4912     * <ul>
4913     *   <li>
4914     *  if both values in {@code dsize &lt;=0 } (default),
4915     * the destination image will have (almost) same area of source bounding circle:
4916     * \(\begin{array}{l}
4917     * dsize.area  \leftarrow (maxRadius^2 \cdot \Pi) \\
4918     * dsize.width = \texttt{cvRound}(maxRadius) \\
4919     * dsize.height = \texttt{cvRound}(maxRadius \cdot \Pi) \\
4920     * \end{array}\)
4921     *   </li>
4922     * </ul>
4923     *
4924     *
4925     * <ul>
4926     *   <li>
4927     *  if only {@code dsize.height &lt;= 0},
4928     * the destination image area will be proportional to the bounding circle area but scaled by {@code Kx * Kx}:
4929     * \(\begin{array}{l}
4930     * dsize.height = \texttt{cvRound}(dsize.width \cdot \Pi) \\
4931     * \end{array}
4932     * \)
4933     *   </li>
4934     * </ul>
4935     *
4936     * <ul>
4937     *   <li>
4938     *  if both values in {@code dsize &gt; 0 },
4939     * the destination image will have the given size therefore the area of the bounding circle will be scaled to {@code dsize}.
4940     *   </li>
4941     * </ul>
4942     *
4943     *
4944     * \par Reverse mapping
4945     *
4946     * You can get reverse mapping adding #WARP_INVERSE_MAP to {@code flags}
4947     * \snippet polar_transforms.cpp InverseMap
4948     *
4949     * In addiction, to calculate the original coordinate from a polar mapped coordinate \((rho, phi)-&gt;(x, y)\):
4950     * \snippet polar_transforms.cpp InverseCoordinate
4951     *
4952     * @param src Source image.
4953     * @param dst Destination image. It will have same type as src.
4954     * @param dsize The destination image size (see description for valid options).
4955     * @param center The transformation center.
4956     * @param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude scale parameter too.
4957     * @param flags A combination of interpolation methods, #InterpolationFlags + #WarpPolarMode.
4958     * <ul>
4959     *   <li>
4960     *              Add #WARP_POLAR_LINEAR to select linear polar mapping (default)
4961     *   </li>
4962     *   <li>
4963     *              Add #WARP_POLAR_LOG to select semilog polar mapping
4964     *   </li>
4965     *   <li>
4966     *              Add #WARP_INVERSE_MAP for reverse mapping.
4967     *   </li>
4968     * </ul>
4969     * <b>Note:</b>
4970     * <ul>
4971     *   <li>
4972     *   The function can not operate in-place.
4973     *   </li>
4974     *   <li>
4975     *   To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees.
4976     *   </li>
4977     *   <li>
4978     *   This function uses #remap. Due to current implementation limitations the size of an input and output images should be less than 32767x32767.
4979     *   </li>
4980     * </ul>
4981     *
4982     * SEE: cv::remap
4983     */
4984    public static void warpPolar(Mat src, Mat dst, Size dsize, Point center, double maxRadius, int flags) {
4985        warpPolar_0(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, center.x, center.y, maxRadius, flags);
4986    }
4987
4988
4989    //
4990    // C++:  void cv::integral(Mat src, Mat& sum, int sdepth = -1)
4991    //
4992
4993    public static void integral(Mat src, Mat sum, int sdepth) {
4994        integral_0(src.nativeObj, sum.nativeObj, sdepth);
4995    }
4996
4997    public static void integral(Mat src, Mat sum) {
4998        integral_1(src.nativeObj, sum.nativeObj);
4999    }
5000
5001
5002    //
5003    // C++:  void cv::integral(Mat src, Mat& sum, Mat& sqsum, int sdepth = -1, int sqdepth = -1)
5004    //
5005
5006    public static void integral2(Mat src, Mat sum, Mat sqsum, int sdepth, int sqdepth) {
5007        integral2_0(src.nativeObj, sum.nativeObj, sqsum.nativeObj, sdepth, sqdepth);
5008    }
5009
5010    public static void integral2(Mat src, Mat sum, Mat sqsum, int sdepth) {
5011        integral2_1(src.nativeObj, sum.nativeObj, sqsum.nativeObj, sdepth);
5012    }
5013
5014    public static void integral2(Mat src, Mat sum, Mat sqsum) {
5015        integral2_2(src.nativeObj, sum.nativeObj, sqsum.nativeObj);
5016    }
5017
5018
5019    //
5020    // C++:  void cv::integral(Mat src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth = -1, int sqdepth = -1)
5021    //
5022
5023    /**
5024     * Calculates the integral of an image.
5025     *
5026     * The function calculates one or more integral images for the source image as follows:
5027     *
5028     * \(\texttt{sum} (X,Y) =  \sum _{x&lt;X,y&lt;Y}  \texttt{image} (x,y)\)
5029     *
5030     * \(\texttt{sqsum} (X,Y) =  \sum _{x&lt;X,y&lt;Y}  \texttt{image} (x,y)^2\)
5031     *
5032     * \(\texttt{tilted} (X,Y) =  \sum _{y&lt;Y,abs(x-X+1) \leq Y-y-1}  \texttt{image} (x,y)\)
5033     *
5034     * Using these integral images, you can calculate sum, mean, and standard deviation over a specific
5035     * up-right or rotated rectangular region of the image in a constant time, for example:
5036     *
5037     * \(\sum _{x_1 \leq x &lt; x_2,  \, y_1  \leq y &lt; y_2}  \texttt{image} (x,y) =  \texttt{sum} (x_2,y_2)- \texttt{sum} (x_1,y_2)- \texttt{sum} (x_2,y_1)+ \texttt{sum} (x_1,y_1)\)
5038     *
5039     * It makes possible to do a fast blurring or fast block correlation with a variable window size, for
5040     * example. In case of multi-channel images, sums for each channel are accumulated independently.
5041     *
5042     * As a practical example, the next figure shows the calculation of the integral of a straight
5043     * rectangle Rect(3,3,3,2) and of a tilted rectangle Rect(5,1,2,3) . The selected pixels in the
5044     * original image are shown, as well as the relative pixels in the integral images sum and tilted .
5045     *
5046     * ![integral calculation example](pics/integral.png)
5047     *
5048     * @param src input image as \(W \times H\), 8-bit or floating-point (32f or 64f).
5049     * @param sum integral image as \((W+1)\times (H+1)\) , 32-bit integer or floating-point (32f or 64f).
5050     * @param sqsum integral image for squared pixel values; it is \((W+1)\times (H+1)\), double-precision
5051     * floating-point (64f) array.
5052     * @param tilted integral for the image rotated by 45 degrees; it is \((W+1)\times (H+1)\) array with
5053     * the same data type as sum.
5054     * @param sdepth desired depth of the integral and the tilted integral images, CV_32S, CV_32F, or
5055     * CV_64F.
5056     * @param sqdepth desired depth of the integral image of squared pixel values, CV_32F or CV_64F.
5057     */
5058    public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted, int sdepth, int sqdepth) {
5059        integral3_0(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj, sdepth, sqdepth);
5060    }
5061
5062    /**
5063     * Calculates the integral of an image.
5064     *
5065     * The function calculates one or more integral images for the source image as follows:
5066     *
5067     * \(\texttt{sum} (X,Y) =  \sum _{x&lt;X,y&lt;Y}  \texttt{image} (x,y)\)
5068     *
5069     * \(\texttt{sqsum} (X,Y) =  \sum _{x&lt;X,y&lt;Y}  \texttt{image} (x,y)^2\)
5070     *
5071     * \(\texttt{tilted} (X,Y) =  \sum _{y&lt;Y,abs(x-X+1) \leq Y-y-1}  \texttt{image} (x,y)\)
5072     *
5073     * Using these integral images, you can calculate sum, mean, and standard deviation over a specific
5074     * up-right or rotated rectangular region of the image in a constant time, for example:
5075     *
5076     * \(\sum _{x_1 \leq x &lt; x_2,  \, y_1  \leq y &lt; y_2}  \texttt{image} (x,y) =  \texttt{sum} (x_2,y_2)- \texttt{sum} (x_1,y_2)- \texttt{sum} (x_2,y_1)+ \texttt{sum} (x_1,y_1)\)
5077     *
5078     * It makes possible to do a fast blurring or fast block correlation with a variable window size, for
5079     * example. In case of multi-channel images, sums for each channel are accumulated independently.
5080     *
5081     * As a practical example, the next figure shows the calculation of the integral of a straight
5082     * rectangle Rect(3,3,3,2) and of a tilted rectangle Rect(5,1,2,3) . The selected pixels in the
5083     * original image are shown, as well as the relative pixels in the integral images sum and tilted .
5084     *
5085     * ![integral calculation example](pics/integral.png)
5086     *
5087     * @param src input image as \(W \times H\), 8-bit or floating-point (32f or 64f).
5088     * @param sum integral image as \((W+1)\times (H+1)\) , 32-bit integer or floating-point (32f or 64f).
5089     * @param sqsum integral image for squared pixel values; it is \((W+1)\times (H+1)\), double-precision
5090     * floating-point (64f) array.
5091     * @param tilted integral for the image rotated by 45 degrees; it is \((W+1)\times (H+1)\) array with
5092     * the same data type as sum.
5093     * @param sdepth desired depth of the integral and the tilted integral images, CV_32S, CV_32F, or
5094     * CV_64F.
5095     */
5096    public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted, int sdepth) {
5097        integral3_1(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj, sdepth);
5098    }
5099
5100    /**
5101     * Calculates the integral of an image.
5102     *
5103     * The function calculates one or more integral images for the source image as follows:
5104     *
5105     * \(\texttt{sum} (X,Y) =  \sum _{x&lt;X,y&lt;Y}  \texttt{image} (x,y)\)
5106     *
5107     * \(\texttt{sqsum} (X,Y) =  \sum _{x&lt;X,y&lt;Y}  \texttt{image} (x,y)^2\)
5108     *
5109     * \(\texttt{tilted} (X,Y) =  \sum _{y&lt;Y,abs(x-X+1) \leq Y-y-1}  \texttt{image} (x,y)\)
5110     *
5111     * Using these integral images, you can calculate sum, mean, and standard deviation over a specific
5112     * up-right or rotated rectangular region of the image in a constant time, for example:
5113     *
5114     * \(\sum _{x_1 \leq x &lt; x_2,  \, y_1  \leq y &lt; y_2}  \texttt{image} (x,y) =  \texttt{sum} (x_2,y_2)- \texttt{sum} (x_1,y_2)- \texttt{sum} (x_2,y_1)+ \texttt{sum} (x_1,y_1)\)
5115     *
5116     * It makes possible to do a fast blurring or fast block correlation with a variable window size, for
5117     * example. In case of multi-channel images, sums for each channel are accumulated independently.
5118     *
5119     * As a practical example, the next figure shows the calculation of the integral of a straight
5120     * rectangle Rect(3,3,3,2) and of a tilted rectangle Rect(5,1,2,3) . The selected pixels in the
5121     * original image are shown, as well as the relative pixels in the integral images sum and tilted .
5122     *
5123     * ![integral calculation example](pics/integral.png)
5124     *
5125     * @param src input image as \(W \times H\), 8-bit or floating-point (32f or 64f).
5126     * @param sum integral image as \((W+1)\times (H+1)\) , 32-bit integer or floating-point (32f or 64f).
5127     * @param sqsum integral image for squared pixel values; it is \((W+1)\times (H+1)\), double-precision
5128     * floating-point (64f) array.
5129     * @param tilted integral for the image rotated by 45 degrees; it is \((W+1)\times (H+1)\) array with
5130     * the same data type as sum.
5131     * CV_64F.
5132     */
5133    public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted) {
5134        integral3_2(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj);
5135    }
5136
5137
5138    //
5139    // C++:  void cv::accumulate(Mat src, Mat& dst, Mat mask = Mat())
5140    //
5141
5142    /**
5143     * Adds an image to the accumulator image.
5144     *
5145     * The function adds src or some of its elements to dst :
5146     *
5147     * \(\texttt{dst} (x,y)  \leftarrow \texttt{dst} (x,y) +  \texttt{src} (x,y)  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
5148     *
5149     * The function supports multi-channel images. Each channel is processed independently.
5150     *
5151     * The function cv::accumulate can be used, for example, to collect statistics of a scene background
5152     * viewed by a still camera and for the further foreground-background segmentation.
5153     *
5154     * @param src Input image of type CV_8UC(n), CV_16UC(n), CV_32FC(n) or CV_64FC(n), where n is a positive integer.
5155     * @param dst %Accumulator image with the same number of channels as input image, and a depth of CV_32F or CV_64F.
5156     * @param mask Optional operation mask.
5157     *
5158     * SEE:  accumulateSquare, accumulateProduct, accumulateWeighted
5159     */
5160    public static void accumulate(Mat src, Mat dst, Mat mask) {
5161        accumulate_0(src.nativeObj, dst.nativeObj, mask.nativeObj);
5162    }
5163
5164    /**
5165     * Adds an image to the accumulator image.
5166     *
5167     * The function adds src or some of its elements to dst :
5168     *
5169     * \(\texttt{dst} (x,y)  \leftarrow \texttt{dst} (x,y) +  \texttt{src} (x,y)  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
5170     *
5171     * The function supports multi-channel images. Each channel is processed independently.
5172     *
5173     * The function cv::accumulate can be used, for example, to collect statistics of a scene background
5174     * viewed by a still camera and for the further foreground-background segmentation.
5175     *
5176     * @param src Input image of type CV_8UC(n), CV_16UC(n), CV_32FC(n) or CV_64FC(n), where n is a positive integer.
5177     * @param dst %Accumulator image with the same number of channels as input image, and a depth of CV_32F or CV_64F.
5178     *
5179     * SEE:  accumulateSquare, accumulateProduct, accumulateWeighted
5180     */
5181    public static void accumulate(Mat src, Mat dst) {
5182        accumulate_1(src.nativeObj, dst.nativeObj);
5183    }
5184
5185
5186    //
5187    // C++:  void cv::accumulateSquare(Mat src, Mat& dst, Mat mask = Mat())
5188    //
5189
5190    /**
5191     * Adds the square of a source image to the accumulator image.
5192     *
5193     * The function adds the input image src or its selected region, raised to a power of 2, to the
5194     * accumulator dst :
5195     *
5196     * \(\texttt{dst} (x,y)  \leftarrow \texttt{dst} (x,y) +  \texttt{src} (x,y)^2  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
5197     *
5198     * The function supports multi-channel images. Each channel is processed independently.
5199     *
5200     * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
5201     * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit
5202     * floating-point.
5203     * @param mask Optional operation mask.
5204     *
5205     * SEE:  accumulateSquare, accumulateProduct, accumulateWeighted
5206     */
5207    public static void accumulateSquare(Mat src, Mat dst, Mat mask) {
5208        accumulateSquare_0(src.nativeObj, dst.nativeObj, mask.nativeObj);
5209    }
5210
5211    /**
5212     * Adds the square of a source image to the accumulator image.
5213     *
5214     * The function adds the input image src or its selected region, raised to a power of 2, to the
5215     * accumulator dst :
5216     *
5217     * \(\texttt{dst} (x,y)  \leftarrow \texttt{dst} (x,y) +  \texttt{src} (x,y)^2  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
5218     *
5219     * The function supports multi-channel images. Each channel is processed independently.
5220     *
5221     * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
5222     * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit
5223     * floating-point.
5224     *
5225     * SEE:  accumulateSquare, accumulateProduct, accumulateWeighted
5226     */
5227    public static void accumulateSquare(Mat src, Mat dst) {
5228        accumulateSquare_1(src.nativeObj, dst.nativeObj);
5229    }
5230
5231
5232    //
5233    // C++:  void cv::accumulateProduct(Mat src1, Mat src2, Mat& dst, Mat mask = Mat())
5234    //
5235
5236    /**
5237     * Adds the per-element product of two input images to the accumulator image.
5238     *
5239     * The function adds the product of two images or their selected regions to the accumulator dst :
5240     *
5241     * \(\texttt{dst} (x,y)  \leftarrow \texttt{dst} (x,y) +  \texttt{src1} (x,y)  \cdot \texttt{src2} (x,y)  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
5242     *
5243     * The function supports multi-channel images. Each channel is processed independently.
5244     *
5245     * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating point.
5246     * @param src2 Second input image of the same type and the same size as src1 .
5247     * @param dst %Accumulator image with the same number of channels as input images, 32-bit or 64-bit
5248     * floating-point.
5249     * @param mask Optional operation mask.
5250     *
5251     * SEE:  accumulate, accumulateSquare, accumulateWeighted
5252     */
5253    public static void accumulateProduct(Mat src1, Mat src2, Mat dst, Mat mask) {
5254        accumulateProduct_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj);
5255    }
5256
5257    /**
5258     * Adds the per-element product of two input images to the accumulator image.
5259     *
5260     * The function adds the product of two images or their selected regions to the accumulator dst :
5261     *
5262     * \(\texttt{dst} (x,y)  \leftarrow \texttt{dst} (x,y) +  \texttt{src1} (x,y)  \cdot \texttt{src2} (x,y)  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
5263     *
5264     * The function supports multi-channel images. Each channel is processed independently.
5265     *
5266     * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating point.
5267     * @param src2 Second input image of the same type and the same size as src1 .
5268     * @param dst %Accumulator image with the same number of channels as input images, 32-bit or 64-bit
5269     * floating-point.
5270     *
5271     * SEE:  accumulate, accumulateSquare, accumulateWeighted
5272     */
5273    public static void accumulateProduct(Mat src1, Mat src2, Mat dst) {
5274        accumulateProduct_1(src1.nativeObj, src2.nativeObj, dst.nativeObj);
5275    }
5276
5277
5278    //
5279    // C++:  void cv::accumulateWeighted(Mat src, Mat& dst, double alpha, Mat mask = Mat())
5280    //
5281
5282    /**
5283     * Updates a running average.
5284     *
5285     * The function calculates the weighted sum of the input image src and the accumulator dst so that dst
5286     * becomes a running average of a frame sequence:
5287     *
5288     * \(\texttt{dst} (x,y)  \leftarrow (1- \texttt{alpha} )  \cdot \texttt{dst} (x,y) +  \texttt{alpha} \cdot \texttt{src} (x,y)  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
5289     *
5290     * That is, alpha regulates the update speed (how fast the accumulator "forgets" about earlier images).
5291     * The function supports multi-channel images. Each channel is processed independently.
5292     *
5293     * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
5294     * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit
5295     * floating-point.
5296     * @param alpha Weight of the input image.
5297     * @param mask Optional operation mask.
5298     *
5299     * SEE:  accumulate, accumulateSquare, accumulateProduct
5300     */
5301    public static void accumulateWeighted(Mat src, Mat dst, double alpha, Mat mask) {
5302        accumulateWeighted_0(src.nativeObj, dst.nativeObj, alpha, mask.nativeObj);
5303    }
5304
5305    /**
5306     * Updates a running average.
5307     *
5308     * The function calculates the weighted sum of the input image src and the accumulator dst so that dst
5309     * becomes a running average of a frame sequence:
5310     *
5311     * \(\texttt{dst} (x,y)  \leftarrow (1- \texttt{alpha} )  \cdot \texttt{dst} (x,y) +  \texttt{alpha} \cdot \texttt{src} (x,y)  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
5312     *
5313     * That is, alpha regulates the update speed (how fast the accumulator "forgets" about earlier images).
5314     * The function supports multi-channel images. Each channel is processed independently.
5315     *
5316     * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
5317     * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit
5318     * floating-point.
5319     * @param alpha Weight of the input image.
5320     *
5321     * SEE:  accumulate, accumulateSquare, accumulateProduct
5322     */
5323    public static void accumulateWeighted(Mat src, Mat dst, double alpha) {
5324        accumulateWeighted_1(src.nativeObj, dst.nativeObj, alpha);
5325    }
5326
5327
5328    //
5329    // C++:  Point2d cv::phaseCorrelate(Mat src1, Mat src2, Mat window = Mat(), double* response = 0)
5330    //
5331
5332    /**
5333     * The function is used to detect translational shifts that occur between two images.
5334     *
5335     * The operation takes advantage of the Fourier shift theorem for detecting the translational shift in
5336     * the frequency domain. It can be used for fast image registration as well as motion estimation. For
5337     * more information please see &lt;http://en.wikipedia.org/wiki/Phase_correlation&gt;
5338     *
5339     * Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded if needed
5340     * with getOptimalDFTSize.
5341     *
5342     * The function performs the following equations:
5343     * <ul>
5344     *   <li>
5345     *  First it applies a Hanning window (see &lt;http://en.wikipedia.org/wiki/Hann_function&gt;) to each
5346     * image to remove possible edge effects. This window is cached until the array size changes to speed
5347     * up processing time.
5348     *   </li>
5349     *   <li>
5350     *  Next it computes the forward DFTs of each source array:
5351     * \(\mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}\)
5352     * where \(\mathcal{F}\) is the forward DFT.
5353     *   </li>
5354     *   <li>
5355     *  It then computes the cross-power spectrum of each frequency domain array:
5356     * \(R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}\)
5357     *   </li>
5358     *   <li>
5359     *  Next the cross-correlation is converted back into the time domain via the inverse DFT:
5360     * \(r = \mathcal{F}^{-1}\{R\}\)
5361     *   </li>
5362     *   <li>
5363     *  Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to
5364     * achieve sub-pixel accuracy.
5365     * \((\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}\)
5366     *   </li>
5367     *   <li>
5368     *  If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5
5369     * centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single
5370     * peak) and will be smaller when there are multiple peaks.
5371     *   </li>
5372     * </ul>
5373     *
5374     * @param src1 Source floating point array (CV_32FC1 or CV_64FC1)
5375     * @param src2 Source floating point array (CV_32FC1 or CV_64FC1)
5376     * @param window Floating point array with windowing coefficients to reduce edge effects (optional).
5377     * @param response Signal power within the 5x5 centroid around the peak, between 0 and 1 (optional).
5378     * @return detected phase shift (sub-pixel) between the two arrays.
5379     *
5380     * SEE: dft, getOptimalDFTSize, idft, mulSpectrums createHanningWindow
5381     */
5382    public static Point phaseCorrelate(Mat src1, Mat src2, Mat window, double[] response) {
5383        double[] response_out = new double[1];
5384        Point retVal = new Point(phaseCorrelate_0(src1.nativeObj, src2.nativeObj, window.nativeObj, response_out));
5385        if(response!=null) response[0] = (double)response_out[0];
5386        return retVal;
5387    }
5388
5389    /**
5390     * The function is used to detect translational shifts that occur between two images.
5391     *
5392     * The operation takes advantage of the Fourier shift theorem for detecting the translational shift in
5393     * the frequency domain. It can be used for fast image registration as well as motion estimation. For
5394     * more information please see &lt;http://en.wikipedia.org/wiki/Phase_correlation&gt;
5395     *
5396     * Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded if needed
5397     * with getOptimalDFTSize.
5398     *
5399     * The function performs the following equations:
5400     * <ul>
5401     *   <li>
5402     *  First it applies a Hanning window (see &lt;http://en.wikipedia.org/wiki/Hann_function&gt;) to each
5403     * image to remove possible edge effects. This window is cached until the array size changes to speed
5404     * up processing time.
5405     *   </li>
5406     *   <li>
5407     *  Next it computes the forward DFTs of each source array:
5408     * \(\mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}\)
5409     * where \(\mathcal{F}\) is the forward DFT.
5410     *   </li>
5411     *   <li>
5412     *  It then computes the cross-power spectrum of each frequency domain array:
5413     * \(R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}\)
5414     *   </li>
5415     *   <li>
5416     *  Next the cross-correlation is converted back into the time domain via the inverse DFT:
5417     * \(r = \mathcal{F}^{-1}\{R\}\)
5418     *   </li>
5419     *   <li>
5420     *  Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to
5421     * achieve sub-pixel accuracy.
5422     * \((\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}\)
5423     *   </li>
5424     *   <li>
5425     *  If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5
5426     * centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single
5427     * peak) and will be smaller when there are multiple peaks.
5428     *   </li>
5429     * </ul>
5430     *
5431     * @param src1 Source floating point array (CV_32FC1 or CV_64FC1)
5432     * @param src2 Source floating point array (CV_32FC1 or CV_64FC1)
5433     * @param window Floating point array with windowing coefficients to reduce edge effects (optional).
5434     * @return detected phase shift (sub-pixel) between the two arrays.
5435     *
5436     * SEE: dft, getOptimalDFTSize, idft, mulSpectrums createHanningWindow
5437     */
5438    public static Point phaseCorrelate(Mat src1, Mat src2, Mat window) {
5439        return new Point(phaseCorrelate_1(src1.nativeObj, src2.nativeObj, window.nativeObj));
5440    }
5441
5442    /**
5443     * The function is used to detect translational shifts that occur between two images.
5444     *
5445     * The operation takes advantage of the Fourier shift theorem for detecting the translational shift in
5446     * the frequency domain. It can be used for fast image registration as well as motion estimation. For
5447     * more information please see &lt;http://en.wikipedia.org/wiki/Phase_correlation&gt;
5448     *
5449     * Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded if needed
5450     * with getOptimalDFTSize.
5451     *
5452     * The function performs the following equations:
5453     * <ul>
5454     *   <li>
5455     *  First it applies a Hanning window (see &lt;http://en.wikipedia.org/wiki/Hann_function&gt;) to each
5456     * image to remove possible edge effects. This window is cached until the array size changes to speed
5457     * up processing time.
5458     *   </li>
5459     *   <li>
5460     *  Next it computes the forward DFTs of each source array:
5461     * \(\mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}\)
5462     * where \(\mathcal{F}\) is the forward DFT.
5463     *   </li>
5464     *   <li>
5465     *  It then computes the cross-power spectrum of each frequency domain array:
5466     * \(R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}\)
5467     *   </li>
5468     *   <li>
5469     *  Next the cross-correlation is converted back into the time domain via the inverse DFT:
5470     * \(r = \mathcal{F}^{-1}\{R\}\)
5471     *   </li>
5472     *   <li>
5473     *  Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to
5474     * achieve sub-pixel accuracy.
5475     * \((\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}\)
5476     *   </li>
5477     *   <li>
5478     *  If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5
5479     * centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single
5480     * peak) and will be smaller when there are multiple peaks.
5481     *   </li>
5482     * </ul>
5483     *
5484     * @param src1 Source floating point array (CV_32FC1 or CV_64FC1)
5485     * @param src2 Source floating point array (CV_32FC1 or CV_64FC1)
5486     * @return detected phase shift (sub-pixel) between the two arrays.
5487     *
5488     * SEE: dft, getOptimalDFTSize, idft, mulSpectrums createHanningWindow
5489     */
5490    public static Point phaseCorrelate(Mat src1, Mat src2) {
5491        return new Point(phaseCorrelate_2(src1.nativeObj, src2.nativeObj));
5492    }
5493
5494
5495    //
5496    // C++:  void cv::createHanningWindow(Mat& dst, Size winSize, int type)
5497    //
5498
5499    /**
5500     * This function computes a Hanning window coefficients in two dimensions.
5501     *
5502     * See (http://en.wikipedia.org/wiki/Hann_function) and (http://en.wikipedia.org/wiki/Window_function)
5503     * for more information.
5504     *
5505     * An example is shown below:
5506     * <code>
5507     *     // create hanning window of size 100x100 and type CV_32F
5508     *     Mat hann;
5509     *     createHanningWindow(hann, Size(100, 100), CV_32F);
5510     * </code>
5511     * @param dst Destination array to place Hann coefficients in
5512     * @param winSize The window size specifications (both width and height must be &gt; 1)
5513     * @param type Created array type
5514     */
5515    public static void createHanningWindow(Mat dst, Size winSize, int type) {
5516        createHanningWindow_0(dst.nativeObj, winSize.width, winSize.height, type);
5517    }
5518
5519
5520    //
5521    // C++:  double cv::threshold(Mat src, Mat& dst, double thresh, double maxval, int type)
5522    //
5523
5524    /**
5525     * Applies a fixed-level threshold to each array element.
5526     *
5527     * The function applies fixed-level thresholding to a multiple-channel array. The function is typically
5528     * used to get a bi-level (binary) image out of a grayscale image ( #compare could be also used for
5529     * this purpose) or for removing a noise, that is, filtering out pixels with too small or too large
5530     * values. There are several types of thresholding supported by the function. They are determined by
5531     * type parameter.
5532     *
5533     * Also, the special values #THRESH_OTSU or #THRESH_TRIANGLE may be combined with one of the
5534     * above values. In these cases, the function determines the optimal threshold value using the Otsu's
5535     * or Triangle algorithm and uses it instead of the specified thresh.
5536     *
5537     * <b>Note:</b> Currently, the Otsu's and Triangle methods are implemented only for 8-bit single-channel images.
5538     *
5539     * @param src input array (multiple-channel, 8-bit or 32-bit floating point).
5540     * @param dst output array of the same size  and type and the same number of channels as src.
5541     * @param thresh threshold value.
5542     * @param maxval maximum value to use with the #THRESH_BINARY and #THRESH_BINARY_INV thresholding
5543     * types.
5544     * @param type thresholding type (see #ThresholdTypes).
5545     * @return the computed threshold value if Otsu's or Triangle methods used.
5546     *
5547     * SEE:  adaptiveThreshold, findContours, compare, min, max
5548     */
5549    public static double threshold(Mat src, Mat dst, double thresh, double maxval, int type) {
5550        return threshold_0(src.nativeObj, dst.nativeObj, thresh, maxval, type);
5551    }
5552
5553
5554    //
5555    // C++:  void cv::adaptiveThreshold(Mat src, Mat& dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C)
5556    //
5557
5558    /**
5559     * Applies an adaptive threshold to an array.
5560     *
5561     * The function transforms a grayscale image to a binary image according to the formulae:
5562     * <ul>
5563     *   <li>
5564     *    <b>THRESH_BINARY</b>
5565     *     \(dst(x,y) =  \fork{\texttt{maxValue}}{if \(src(x,y) &gt; T(x,y)\)}{0}{otherwise}\)
5566     *   </li>
5567     *   <li>
5568     *    <b>THRESH_BINARY_INV</b>
5569     *     \(dst(x,y) =  \fork{0}{if \(src(x,y) &gt; T(x,y)\)}{\texttt{maxValue}}{otherwise}\)
5570     * where \(T(x,y)\) is a threshold calculated individually for each pixel (see adaptiveMethod parameter).
5571     *   </li>
5572     * </ul>
5573     *
5574     * The function can process the image in-place.
5575     *
5576     * @param src Source 8-bit single-channel image.
5577     * @param dst Destination image of the same size and the same type as src.
5578     * @param maxValue Non-zero value assigned to the pixels for which the condition is satisfied
5579     * @param adaptiveMethod Adaptive thresholding algorithm to use, see #AdaptiveThresholdTypes.
5580     * The #BORDER_REPLICATE | #BORDER_ISOLATED is used to process boundaries.
5581     * @param thresholdType Thresholding type that must be either #THRESH_BINARY or #THRESH_BINARY_INV,
5582     * see #ThresholdTypes.
5583     * @param blockSize Size of a pixel neighborhood that is used to calculate a threshold value for the
5584     * pixel: 3, 5, 7, and so on.
5585     * @param C Constant subtracted from the mean or weighted mean (see the details below). Normally, it
5586     * is positive but may be zero or negative as well.
5587     *
5588     * SEE:  threshold, blur, GaussianBlur
5589     */
5590    public static void adaptiveThreshold(Mat src, Mat dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C) {
5591        adaptiveThreshold_0(src.nativeObj, dst.nativeObj, maxValue, adaptiveMethod, thresholdType, blockSize, C);
5592    }
5593
5594
5595    //
5596    // C++:  void cv::pyrDown(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT)
5597    //
5598
5599    /**
5600     * Blurs an image and downsamples it.
5601     *
5602     * By default, size of the output image is computed as {@code Size((src.cols+1)/2, (src.rows+1)/2)}, but in
5603     * any case, the following conditions should be satisfied:
5604     *
5605     * \(\begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}\)
5606     *
5607     * The function performs the downsampling step of the Gaussian pyramid construction. First, it
5608     * convolves the source image with the kernel:
5609     *
5610     * \(\frac{1}{256} \begin{bmatrix} 1 &amp; 4 &amp; 6 &amp; 4 &amp; 1  \\ 4 &amp; 16 &amp; 24 &amp; 16 &amp; 4  \\ 6 &amp; 24 &amp; 36 &amp; 24 &amp; 6  \\ 4 &amp; 16 &amp; 24 &amp; 16 &amp; 4  \\ 1 &amp; 4 &amp; 6 &amp; 4 &amp; 1 \end{bmatrix}\)
5611     *
5612     * Then, it downsamples the image by rejecting even rows and columns.
5613     *
5614     * @param src input image.
5615     * @param dst output image; it has the specified size and the same type as src.
5616     * @param dstsize size of the output image.
5617     * @param borderType Pixel extrapolation method, see #BorderTypes (#BORDER_CONSTANT isn't supported)
5618     */
5619    public static void pyrDown(Mat src, Mat dst, Size dstsize, int borderType) {
5620        pyrDown_0(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height, borderType);
5621    }
5622
5623    /**
5624     * Blurs an image and downsamples it.
5625     *
5626     * By default, size of the output image is computed as {@code Size((src.cols+1)/2, (src.rows+1)/2)}, but in
5627     * any case, the following conditions should be satisfied:
5628     *
5629     * \(\begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}\)
5630     *
5631     * The function performs the downsampling step of the Gaussian pyramid construction. First, it
5632     * convolves the source image with the kernel:
5633     *
5634     * \(\frac{1}{256} \begin{bmatrix} 1 &amp; 4 &amp; 6 &amp; 4 &amp; 1  \\ 4 &amp; 16 &amp; 24 &amp; 16 &amp; 4  \\ 6 &amp; 24 &amp; 36 &amp; 24 &amp; 6  \\ 4 &amp; 16 &amp; 24 &amp; 16 &amp; 4  \\ 1 &amp; 4 &amp; 6 &amp; 4 &amp; 1 \end{bmatrix}\)
5635     *
5636     * Then, it downsamples the image by rejecting even rows and columns.
5637     *
5638     * @param src input image.
5639     * @param dst output image; it has the specified size and the same type as src.
5640     * @param dstsize size of the output image.
5641     */
5642    public static void pyrDown(Mat src, Mat dst, Size dstsize) {
5643        pyrDown_1(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height);
5644    }
5645
5646    /**
5647     * Blurs an image and downsamples it.
5648     *
5649     * By default, size of the output image is computed as {@code Size((src.cols+1)/2, (src.rows+1)/2)}, but in
5650     * any case, the following conditions should be satisfied:
5651     *
5652     * \(\begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}\)
5653     *
5654     * The function performs the downsampling step of the Gaussian pyramid construction. First, it
5655     * convolves the source image with the kernel:
5656     *
5657     * \(\frac{1}{256} \begin{bmatrix} 1 &amp; 4 &amp; 6 &amp; 4 &amp; 1  \\ 4 &amp; 16 &amp; 24 &amp; 16 &amp; 4  \\ 6 &amp; 24 &amp; 36 &amp; 24 &amp; 6  \\ 4 &amp; 16 &amp; 24 &amp; 16 &amp; 4  \\ 1 &amp; 4 &amp; 6 &amp; 4 &amp; 1 \end{bmatrix}\)
5658     *
5659     * Then, it downsamples the image by rejecting even rows and columns.
5660     *
5661     * @param src input image.
5662     * @param dst output image; it has the specified size and the same type as src.
5663     */
5664    public static void pyrDown(Mat src, Mat dst) {
5665        pyrDown_2(src.nativeObj, dst.nativeObj);
5666    }
5667
5668
5669    //
5670    // C++:  void cv::pyrUp(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT)
5671    //
5672
5673    /**
5674     * Upsamples an image and then blurs it.
5675     *
5676     * By default, size of the output image is computed as {@code Size(src.cols\*2, (src.rows\*2)}, but in any
5677     * case, the following conditions should be satisfied:
5678     *
5679     * \(\begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq  ( \texttt{dstsize.width}   \mod  2)  \\ | \texttt{dstsize.height} -src.rows*2| \leq  ( \texttt{dstsize.height}   \mod  2) \end{array}\)
5680     *
5681     * The function performs the upsampling step of the Gaussian pyramid construction, though it can
5682     * actually be used to construct the Laplacian pyramid. First, it upsamples the source image by
5683     * injecting even zero rows and columns and then convolves the result with the same kernel as in
5684     * pyrDown multiplied by 4.
5685     *
5686     * @param src input image.
5687     * @param dst output image. It has the specified size and the same type as src .
5688     * @param dstsize size of the output image.
5689     * @param borderType Pixel extrapolation method, see #BorderTypes (only #BORDER_DEFAULT is supported)
5690     */
5691    public static void pyrUp(Mat src, Mat dst, Size dstsize, int borderType) {
5692        pyrUp_0(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height, borderType);
5693    }
5694
5695    /**
5696     * Upsamples an image and then blurs it.
5697     *
5698     * By default, size of the output image is computed as {@code Size(src.cols\*2, (src.rows\*2)}, but in any
5699     * case, the following conditions should be satisfied:
5700     *
5701     * \(\begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq  ( \texttt{dstsize.width}   \mod  2)  \\ | \texttt{dstsize.height} -src.rows*2| \leq  ( \texttt{dstsize.height}   \mod  2) \end{array}\)
5702     *
5703     * The function performs the upsampling step of the Gaussian pyramid construction, though it can
5704     * actually be used to construct the Laplacian pyramid. First, it upsamples the source image by
5705     * injecting even zero rows and columns and then convolves the result with the same kernel as in
5706     * pyrDown multiplied by 4.
5707     *
5708     * @param src input image.
5709     * @param dst output image. It has the specified size and the same type as src .
5710     * @param dstsize size of the output image.
5711     */
5712    public static void pyrUp(Mat src, Mat dst, Size dstsize) {
5713        pyrUp_1(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height);
5714    }
5715
5716    /**
5717     * Upsamples an image and then blurs it.
5718     *
5719     * By default, size of the output image is computed as {@code Size(src.cols\*2, (src.rows\*2)}, but in any
5720     * case, the following conditions should be satisfied:
5721     *
5722     * \(\begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq  ( \texttt{dstsize.width}   \mod  2)  \\ | \texttt{dstsize.height} -src.rows*2| \leq  ( \texttt{dstsize.height}   \mod  2) \end{array}\)
5723     *
5724     * The function performs the upsampling step of the Gaussian pyramid construction, though it can
5725     * actually be used to construct the Laplacian pyramid. First, it upsamples the source image by
5726     * injecting even zero rows and columns and then convolves the result with the same kernel as in
5727     * pyrDown multiplied by 4.
5728     *
5729     * @param src input image.
5730     * @param dst output image. It has the specified size and the same type as src .
5731     */
5732    public static void pyrUp(Mat src, Mat dst) {
5733        pyrUp_2(src.nativeObj, dst.nativeObj);
5734    }
5735
5736
5737    //
5738    // C++:  void cv::calcHist(vector_Mat images, vector_int channels, Mat mask, Mat& hist, vector_int histSize, vector_float ranges, bool accumulate = false)
5739    //
5740
5741    public static void calcHist(List<Mat> images, MatOfInt channels, Mat mask, Mat hist, MatOfInt histSize, MatOfFloat ranges, boolean accumulate) {
5742        Mat images_mat = Converters.vector_Mat_to_Mat(images);
5743        Mat channels_mat = channels;
5744        Mat histSize_mat = histSize;
5745        Mat ranges_mat = ranges;
5746        calcHist_0(images_mat.nativeObj, channels_mat.nativeObj, mask.nativeObj, hist.nativeObj, histSize_mat.nativeObj, ranges_mat.nativeObj, accumulate);
5747    }
5748
5749    public static void calcHist(List<Mat> images, MatOfInt channels, Mat mask, Mat hist, MatOfInt histSize, MatOfFloat ranges) {
5750        Mat images_mat = Converters.vector_Mat_to_Mat(images);
5751        Mat channels_mat = channels;
5752        Mat histSize_mat = histSize;
5753        Mat ranges_mat = ranges;
5754        calcHist_1(images_mat.nativeObj, channels_mat.nativeObj, mask.nativeObj, hist.nativeObj, histSize_mat.nativeObj, ranges_mat.nativeObj);
5755    }
5756
5757
5758    //
5759    // C++:  void cv::calcBackProject(vector_Mat images, vector_int channels, Mat hist, Mat& dst, vector_float ranges, double scale)
5760    //
5761
5762    public static void calcBackProject(List<Mat> images, MatOfInt channels, Mat hist, Mat dst, MatOfFloat ranges, double scale) {
5763        Mat images_mat = Converters.vector_Mat_to_Mat(images);
5764        Mat channels_mat = channels;
5765        Mat ranges_mat = ranges;
5766        calcBackProject_0(images_mat.nativeObj, channels_mat.nativeObj, hist.nativeObj, dst.nativeObj, ranges_mat.nativeObj, scale);
5767    }
5768
5769
5770    //
5771    // C++:  double cv::compareHist(Mat H1, Mat H2, int method)
5772    //
5773
5774    /**
5775     * Compares two histograms.
5776     *
5777     * The function cv::compareHist compares two dense or two sparse histograms using the specified method.
5778     *
5779     * The function returns \(d(H_1, H_2)\) .
5780     *
5781     * While the function works well with 1-, 2-, 3-dimensional dense histograms, it may not be suitable
5782     * for high-dimensional sparse histograms. In such histograms, because of aliasing and sampling
5783     * problems, the coordinates of non-zero histogram bins can slightly shift. To compare such histograms
5784     * or more general sparse configurations of weighted points, consider using the #EMD function.
5785     *
5786     * @param H1 First compared histogram.
5787     * @param H2 Second compared histogram of the same size as H1 .
5788     * @param method Comparison method, see #HistCompMethods
5789     * @return automatically generated
5790     */
5791    public static double compareHist(Mat H1, Mat H2, int method) {
5792        return compareHist_0(H1.nativeObj, H2.nativeObj, method);
5793    }
5794
5795
5796    //
5797    // C++:  void cv::equalizeHist(Mat src, Mat& dst)
5798    //
5799
5800    /**
5801     * Equalizes the histogram of a grayscale image.
5802     *
5803     * The function equalizes the histogram of the input image using the following algorithm:
5804     *
5805     * <ul>
5806     *   <li>
5807     *  Calculate the histogram \(H\) for src .
5808     *   </li>
5809     *   <li>
5810     *  Normalize the histogram so that the sum of histogram bins is 255.
5811     *   </li>
5812     *   <li>
5813     *  Compute the integral of the histogram:
5814     * \(H'_i =  \sum _{0  \le j &lt; i} H(j)\)
5815     *   </li>
5816     *   <li>
5817     *  Transform the image using \(H'\) as a look-up table: \(\texttt{dst}(x,y) = H'(\texttt{src}(x,y))\)
5818     *   </li>
5819     * </ul>
5820     *
5821     * The algorithm normalizes the brightness and increases the contrast of the image.
5822     *
5823     * @param src Source 8-bit single channel image.
5824     * @param dst Destination image of the same size and type as src .
5825     */
5826    public static void equalizeHist(Mat src, Mat dst) {
5827        equalizeHist_0(src.nativeObj, dst.nativeObj);
5828    }
5829
5830
5831    //
5832    // C++:  Ptr_CLAHE cv::createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8))
5833    //
5834
5835    /**
5836     * Creates a smart pointer to a cv::CLAHE class and initializes it.
5837     *
5838     * @param clipLimit Threshold for contrast limiting.
5839     * @param tileGridSize Size of grid for histogram equalization. Input image will be divided into
5840     * equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column.
5841     * @return automatically generated
5842     */
5843    public static CLAHE createCLAHE(double clipLimit, Size tileGridSize) {
5844        return CLAHE.__fromPtr__(createCLAHE_0(clipLimit, tileGridSize.width, tileGridSize.height));
5845    }
5846
5847    /**
5848     * Creates a smart pointer to a cv::CLAHE class and initializes it.
5849     *
5850     * @param clipLimit Threshold for contrast limiting.
5851     * equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column.
5852     * @return automatically generated
5853     */
5854    public static CLAHE createCLAHE(double clipLimit) {
5855        return CLAHE.__fromPtr__(createCLAHE_1(clipLimit));
5856    }
5857
5858    /**
5859     * Creates a smart pointer to a cv::CLAHE class and initializes it.
5860     *
5861     * equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column.
5862     * @return automatically generated
5863     */
5864    public static CLAHE createCLAHE() {
5865        return CLAHE.__fromPtr__(createCLAHE_2());
5866    }
5867
5868
5869    //
5870    // C++:  float cv::wrapperEMD(Mat signature1, Mat signature2, int distType, Mat cost = Mat(), Ptr_float& lowerBound = Ptr<float>(), Mat& flow = Mat())
5871    //
5872
5873    /**
5874     * Computes the "minimal work" distance between two weighted point configurations.
5875     *
5876     * The function computes the earth mover distance and/or a lower boundary of the distance between the
5877     * two weighted point configurations. One of the applications described in CITE: RubnerSept98,
5878     * CITE: Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation
5879     * problem that is solved using some modification of a simplex algorithm, thus the complexity is
5880     * exponential in the worst case, though, on average it is much faster. In the case of a real metric
5881     * the lower boundary can be calculated even faster (using linear-time algorithm) and it can be used
5882     * to determine roughly whether the two signatures are far enough so that they cannot relate to the
5883     * same object.
5884     *
5885     * @param signature1 First signature, a \(\texttt{size1}\times \texttt{dims}+1\) floating-point matrix.
5886     * Each row stores the point weight followed by the point coordinates. The matrix is allowed to have
5887     * a single column (weights only) if the user-defined cost matrix is used. The weights must be
5888     * non-negative and have at least one non-zero value.
5889     * @param signature2 Second signature of the same format as signature1 , though the number of rows
5890     * may be different. The total weights may be different. In this case an extra "dummy" point is added
5891     * to either signature1 or signature2. The weights must be non-negative and have at least one non-zero
5892     * value.
5893     * @param distType Used metric. See #DistanceTypes.
5894     * @param cost User-defined \(\texttt{size1}\times \texttt{size2}\) cost matrix. Also, if a cost matrix
5895     * is used, lower boundary lowerBound cannot be calculated because it needs a metric function.
5896     * signatures that is a distance between mass centers. The lower boundary may not be calculated if
5897     * the user-defined cost matrix is used, the total weights of point configurations are not equal, or
5898     * if the signatures consist of weights only (the signature matrices have a single column). You
5899     * <b>must</b> initialize \*lowerBound . If the calculated distance between mass centers is greater or
5900     * equal to \*lowerBound (it means that the signatures are far enough), the function does not
5901     * calculate EMD. In any case \*lowerBound is set to the calculated distance between mass centers on
5902     * return. Thus, if you want to calculate both distance between mass centers and EMD, \*lowerBound
5903     * should be set to 0.
5904     * @param flow Resultant \(\texttt{size1} \times \texttt{size2}\) flow matrix: \(\texttt{flow}_{i,j}\) is
5905     * a flow from \(i\) -th point of signature1 to \(j\) -th point of signature2 .
5906     * @return automatically generated
5907     */
5908    public static float EMD(Mat signature1, Mat signature2, int distType, Mat cost, Mat flow) {
5909        return EMD_0(signature1.nativeObj, signature2.nativeObj, distType, cost.nativeObj, flow.nativeObj);
5910    }
5911
5912    /**
5913     * Computes the "minimal work" distance between two weighted point configurations.
5914     *
5915     * The function computes the earth mover distance and/or a lower boundary of the distance between the
5916     * two weighted point configurations. One of the applications described in CITE: RubnerSept98,
5917     * CITE: Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation
5918     * problem that is solved using some modification of a simplex algorithm, thus the complexity is
5919     * exponential in the worst case, though, on average it is much faster. In the case of a real metric
5920     * the lower boundary can be calculated even faster (using linear-time algorithm) and it can be used
5921     * to determine roughly whether the two signatures are far enough so that they cannot relate to the
5922     * same object.
5923     *
5924     * @param signature1 First signature, a \(\texttt{size1}\times \texttt{dims}+1\) floating-point matrix.
5925     * Each row stores the point weight followed by the point coordinates. The matrix is allowed to have
5926     * a single column (weights only) if the user-defined cost matrix is used. The weights must be
5927     * non-negative and have at least one non-zero value.
5928     * @param signature2 Second signature of the same format as signature1 , though the number of rows
5929     * may be different. The total weights may be different. In this case an extra "dummy" point is added
5930     * to either signature1 or signature2. The weights must be non-negative and have at least one non-zero
5931     * value.
5932     * @param distType Used metric. See #DistanceTypes.
5933     * @param cost User-defined \(\texttt{size1}\times \texttt{size2}\) cost matrix. Also, if a cost matrix
5934     * is used, lower boundary lowerBound cannot be calculated because it needs a metric function.
5935     * signatures that is a distance between mass centers. The lower boundary may not be calculated if
5936     * the user-defined cost matrix is used, the total weights of point configurations are not equal, or
5937     * if the signatures consist of weights only (the signature matrices have a single column). You
5938     * <b>must</b> initialize \*lowerBound . If the calculated distance between mass centers is greater or
5939     * equal to \*lowerBound (it means that the signatures are far enough), the function does not
5940     * calculate EMD. In any case \*lowerBound is set to the calculated distance between mass centers on
5941     * return. Thus, if you want to calculate both distance between mass centers and EMD, \*lowerBound
5942     * should be set to 0.
5943     * a flow from \(i\) -th point of signature1 to \(j\) -th point of signature2 .
5944     * @return automatically generated
5945     */
5946    public static float EMD(Mat signature1, Mat signature2, int distType, Mat cost) {
5947        return EMD_1(signature1.nativeObj, signature2.nativeObj, distType, cost.nativeObj);
5948    }
5949
5950    /**
5951     * Computes the "minimal work" distance between two weighted point configurations.
5952     *
5953     * The function computes the earth mover distance and/or a lower boundary of the distance between the
5954     * two weighted point configurations. One of the applications described in CITE: RubnerSept98,
5955     * CITE: Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation
5956     * problem that is solved using some modification of a simplex algorithm, thus the complexity is
5957     * exponential in the worst case, though, on average it is much faster. In the case of a real metric
5958     * the lower boundary can be calculated even faster (using linear-time algorithm) and it can be used
5959     * to determine roughly whether the two signatures are far enough so that they cannot relate to the
5960     * same object.
5961     *
5962     * @param signature1 First signature, a \(\texttt{size1}\times \texttt{dims}+1\) floating-point matrix.
5963     * Each row stores the point weight followed by the point coordinates. The matrix is allowed to have
5964     * a single column (weights only) if the user-defined cost matrix is used. The weights must be
5965     * non-negative and have at least one non-zero value.
5966     * @param signature2 Second signature of the same format as signature1 , though the number of rows
5967     * may be different. The total weights may be different. In this case an extra "dummy" point is added
5968     * to either signature1 or signature2. The weights must be non-negative and have at least one non-zero
5969     * value.
5970     * @param distType Used metric. See #DistanceTypes.
5971     * is used, lower boundary lowerBound cannot be calculated because it needs a metric function.
5972     * signatures that is a distance between mass centers. The lower boundary may not be calculated if
5973     * the user-defined cost matrix is used, the total weights of point configurations are not equal, or
5974     * if the signatures consist of weights only (the signature matrices have a single column). You
5975     * <b>must</b> initialize \*lowerBound . If the calculated distance between mass centers is greater or
5976     * equal to \*lowerBound (it means that the signatures are far enough), the function does not
5977     * calculate EMD. In any case \*lowerBound is set to the calculated distance between mass centers on
5978     * return. Thus, if you want to calculate both distance between mass centers and EMD, \*lowerBound
5979     * should be set to 0.
5980     * a flow from \(i\) -th point of signature1 to \(j\) -th point of signature2 .
5981     * @return automatically generated
5982     */
5983    public static float EMD(Mat signature1, Mat signature2, int distType) {
5984        return EMD_3(signature1.nativeObj, signature2.nativeObj, distType);
5985    }
5986
5987
5988    //
5989    // C++:  void cv::watershed(Mat image, Mat& markers)
5990    //
5991
5992    /**
5993     * Performs a marker-based image segmentation using the watershed algorithm.
5994     *
5995     * The function implements one of the variants of watershed, non-parametric marker-based segmentation
5996     * algorithm, described in CITE: Meyer92 .
5997     *
5998     * Before passing the image to the function, you have to roughly outline the desired regions in the
5999     * image markers with positive (&gt;0) indices. So, every region is represented as one or more connected
6000     * components with the pixel values 1, 2, 3, and so on. Such markers can be retrieved from a binary
6001     * mask using #findContours and #drawContours (see the watershed.cpp demo). The markers are "seeds" of
6002     * the future image regions. All the other pixels in markers , whose relation to the outlined regions
6003     * is not known and should be defined by the algorithm, should be set to 0's. In the function output,
6004     * each pixel in markers is set to a value of the "seed" components or to -1 at boundaries between the
6005     * regions.
6006     *
6007     * <b>Note:</b> Any two neighbor connected components are not necessarily separated by a watershed boundary
6008     * (-1's pixels); for example, they can touch each other in the initial marker image passed to the
6009     * function.
6010     *
6011     * @param image Input 8-bit 3-channel image.
6012     * @param markers Input/output 32-bit single-channel image (map) of markers. It should have the same
6013     * size as image .
6014     *
6015     * SEE: findContours
6016     */
6017    public static void watershed(Mat image, Mat markers) {
6018        watershed_0(image.nativeObj, markers.nativeObj);
6019    }
6020
6021
6022    //
6023    // C++:  void cv::pyrMeanShiftFiltering(Mat src, Mat& dst, double sp, double sr, int maxLevel = 1, TermCriteria termcrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1))
6024    //
6025
6026    /**
6027     * Performs initial step of meanshift segmentation of an image.
6028     *
6029     * The function implements the filtering stage of meanshift segmentation, that is, the output of the
6030     * function is the filtered "posterized" image with color gradients and fine-grain texture flattened.
6031     * At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes
6032     * meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is
6033     * considered:
6034     *
6035     * \((x,y): X- \texttt{sp} \le x  \le X+ \texttt{sp} , Y- \texttt{sp} \le y  \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)||   \le \texttt{sr}\)
6036     *
6037     * where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively
6038     * (though, the algorithm does not depend on the color space used, so any 3-component color space can
6039     * be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector
6040     * (R',G',B') are found and they act as the neighborhood center on the next iteration:
6041     *
6042     * \((X,Y)~(X',Y'), (R,G,B)~(R',G',B').\)
6043     *
6044     * After the iterations over, the color components of the initial pixel (that is, the pixel from where
6045     * the iterations started) are set to the final value (average color at the last iteration):
6046     *
6047     * \(I(X,Y) &lt;- (R*,G*,B*)\)
6048     *
6049     * When maxLevel &gt; 0, the gaussian pyramid of maxLevel+1 levels is built, and the above procedure is
6050     * run on the smallest layer first. After that, the results are propagated to the larger layer and the
6051     * iterations are run again only on those pixels where the layer colors differ by more than sr from the
6052     * lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the
6053     * results will be actually different from the ones obtained by running the meanshift procedure on the
6054     * whole original image (i.e. when maxLevel==0).
6055     *
6056     * @param src The source 8-bit, 3-channel image.
6057     * @param dst The destination image of the same format and the same size as the source.
6058     * @param sp The spatial window radius.
6059     * @param sr The color window radius.
6060     * @param maxLevel Maximum level of the pyramid for the segmentation.
6061     * @param termcrit Termination criteria: when to stop meanshift iterations.
6062     */
6063    public static void pyrMeanShiftFiltering(Mat src, Mat dst, double sp, double sr, int maxLevel, TermCriteria termcrit) {
6064        pyrMeanShiftFiltering_0(src.nativeObj, dst.nativeObj, sp, sr, maxLevel, termcrit.type, termcrit.maxCount, termcrit.epsilon);
6065    }
6066
6067    /**
6068     * Performs initial step of meanshift segmentation of an image.
6069     *
6070     * The function implements the filtering stage of meanshift segmentation, that is, the output of the
6071     * function is the filtered "posterized" image with color gradients and fine-grain texture flattened.
6072     * At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes
6073     * meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is
6074     * considered:
6075     *
6076     * \((x,y): X- \texttt{sp} \le x  \le X+ \texttt{sp} , Y- \texttt{sp} \le y  \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)||   \le \texttt{sr}\)
6077     *
6078     * where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively
6079     * (though, the algorithm does not depend on the color space used, so any 3-component color space can
6080     * be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector
6081     * (R',G',B') are found and they act as the neighborhood center on the next iteration:
6082     *
6083     * \((X,Y)~(X',Y'), (R,G,B)~(R',G',B').\)
6084     *
6085     * After the iterations over, the color components of the initial pixel (that is, the pixel from where
6086     * the iterations started) are set to the final value (average color at the last iteration):
6087     *
6088     * \(I(X,Y) &lt;- (R*,G*,B*)\)
6089     *
6090     * When maxLevel &gt; 0, the gaussian pyramid of maxLevel+1 levels is built, and the above procedure is
6091     * run on the smallest layer first. After that, the results are propagated to the larger layer and the
6092     * iterations are run again only on those pixels where the layer colors differ by more than sr from the
6093     * lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the
6094     * results will be actually different from the ones obtained by running the meanshift procedure on the
6095     * whole original image (i.e. when maxLevel==0).
6096     *
6097     * @param src The source 8-bit, 3-channel image.
6098     * @param dst The destination image of the same format and the same size as the source.
6099     * @param sp The spatial window radius.
6100     * @param sr The color window radius.
6101     * @param maxLevel Maximum level of the pyramid for the segmentation.
6102     */
6103    public static void pyrMeanShiftFiltering(Mat src, Mat dst, double sp, double sr, int maxLevel) {
6104        pyrMeanShiftFiltering_1(src.nativeObj, dst.nativeObj, sp, sr, maxLevel);
6105    }
6106
6107    /**
6108     * Performs initial step of meanshift segmentation of an image.
6109     *
6110     * The function implements the filtering stage of meanshift segmentation, that is, the output of the
6111     * function is the filtered "posterized" image with color gradients and fine-grain texture flattened.
6112     * At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes
6113     * meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is
6114     * considered:
6115     *
6116     * \((x,y): X- \texttt{sp} \le x  \le X+ \texttt{sp} , Y- \texttt{sp} \le y  \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)||   \le \texttt{sr}\)
6117     *
6118     * where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively
6119     * (though, the algorithm does not depend on the color space used, so any 3-component color space can
6120     * be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector
6121     * (R',G',B') are found and they act as the neighborhood center on the next iteration:
6122     *
6123     * \((X,Y)~(X',Y'), (R,G,B)~(R',G',B').\)
6124     *
6125     * After the iterations over, the color components of the initial pixel (that is, the pixel from where
6126     * the iterations started) are set to the final value (average color at the last iteration):
6127     *
6128     * \(I(X,Y) &lt;- (R*,G*,B*)\)
6129     *
6130     * When maxLevel &gt; 0, the gaussian pyramid of maxLevel+1 levels is built, and the above procedure is
6131     * run on the smallest layer first. After that, the results are propagated to the larger layer and the
6132     * iterations are run again only on those pixels where the layer colors differ by more than sr from the
6133     * lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the
6134     * results will be actually different from the ones obtained by running the meanshift procedure on the
6135     * whole original image (i.e. when maxLevel==0).
6136     *
6137     * @param src The source 8-bit, 3-channel image.
6138     * @param dst The destination image of the same format and the same size as the source.
6139     * @param sp The spatial window radius.
6140     * @param sr The color window radius.
6141     */
6142    public static void pyrMeanShiftFiltering(Mat src, Mat dst, double sp, double sr) {
6143        pyrMeanShiftFiltering_2(src.nativeObj, dst.nativeObj, sp, sr);
6144    }
6145
6146
6147    //
6148    // C++:  void cv::grabCut(Mat img, Mat& mask, Rect rect, Mat& bgdModel, Mat& fgdModel, int iterCount, int mode = GC_EVAL)
6149    //
6150
6151    /**
6152     * Runs the GrabCut algorithm.
6153     *
6154     * The function implements the [GrabCut image segmentation algorithm](http://en.wikipedia.org/wiki/GrabCut).
6155     *
6156     * @param img Input 8-bit 3-channel image.
6157     * @param mask Input/output 8-bit single-channel mask. The mask is initialized by the function when
6158     * mode is set to #GC_INIT_WITH_RECT. Its elements may have one of the #GrabCutClasses.
6159     * @param rect ROI containing a segmented object. The pixels outside of the ROI are marked as
6160     * "obvious background". The parameter is only used when mode==#GC_INIT_WITH_RECT .
6161     * @param bgdModel Temporary array for the background model. Do not modify it while you are
6162     * processing the same image.
6163     * @param fgdModel Temporary arrays for the foreground model. Do not modify it while you are
6164     * processing the same image.
6165     * @param iterCount Number of iterations the algorithm should make before returning the result. Note
6166     * that the result can be refined with further calls with mode==#GC_INIT_WITH_MASK or
6167     * mode==GC_EVAL .
6168     * @param mode Operation mode that could be one of the #GrabCutModes
6169     */
6170    public static void grabCut(Mat img, Mat mask, Rect rect, Mat bgdModel, Mat fgdModel, int iterCount, int mode) {
6171        grabCut_0(img.nativeObj, mask.nativeObj, rect.x, rect.y, rect.width, rect.height, bgdModel.nativeObj, fgdModel.nativeObj, iterCount, mode);
6172    }
6173
6174    /**
6175     * Runs the GrabCut algorithm.
6176     *
6177     * The function implements the [GrabCut image segmentation algorithm](http://en.wikipedia.org/wiki/GrabCut).
6178     *
6179     * @param img Input 8-bit 3-channel image.
6180     * @param mask Input/output 8-bit single-channel mask. The mask is initialized by the function when
6181     * mode is set to #GC_INIT_WITH_RECT. Its elements may have one of the #GrabCutClasses.
6182     * @param rect ROI containing a segmented object. The pixels outside of the ROI are marked as
6183     * "obvious background". The parameter is only used when mode==#GC_INIT_WITH_RECT .
6184     * @param bgdModel Temporary array for the background model. Do not modify it while you are
6185     * processing the same image.
6186     * @param fgdModel Temporary arrays for the foreground model. Do not modify it while you are
6187     * processing the same image.
6188     * @param iterCount Number of iterations the algorithm should make before returning the result. Note
6189     * that the result can be refined with further calls with mode==#GC_INIT_WITH_MASK or
6190     * mode==GC_EVAL .
6191     */
6192    public static void grabCut(Mat img, Mat mask, Rect rect, Mat bgdModel, Mat fgdModel, int iterCount) {
6193        grabCut_1(img.nativeObj, mask.nativeObj, rect.x, rect.y, rect.width, rect.height, bgdModel.nativeObj, fgdModel.nativeObj, iterCount);
6194    }
6195
6196
6197    //
6198    // C++:  void cv::distanceTransform(Mat src, Mat& dst, Mat& labels, int distanceType, int maskSize, int labelType = DIST_LABEL_CCOMP)
6199    //
6200
6201    /**
6202     * Calculates the distance to the closest zero pixel for each pixel of the source image.
6203     *
6204     * The function cv::distanceTransform calculates the approximate or precise distance from every binary
6205     * image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero.
6206     *
6207     * When maskSize == #DIST_MASK_PRECISE and distanceType == #DIST_L2 , the function runs the
6208     * algorithm described in CITE: Felzenszwalb04 . This algorithm is parallelized with the TBB library.
6209     *
6210     * In other cases, the algorithm CITE: Borgefors86 is used. This means that for a pixel the function
6211     * finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical,
6212     * diagonal, or knight's move (the latest is available for a \(5\times 5\) mask). The overall
6213     * distance is calculated as a sum of these basic distances. Since the distance function should be
6214     * symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as a ), all
6215     * the diagonal shifts must have the same cost (denoted as {@code b}), and all knight's moves must have the
6216     * same cost (denoted as {@code c}). For the #DIST_C and #DIST_L1 types, the distance is calculated
6217     * precisely, whereas for #DIST_L2 (Euclidean distance) the distance can be calculated only with a
6218     * relative error (a \(5\times 5\) mask gives more accurate results). For {@code a},{@code b}, and {@code c}, OpenCV
6219     * uses the values suggested in the original paper:
6220     * <ul>
6221     *   <li>
6222     *  DIST_L1: {@code a = 1, b = 2}
6223     *   </li>
6224     *   <li>
6225     *  DIST_L2:
6226     *   <ul>
6227     *     <li>
6228     *      {@code 3 x 3}: {@code a=0.955, b=1.3693}
6229     *     </li>
6230     *     <li>
6231     *      {@code 5 x 5}: {@code a=1, b=1.4, c=2.1969}
6232     *     </li>
6233     *   </ul>
6234     *   <li>
6235     *  DIST_C: {@code a = 1, b = 1}
6236     *   </li>
6237     * </ul>
6238     *
6239     * Typically, for a fast, coarse distance estimation #DIST_L2, a \(3\times 3\) mask is used. For a
6240     * more accurate distance estimation #DIST_L2, a \(5\times 5\) mask or the precise algorithm is used.
6241     * Note that both the precise and the approximate algorithms are linear on the number of pixels.
6242     *
6243     * This variant of the function does not only compute the minimum distance for each pixel \((x, y)\)
6244     * but also identifies the nearest connected component consisting of zero pixels
6245     * (labelType==#DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==#DIST_LABEL_PIXEL). Index of the
6246     * component/pixel is stored in {@code labels(x, y)}. When labelType==#DIST_LABEL_CCOMP, the function
6247     * automatically finds connected components of zero pixels in the input image and marks them with
6248     * distinct labels. When labelType==#DIST_LABEL_PIXEL, the function scans through the input image and
6249     * marks all the zero pixels with distinct labels.
6250     *
6251     * In this mode, the complexity is still linear. That is, the function provides a very fast way to
6252     * compute the Voronoi diagram for a binary image. Currently, the second variant can use only the
6253     * approximate distance transform algorithm, i.e. maskSize=#DIST_MASK_PRECISE is not supported
6254     * yet.
6255     *
6256     * @param src 8-bit, single-channel (binary) source image.
6257     * @param dst Output image with calculated distances. It is a 8-bit or 32-bit floating-point,
6258     * single-channel image of the same size as src.
6259     * @param labels Output 2D array of labels (the discrete Voronoi diagram). It has the type
6260     * CV_32SC1 and the same size as src.
6261     * @param distanceType Type of distance, see #DistanceTypes
6262     * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks.
6263     * #DIST_MASK_PRECISE is not supported by this variant. In case of the #DIST_L1 or #DIST_C distance type,
6264     * the parameter is forced to 3 because a \(3\times 3\) mask gives the same result as \(5\times
6265     * 5\) or any larger aperture.
6266     * @param labelType Type of the label array to build, see #DistanceTransformLabelTypes.
6267     */
6268    public static void distanceTransformWithLabels(Mat src, Mat dst, Mat labels, int distanceType, int maskSize, int labelType) {
6269        distanceTransformWithLabels_0(src.nativeObj, dst.nativeObj, labels.nativeObj, distanceType, maskSize, labelType);
6270    }
6271
6272    /**
6273     * Calculates the distance to the closest zero pixel for each pixel of the source image.
6274     *
6275     * The function cv::distanceTransform calculates the approximate or precise distance from every binary
6276     * image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero.
6277     *
6278     * When maskSize == #DIST_MASK_PRECISE and distanceType == #DIST_L2 , the function runs the
6279     * algorithm described in CITE: Felzenszwalb04 . This algorithm is parallelized with the TBB library.
6280     *
6281     * In other cases, the algorithm CITE: Borgefors86 is used. This means that for a pixel the function
6282     * finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical,
6283     * diagonal, or knight's move (the latest is available for a \(5\times 5\) mask). The overall
6284     * distance is calculated as a sum of these basic distances. Since the distance function should be
6285     * symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as a ), all
6286     * the diagonal shifts must have the same cost (denoted as {@code b}), and all knight's moves must have the
6287     * same cost (denoted as {@code c}). For the #DIST_C and #DIST_L1 types, the distance is calculated
6288     * precisely, whereas for #DIST_L2 (Euclidean distance) the distance can be calculated only with a
6289     * relative error (a \(5\times 5\) mask gives more accurate results). For {@code a},{@code b}, and {@code c}, OpenCV
6290     * uses the values suggested in the original paper:
6291     * <ul>
6292     *   <li>
6293     *  DIST_L1: {@code a = 1, b = 2}
6294     *   </li>
6295     *   <li>
6296     *  DIST_L2:
6297     *   <ul>
6298     *     <li>
6299     *      {@code 3 x 3}: {@code a=0.955, b=1.3693}
6300     *     </li>
6301     *     <li>
6302     *      {@code 5 x 5}: {@code a=1, b=1.4, c=2.1969}
6303     *     </li>
6304     *   </ul>
6305     *   <li>
6306     *  DIST_C: {@code a = 1, b = 1}
6307     *   </li>
6308     * </ul>
6309     *
6310     * Typically, for a fast, coarse distance estimation #DIST_L2, a \(3\times 3\) mask is used. For a
6311     * more accurate distance estimation #DIST_L2, a \(5\times 5\) mask or the precise algorithm is used.
6312     * Note that both the precise and the approximate algorithms are linear on the number of pixels.
6313     *
6314     * This variant of the function does not only compute the minimum distance for each pixel \((x, y)\)
6315     * but also identifies the nearest connected component consisting of zero pixels
6316     * (labelType==#DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==#DIST_LABEL_PIXEL). Index of the
6317     * component/pixel is stored in {@code labels(x, y)}. When labelType==#DIST_LABEL_CCOMP, the function
6318     * automatically finds connected components of zero pixels in the input image and marks them with
6319     * distinct labels. When labelType==#DIST_LABEL_PIXEL, the function scans through the input image and
6320     * marks all the zero pixels with distinct labels.
6321     *
6322     * In this mode, the complexity is still linear. That is, the function provides a very fast way to
6323     * compute the Voronoi diagram for a binary image. Currently, the second variant can use only the
6324     * approximate distance transform algorithm, i.e. maskSize=#DIST_MASK_PRECISE is not supported
6325     * yet.
6326     *
6327     * @param src 8-bit, single-channel (binary) source image.
6328     * @param dst Output image with calculated distances. It is a 8-bit or 32-bit floating-point,
6329     * single-channel image of the same size as src.
6330     * @param labels Output 2D array of labels (the discrete Voronoi diagram). It has the type
6331     * CV_32SC1 and the same size as src.
6332     * @param distanceType Type of distance, see #DistanceTypes
6333     * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks.
6334     * #DIST_MASK_PRECISE is not supported by this variant. In case of the #DIST_L1 or #DIST_C distance type,
6335     * the parameter is forced to 3 because a \(3\times 3\) mask gives the same result as \(5\times
6336     * 5\) or any larger aperture.
6337     */
6338    public static void distanceTransformWithLabels(Mat src, Mat dst, Mat labels, int distanceType, int maskSize) {
6339        distanceTransformWithLabels_1(src.nativeObj, dst.nativeObj, labels.nativeObj, distanceType, maskSize);
6340    }
6341
6342
6343    //
6344    // C++:  void cv::distanceTransform(Mat src, Mat& dst, int distanceType, int maskSize, int dstType = CV_32F)
6345    //
6346
6347    /**
6348     *
6349     * @param src 8-bit, single-channel (binary) source image.
6350     * @param dst Output image with calculated distances. It is a 8-bit or 32-bit floating-point,
6351     * single-channel image of the same size as src .
6352     * @param distanceType Type of distance, see #DistanceTypes
6353     * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks. In case of the
6354     * #DIST_L1 or #DIST_C distance type, the parameter is forced to 3 because a \(3\times 3\) mask gives
6355     * the same result as \(5\times 5\) or any larger aperture.
6356     * @param dstType Type of output image. It can be CV_8U or CV_32F. Type CV_8U can be used only for
6357     * the first variant of the function and distanceType == #DIST_L1.
6358     */
6359    public static void distanceTransform(Mat src, Mat dst, int distanceType, int maskSize, int dstType) {
6360        distanceTransform_0(src.nativeObj, dst.nativeObj, distanceType, maskSize, dstType);
6361    }
6362
6363    /**
6364     *
6365     * @param src 8-bit, single-channel (binary) source image.
6366     * @param dst Output image with calculated distances. It is a 8-bit or 32-bit floating-point,
6367     * single-channel image of the same size as src .
6368     * @param distanceType Type of distance, see #DistanceTypes
6369     * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks. In case of the
6370     * #DIST_L1 or #DIST_C distance type, the parameter is forced to 3 because a \(3\times 3\) mask gives
6371     * the same result as \(5\times 5\) or any larger aperture.
6372     * the first variant of the function and distanceType == #DIST_L1.
6373     */
6374    public static void distanceTransform(Mat src, Mat dst, int distanceType, int maskSize) {
6375        distanceTransform_1(src.nativeObj, dst.nativeObj, distanceType, maskSize);
6376    }
6377
6378
6379    //
6380    // C++:  int cv::floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), int flags = 4)
6381    //
6382
6383    /**
6384     * Fills a connected component with the given color.
6385     *
6386     * The function cv::floodFill fills a connected component starting from the seed point with the specified
6387     * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
6388     * pixel at \((x,y)\) is considered to belong to the repainted domain if:
6389     *
6390     * <ul>
6391     *   <li>
6392     *  in case of a grayscale image and floating range
6393     * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} (x',y')+ \texttt{upDiff}\)
6394     *   </li>
6395     * </ul>
6396     *
6397     *
6398     * <ul>
6399     *   <li>
6400     *  in case of a grayscale image and fixed range
6401     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\)
6402     *   </li>
6403     * </ul>
6404     *
6405     *
6406     * <ul>
6407     *   <li>
6408     *  in case of a color image and floating range
6409     * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\)
6410     * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\)
6411     * and
6412     * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\)
6413     *   </li>
6414     * </ul>
6415     *
6416     *
6417     * <ul>
6418     *   <li>
6419     *  in case of a color image and fixed range
6420     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\)
6421     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\)
6422     * and
6423     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\)
6424     *   </li>
6425     * </ul>
6426     *
6427     *
6428     * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the
6429     * component. That is, to be added to the connected component, a color/brightness of the pixel should
6430     * be close enough to:
6431     * <ul>
6432     *   <li>
6433     *  Color/brightness of one of its neighbors that already belong to the connected component in case
6434     * of a floating range.
6435     *   </li>
6436     *   <li>
6437     *  Color/brightness of the seed point in case of a fixed range.
6438     *   </li>
6439     * </ul>
6440     *
6441     * Use these functions to either mark a connected component with the specified color in-place, or build
6442     * a mask and then extract the contour, or copy the region to another image, and so on.
6443     *
6444     * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
6445     * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
6446     * the details below.
6447     * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
6448     * taller than image. Since this is both an input and output parameter, you must take responsibility
6449     * of initializing it. Flood-filling cannot go across non-zero pixels in the input mask. For example,
6450     * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
6451     * mask corresponding to filled pixels in the image are set to 1 or to the a value specified in flags
6452     * as described below. Additionally, the function fills the border of the mask with ones to simplify
6453     * internal processing. It is therefore possible to use the same mask in multiple calls to the function
6454     * to make sure the filled areas do not overlap.
6455     * @param seedPoint Starting point.
6456     * @param newVal New value of the repainted domain pixels.
6457     * @param loDiff Maximal lower brightness/color difference between the currently observed pixel and
6458     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6459     * @param upDiff Maximal upper brightness/color difference between the currently observed pixel and
6460     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6461     * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the
6462     * repainted domain.
6463     * @param flags Operation flags. The first 8 bits contain a connectivity value. The default value of
6464     * 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A
6465     * connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner)
6466     * will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill
6467     * the mask (the default value is 1). For example, 4 | ( 255 &lt;&lt; 8 ) will consider 4 nearest
6468     * neighbours and fill the mask with a value of 255. The following additional options occupy higher
6469     * bits and therefore may be further combined with the connectivity and mask fill values using
6470     * bit-wise or (|), see #FloodFillFlags.
6471     *
6472     * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the
6473     * pixel \((x+1, y+1)\) in the mask .
6474     *
6475     * SEE: findContours
6476     * @return automatically generated
6477     */
6478    public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect, Scalar loDiff, Scalar upDiff, int flags) {
6479        double[] rect_out = new double[4];
6480        int retVal = floodFill_0(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out, loDiff.val[0], loDiff.val[1], loDiff.val[2], loDiff.val[3], upDiff.val[0], upDiff.val[1], upDiff.val[2], upDiff.val[3], flags);
6481        if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } 
6482        return retVal;
6483    }
6484
6485    /**
6486     * Fills a connected component with the given color.
6487     *
6488     * The function cv::floodFill fills a connected component starting from the seed point with the specified
6489     * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
6490     * pixel at \((x,y)\) is considered to belong to the repainted domain if:
6491     *
6492     * <ul>
6493     *   <li>
6494     *  in case of a grayscale image and floating range
6495     * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} (x',y')+ \texttt{upDiff}\)
6496     *   </li>
6497     * </ul>
6498     *
6499     *
6500     * <ul>
6501     *   <li>
6502     *  in case of a grayscale image and fixed range
6503     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\)
6504     *   </li>
6505     * </ul>
6506     *
6507     *
6508     * <ul>
6509     *   <li>
6510     *  in case of a color image and floating range
6511     * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\)
6512     * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\)
6513     * and
6514     * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\)
6515     *   </li>
6516     * </ul>
6517     *
6518     *
6519     * <ul>
6520     *   <li>
6521     *  in case of a color image and fixed range
6522     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\)
6523     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\)
6524     * and
6525     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\)
6526     *   </li>
6527     * </ul>
6528     *
6529     *
6530     * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the
6531     * component. That is, to be added to the connected component, a color/brightness of the pixel should
6532     * be close enough to:
6533     * <ul>
6534     *   <li>
6535     *  Color/brightness of one of its neighbors that already belong to the connected component in case
6536     * of a floating range.
6537     *   </li>
6538     *   <li>
6539     *  Color/brightness of the seed point in case of a fixed range.
6540     *   </li>
6541     * </ul>
6542     *
6543     * Use these functions to either mark a connected component with the specified color in-place, or build
6544     * a mask and then extract the contour, or copy the region to another image, and so on.
6545     *
6546     * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
6547     * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
6548     * the details below.
6549     * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
6550     * taller than image. Since this is both an input and output parameter, you must take responsibility
6551     * of initializing it. Flood-filling cannot go across non-zero pixels in the input mask. For example,
6552     * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
6553     * mask corresponding to filled pixels in the image are set to 1 or to the a value specified in flags
6554     * as described below. Additionally, the function fills the border of the mask with ones to simplify
6555     * internal processing. It is therefore possible to use the same mask in multiple calls to the function
6556     * to make sure the filled areas do not overlap.
6557     * @param seedPoint Starting point.
6558     * @param newVal New value of the repainted domain pixels.
6559     * @param loDiff Maximal lower brightness/color difference between the currently observed pixel and
6560     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6561     * @param upDiff Maximal upper brightness/color difference between the currently observed pixel and
6562     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6563     * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the
6564     * repainted domain.
6565     * 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A
6566     * connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner)
6567     * will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill
6568     * the mask (the default value is 1). For example, 4 | ( 255 &lt;&lt; 8 ) will consider 4 nearest
6569     * neighbours and fill the mask with a value of 255. The following additional options occupy higher
6570     * bits and therefore may be further combined with the connectivity and mask fill values using
6571     * bit-wise or (|), see #FloodFillFlags.
6572     *
6573     * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the
6574     * pixel \((x+1, y+1)\) in the mask .
6575     *
6576     * SEE: findContours
6577     * @return automatically generated
6578     */
6579    public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect, Scalar loDiff, Scalar upDiff) {
6580        double[] rect_out = new double[4];
6581        int retVal = floodFill_1(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out, loDiff.val[0], loDiff.val[1], loDiff.val[2], loDiff.val[3], upDiff.val[0], upDiff.val[1], upDiff.val[2], upDiff.val[3]);
6582        if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } 
6583        return retVal;
6584    }
6585
6586    /**
6587     * Fills a connected component with the given color.
6588     *
6589     * The function cv::floodFill fills a connected component starting from the seed point with the specified
6590     * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
6591     * pixel at \((x,y)\) is considered to belong to the repainted domain if:
6592     *
6593     * <ul>
6594     *   <li>
6595     *  in case of a grayscale image and floating range
6596     * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} (x',y')+ \texttt{upDiff}\)
6597     *   </li>
6598     * </ul>
6599     *
6600     *
6601     * <ul>
6602     *   <li>
6603     *  in case of a grayscale image and fixed range
6604     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\)
6605     *   </li>
6606     * </ul>
6607     *
6608     *
6609     * <ul>
6610     *   <li>
6611     *  in case of a color image and floating range
6612     * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\)
6613     * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\)
6614     * and
6615     * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\)
6616     *   </li>
6617     * </ul>
6618     *
6619     *
6620     * <ul>
6621     *   <li>
6622     *  in case of a color image and fixed range
6623     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\)
6624     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\)
6625     * and
6626     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\)
6627     *   </li>
6628     * </ul>
6629     *
6630     *
6631     * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the
6632     * component. That is, to be added to the connected component, a color/brightness of the pixel should
6633     * be close enough to:
6634     * <ul>
6635     *   <li>
6636     *  Color/brightness of one of its neighbors that already belong to the connected component in case
6637     * of a floating range.
6638     *   </li>
6639     *   <li>
6640     *  Color/brightness of the seed point in case of a fixed range.
6641     *   </li>
6642     * </ul>
6643     *
6644     * Use these functions to either mark a connected component with the specified color in-place, or build
6645     * a mask and then extract the contour, or copy the region to another image, and so on.
6646     *
6647     * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
6648     * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
6649     * the details below.
6650     * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
6651     * taller than image. Since this is both an input and output parameter, you must take responsibility
6652     * of initializing it. Flood-filling cannot go across non-zero pixels in the input mask. For example,
6653     * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
6654     * mask corresponding to filled pixels in the image are set to 1 or to the a value specified in flags
6655     * as described below. Additionally, the function fills the border of the mask with ones to simplify
6656     * internal processing. It is therefore possible to use the same mask in multiple calls to the function
6657     * to make sure the filled areas do not overlap.
6658     * @param seedPoint Starting point.
6659     * @param newVal New value of the repainted domain pixels.
6660     * @param loDiff Maximal lower brightness/color difference between the currently observed pixel and
6661     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6662     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6663     * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the
6664     * repainted domain.
6665     * 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A
6666     * connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner)
6667     * will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill
6668     * the mask (the default value is 1). For example, 4 | ( 255 &lt;&lt; 8 ) will consider 4 nearest
6669     * neighbours and fill the mask with a value of 255. The following additional options occupy higher
6670     * bits and therefore may be further combined with the connectivity and mask fill values using
6671     * bit-wise or (|), see #FloodFillFlags.
6672     *
6673     * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the
6674     * pixel \((x+1, y+1)\) in the mask .
6675     *
6676     * SEE: findContours
6677     * @return automatically generated
6678     */
6679    public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect, Scalar loDiff) {
6680        double[] rect_out = new double[4];
6681        int retVal = floodFill_2(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out, loDiff.val[0], loDiff.val[1], loDiff.val[2], loDiff.val[3]);
6682        if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } 
6683        return retVal;
6684    }
6685
6686    /**
6687     * Fills a connected component with the given color.
6688     *
6689     * The function cv::floodFill fills a connected component starting from the seed point with the specified
6690     * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
6691     * pixel at \((x,y)\) is considered to belong to the repainted domain if:
6692     *
6693     * <ul>
6694     *   <li>
6695     *  in case of a grayscale image and floating range
6696     * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} (x',y')+ \texttt{upDiff}\)
6697     *   </li>
6698     * </ul>
6699     *
6700     *
6701     * <ul>
6702     *   <li>
6703     *  in case of a grayscale image and fixed range
6704     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\)
6705     *   </li>
6706     * </ul>
6707     *
6708     *
6709     * <ul>
6710     *   <li>
6711     *  in case of a color image and floating range
6712     * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\)
6713     * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\)
6714     * and
6715     * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\)
6716     *   </li>
6717     * </ul>
6718     *
6719     *
6720     * <ul>
6721     *   <li>
6722     *  in case of a color image and fixed range
6723     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\)
6724     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\)
6725     * and
6726     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\)
6727     *   </li>
6728     * </ul>
6729     *
6730     *
6731     * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the
6732     * component. That is, to be added to the connected component, a color/brightness of the pixel should
6733     * be close enough to:
6734     * <ul>
6735     *   <li>
6736     *  Color/brightness of one of its neighbors that already belong to the connected component in case
6737     * of a floating range.
6738     *   </li>
6739     *   <li>
6740     *  Color/brightness of the seed point in case of a fixed range.
6741     *   </li>
6742     * </ul>
6743     *
6744     * Use these functions to either mark a connected component with the specified color in-place, or build
6745     * a mask and then extract the contour, or copy the region to another image, and so on.
6746     *
6747     * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
6748     * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
6749     * the details below.
6750     * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
6751     * taller than image. Since this is both an input and output parameter, you must take responsibility
6752     * of initializing it. Flood-filling cannot go across non-zero pixels in the input mask. For example,
6753     * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
6754     * mask corresponding to filled pixels in the image are set to 1 or to the a value specified in flags
6755     * as described below. Additionally, the function fills the border of the mask with ones to simplify
6756     * internal processing. It is therefore possible to use the same mask in multiple calls to the function
6757     * to make sure the filled areas do not overlap.
6758     * @param seedPoint Starting point.
6759     * @param newVal New value of the repainted domain pixels.
6760     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6761     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6762     * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the
6763     * repainted domain.
6764     * 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A
6765     * connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner)
6766     * will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill
6767     * the mask (the default value is 1). For example, 4 | ( 255 &lt;&lt; 8 ) will consider 4 nearest
6768     * neighbours and fill the mask with a value of 255. The following additional options occupy higher
6769     * bits and therefore may be further combined with the connectivity and mask fill values using
6770     * bit-wise or (|), see #FloodFillFlags.
6771     *
6772     * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the
6773     * pixel \((x+1, y+1)\) in the mask .
6774     *
6775     * SEE: findContours
6776     * @return automatically generated
6777     */
6778    public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect) {
6779        double[] rect_out = new double[4];
6780        int retVal = floodFill_3(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out);
6781        if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } 
6782        return retVal;
6783    }
6784
6785    /**
6786     * Fills a connected component with the given color.
6787     *
6788     * The function cv::floodFill fills a connected component starting from the seed point with the specified
6789     * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
6790     * pixel at \((x,y)\) is considered to belong to the repainted domain if:
6791     *
6792     * <ul>
6793     *   <li>
6794     *  in case of a grayscale image and floating range
6795     * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} (x',y')+ \texttt{upDiff}\)
6796     *   </li>
6797     * </ul>
6798     *
6799     *
6800     * <ul>
6801     *   <li>
6802     *  in case of a grayscale image and fixed range
6803     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\)
6804     *   </li>
6805     * </ul>
6806     *
6807     *
6808     * <ul>
6809     *   <li>
6810     *  in case of a color image and floating range
6811     * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\)
6812     * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\)
6813     * and
6814     * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\)
6815     *   </li>
6816     * </ul>
6817     *
6818     *
6819     * <ul>
6820     *   <li>
6821     *  in case of a color image and fixed range
6822     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\)
6823     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\)
6824     * and
6825     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\)
6826     *   </li>
6827     * </ul>
6828     *
6829     *
6830     * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the
6831     * component. That is, to be added to the connected component, a color/brightness of the pixel should
6832     * be close enough to:
6833     * <ul>
6834     *   <li>
6835     *  Color/brightness of one of its neighbors that already belong to the connected component in case
6836     * of a floating range.
6837     *   </li>
6838     *   <li>
6839     *  Color/brightness of the seed point in case of a fixed range.
6840     *   </li>
6841     * </ul>
6842     *
6843     * Use these functions to either mark a connected component with the specified color in-place, or build
6844     * a mask and then extract the contour, or copy the region to another image, and so on.
6845     *
6846     * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
6847     * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
6848     * the details below.
6849     * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
6850     * taller than image. Since this is both an input and output parameter, you must take responsibility
6851     * of initializing it. Flood-filling cannot go across non-zero pixels in the input mask. For example,
6852     * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
6853     * mask corresponding to filled pixels in the image are set to 1 or to the a value specified in flags
6854     * as described below. Additionally, the function fills the border of the mask with ones to simplify
6855     * internal processing. It is therefore possible to use the same mask in multiple calls to the function
6856     * to make sure the filled areas do not overlap.
6857     * @param seedPoint Starting point.
6858     * @param newVal New value of the repainted domain pixels.
6859     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6860     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6861     * repainted domain.
6862     * 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A
6863     * connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner)
6864     * will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill
6865     * the mask (the default value is 1). For example, 4 | ( 255 &lt;&lt; 8 ) will consider 4 nearest
6866     * neighbours and fill the mask with a value of 255. The following additional options occupy higher
6867     * bits and therefore may be further combined with the connectivity and mask fill values using
6868     * bit-wise or (|), see #FloodFillFlags.
6869     *
6870     * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the
6871     * pixel \((x+1, y+1)\) in the mask .
6872     *
6873     * SEE: findContours
6874     * @return automatically generated
6875     */
6876    public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal) {
6877        return floodFill_4(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3]);
6878    }
6879
6880
6881    //
6882    // C++:  void cv::blendLinear(Mat src1, Mat src2, Mat weights1, Mat weights2, Mat& dst)
6883    //
6884
6885    public static void blendLinear(Mat src1, Mat src2, Mat weights1, Mat weights2, Mat dst) {
6886        blendLinear_0(src1.nativeObj, src2.nativeObj, weights1.nativeObj, weights2.nativeObj, dst.nativeObj);
6887    }
6888
6889
6890    //
6891    // C++:  void cv::cvtColor(Mat src, Mat& dst, int code, int dstCn = 0)
6892    //
6893
6894    /**
6895     * Converts an image from one color space to another.
6896     *
6897     * The function converts an input image from one color space to another. In case of a transformation
6898     * to-from RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note
6899     * that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the
6900     * bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue
6901     * component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and
6902     * sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on.
6903     *
6904     * The conventional ranges for R, G, and B channel values are:
6905     * <ul>
6906     *   <li>
6907     *    0 to 255 for CV_8U images
6908     *   </li>
6909     *   <li>
6910     *    0 to 65535 for CV_16U images
6911     *   </li>
6912     *   <li>
6913     *    0 to 1 for CV_32F images
6914     *   </li>
6915     * </ul>
6916     *
6917     * In case of linear transformations, the range does not matter. But in case of a non-linear
6918     * transformation, an input RGB image should be normalized to the proper value range to get the correct
6919     * results, for example, for RGB \(\rightarrow\) L\*u\*v\* transformation. For example, if you have a
6920     * 32-bit floating-point image directly converted from an 8-bit image without any scaling, then it will
6921     * have the 0..255 value range instead of 0..1 assumed by the function. So, before calling #cvtColor ,
6922     * you need first to scale the image down:
6923     * <code>
6924     *     img *= 1./255;
6925     *     cvtColor(img, img, COLOR_BGR2Luv);
6926     * </code>
6927     * If you use #cvtColor with 8-bit images, the conversion will have some information lost. For many
6928     * applications, this will not be noticeable but it is recommended to use 32-bit images in applications
6929     * that need the full range of colors or that convert an image before an operation and then convert
6930     * back.
6931     *
6932     * If conversion adds the alpha channel, its value will set to the maximum of corresponding channel
6933     * range: 255 for CV_8U, 65535 for CV_16U, 1 for CV_32F.
6934     *
6935     * @param src input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision
6936     * floating-point.
6937     * @param dst output image of the same size and depth as src.
6938     * @param code color space conversion code (see #ColorConversionCodes).
6939     * @param dstCn number of channels in the destination image; if the parameter is 0, the number of the
6940     * channels is derived automatically from src and code.
6941     *
6942     * SEE: REF: imgproc_color_conversions
6943     */
6944    public static void cvtColor(Mat src, Mat dst, int code, int dstCn) {
6945        cvtColor_0(src.nativeObj, dst.nativeObj, code, dstCn);
6946    }
6947
6948    /**
6949     * Converts an image from one color space to another.
6950     *
6951     * The function converts an input image from one color space to another. In case of a transformation
6952     * to-from RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note
6953     * that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the
6954     * bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue
6955     * component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and
6956     * sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on.
6957     *
6958     * The conventional ranges for R, G, and B channel values are:
6959     * <ul>
6960     *   <li>
6961     *    0 to 255 for CV_8U images
6962     *   </li>
6963     *   <li>
6964     *    0 to 65535 for CV_16U images
6965     *   </li>
6966     *   <li>
6967     *    0 to 1 for CV_32F images
6968     *   </li>
6969     * </ul>
6970     *
6971     * In case of linear transformations, the range does not matter. But in case of a non-linear
6972     * transformation, an input RGB image should be normalized to the proper value range to get the correct
6973     * results, for example, for RGB \(\rightarrow\) L\*u\*v\* transformation. For example, if you have a
6974     * 32-bit floating-point image directly converted from an 8-bit image without any scaling, then it will
6975     * have the 0..255 value range instead of 0..1 assumed by the function. So, before calling #cvtColor ,
6976     * you need first to scale the image down:
6977     * <code>
6978     *     img *= 1./255;
6979     *     cvtColor(img, img, COLOR_BGR2Luv);
6980     * </code>
6981     * If you use #cvtColor with 8-bit images, the conversion will have some information lost. For many
6982     * applications, this will not be noticeable but it is recommended to use 32-bit images in applications
6983     * that need the full range of colors or that convert an image before an operation and then convert
6984     * back.
6985     *
6986     * If conversion adds the alpha channel, its value will set to the maximum of corresponding channel
6987     * range: 255 for CV_8U, 65535 for CV_16U, 1 for CV_32F.
6988     *
6989     * @param src input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision
6990     * floating-point.
6991     * @param dst output image of the same size and depth as src.
6992     * @param code color space conversion code (see #ColorConversionCodes).
6993     * channels is derived automatically from src and code.
6994     *
6995     * SEE: REF: imgproc_color_conversions
6996     */
6997    public static void cvtColor(Mat src, Mat dst, int code) {
6998        cvtColor_1(src.nativeObj, dst.nativeObj, code);
6999    }
7000
7001
7002    //
7003    // C++:  void cv::cvtColorTwoPlane(Mat src1, Mat src2, Mat& dst, int code)
7004    //
7005
7006    /**
7007     * Converts an image from one color space to another where the source image is
7008     * stored in two planes.
7009     *
7010     * This function only supports YUV420 to RGB conversion as of now.
7011     *
7012     * <ul>
7013     *   <li>
7014     *  #COLOR_YUV2BGR_NV12
7015     *   </li>
7016     *   <li>
7017     *  #COLOR_YUV2RGB_NV12
7018     *   </li>
7019     *   <li>
7020     *  #COLOR_YUV2BGRA_NV12
7021     *   </li>
7022     *   <li>
7023     *  #COLOR_YUV2RGBA_NV12
7024     *   </li>
7025     *   <li>
7026     *  #COLOR_YUV2BGR_NV21
7027     *   </li>
7028     *   <li>
7029     *  #COLOR_YUV2RGB_NV21
7030     *   </li>
7031     *   <li>
7032     *  #COLOR_YUV2BGRA_NV21
7033     *   </li>
7034     *   <li>
7035     *  #COLOR_YUV2RGBA_NV21
7036     *   </li>
7037     * </ul>
7038     * @param src1 automatically generated
7039     * @param src2 automatically generated
7040     * @param dst automatically generated
7041     * @param code automatically generated
7042     */
7043    public static void cvtColorTwoPlane(Mat src1, Mat src2, Mat dst, int code) {
7044        cvtColorTwoPlane_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, code);
7045    }
7046
7047
7048    //
7049    // C++:  void cv::demosaicing(Mat src, Mat& dst, int code, int dstCn = 0)
7050    //
7051
7052    /**
7053     * main function for all demosaicing processes
7054     *
7055     * @param src input image: 8-bit unsigned or 16-bit unsigned.
7056     * @param dst output image of the same size and depth as src.
7057     * @param code Color space conversion code (see the description below).
7058     * @param dstCn number of channels in the destination image; if the parameter is 0, the number of the
7059     * channels is derived automatically from src and code.
7060     *
7061     * The function can do the following transformations:
7062     *
7063     * <ul>
7064     *   <li>
7065     *    Demosaicing using bilinear interpolation
7066     *   </li>
7067     * </ul>
7068     *
7069     *     #COLOR_BayerBG2BGR , #COLOR_BayerGB2BGR , #COLOR_BayerRG2BGR , #COLOR_BayerGR2BGR
7070     *
7071     *     #COLOR_BayerBG2GRAY , #COLOR_BayerGB2GRAY , #COLOR_BayerRG2GRAY , #COLOR_BayerGR2GRAY
7072     *
7073     * <ul>
7074     *   <li>
7075     *    Demosaicing using Variable Number of Gradients.
7076     *   </li>
7077     * </ul>
7078     *
7079     *     #COLOR_BayerBG2BGR_VNG , #COLOR_BayerGB2BGR_VNG , #COLOR_BayerRG2BGR_VNG , #COLOR_BayerGR2BGR_VNG
7080     *
7081     * <ul>
7082     *   <li>
7083     *    Edge-Aware Demosaicing.
7084     *   </li>
7085     * </ul>
7086     *
7087     *     #COLOR_BayerBG2BGR_EA , #COLOR_BayerGB2BGR_EA , #COLOR_BayerRG2BGR_EA , #COLOR_BayerGR2BGR_EA
7088     *
7089     * <ul>
7090     *   <li>
7091     *    Demosaicing with alpha channel
7092     *   </li>
7093     * </ul>
7094     *
7095     *     #COLOR_BayerBG2BGRA , #COLOR_BayerGB2BGRA , #COLOR_BayerRG2BGRA , #COLOR_BayerGR2BGRA
7096     *
7097     * SEE: cvtColor
7098     */
7099    public static void demosaicing(Mat src, Mat dst, int code, int dstCn) {
7100        demosaicing_0(src.nativeObj, dst.nativeObj, code, dstCn);
7101    }
7102
7103    /**
7104     * main function for all demosaicing processes
7105     *
7106     * @param src input image: 8-bit unsigned or 16-bit unsigned.
7107     * @param dst output image of the same size and depth as src.
7108     * @param code Color space conversion code (see the description below).
7109     * channels is derived automatically from src and code.
7110     *
7111     * The function can do the following transformations:
7112     *
7113     * <ul>
7114     *   <li>
7115     *    Demosaicing using bilinear interpolation
7116     *   </li>
7117     * </ul>
7118     *
7119     *     #COLOR_BayerBG2BGR , #COLOR_BayerGB2BGR , #COLOR_BayerRG2BGR , #COLOR_BayerGR2BGR
7120     *
7121     *     #COLOR_BayerBG2GRAY , #COLOR_BayerGB2GRAY , #COLOR_BayerRG2GRAY , #COLOR_BayerGR2GRAY
7122     *
7123     * <ul>
7124     *   <li>
7125     *    Demosaicing using Variable Number of Gradients.
7126     *   </li>
7127     * </ul>
7128     *
7129     *     #COLOR_BayerBG2BGR_VNG , #COLOR_BayerGB2BGR_VNG , #COLOR_BayerRG2BGR_VNG , #COLOR_BayerGR2BGR_VNG
7130     *
7131     * <ul>
7132     *   <li>
7133     *    Edge-Aware Demosaicing.
7134     *   </li>
7135     * </ul>
7136     *
7137     *     #COLOR_BayerBG2BGR_EA , #COLOR_BayerGB2BGR_EA , #COLOR_BayerRG2BGR_EA , #COLOR_BayerGR2BGR_EA
7138     *
7139     * <ul>
7140     *   <li>
7141     *    Demosaicing with alpha channel
7142     *   </li>
7143     * </ul>
7144     *
7145     *     #COLOR_BayerBG2BGRA , #COLOR_BayerGB2BGRA , #COLOR_BayerRG2BGRA , #COLOR_BayerGR2BGRA
7146     *
7147     * SEE: cvtColor
7148     */
7149    public static void demosaicing(Mat src, Mat dst, int code) {
7150        demosaicing_1(src.nativeObj, dst.nativeObj, code);
7151    }
7152
7153
7154    //
7155    // C++:  Moments cv::moments(Mat array, bool binaryImage = false)
7156    //
7157
7158    /**
7159     * Calculates all of the moments up to the third order of a polygon or rasterized shape.
7160     *
7161     * The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The
7162     * results are returned in the structure cv::Moments.
7163     *
7164     * @param array Raster image (single-channel, 8-bit or floating-point 2D array) or an array (
7165     * \(1 \times N\) or \(N \times 1\) ) of 2D points (Point or Point2f ).
7166     * @param binaryImage If it is true, all non-zero image pixels are treated as 1's. The parameter is
7167     * used for images only.
7168     * @return moments.
7169     *
7170     * <b>Note:</b> Only applicable to contour moments calculations from Python bindings: Note that the numpy
7171     * type for the input array should be either np.int32 or np.float32.
7172     *
7173     * SEE:  contourArea, arcLength
7174     */
7175    public static Moments moments(Mat array, boolean binaryImage) {
7176        return new Moments(moments_0(array.nativeObj, binaryImage));
7177    }
7178
7179    /**
7180     * Calculates all of the moments up to the third order of a polygon or rasterized shape.
7181     *
7182     * The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The
7183     * results are returned in the structure cv::Moments.
7184     *
7185     * @param array Raster image (single-channel, 8-bit or floating-point 2D array) or an array (
7186     * \(1 \times N\) or \(N \times 1\) ) of 2D points (Point or Point2f ).
7187     * used for images only.
7188     * @return moments.
7189     *
7190     * <b>Note:</b> Only applicable to contour moments calculations from Python bindings: Note that the numpy
7191     * type for the input array should be either np.int32 or np.float32.
7192     *
7193     * SEE:  contourArea, arcLength
7194     */
7195    public static Moments moments(Mat array) {
7196        return new Moments(moments_1(array.nativeObj));
7197    }
7198
7199
7200    //
7201    // C++:  void cv::HuMoments(Moments m, Mat& hu)
7202    //
7203
7204    public static void HuMoments(Moments m, Mat hu) {
7205        HuMoments_0(m.m00, m.m10, m.m01, m.m20, m.m11, m.m02, m.m30, m.m21, m.m12, m.m03, hu.nativeObj);
7206    }
7207
7208
7209    //
7210    // C++:  void cv::matchTemplate(Mat image, Mat templ, Mat& result, int method, Mat mask = Mat())
7211    //
7212
7213    /**
7214     * Compares a template against overlapped image regions.
7215     *
7216     * The function slides through image , compares the overlapped patches of size \(w \times h\) against
7217     * templ using the specified method and stores the comparison results in result . #TemplateMatchModes
7218     * describes the formulae for the available comparison methods ( \(I\) denotes image, \(T\)
7219     * template, \(R\) result, \(M\) the optional mask ). The summation is done over template and/or
7220     * the image patch: \(x' = 0...w-1, y' = 0...h-1\)
7221     *
7222     * After the function finishes the comparison, the best matches can be found as global minimums (when
7223     * #TM_SQDIFF was used) or maximums (when #TM_CCORR or #TM_CCOEFF was used) using the
7224     * #minMaxLoc function. In case of a color image, template summation in the numerator and each sum in
7225     * the denominator is done over all of the channels and separate mean values are used for each channel.
7226     * That is, the function can take a color template and a color image. The result will still be a
7227     * single-channel image, which is easier to analyze.
7228     *
7229     * @param image Image where the search is running. It must be 8-bit or 32-bit floating-point.
7230     * @param templ Searched template. It must be not greater than the source image and have the same
7231     * data type.
7232     * @param result Map of comparison results. It must be single-channel 32-bit floating-point. If image
7233     * is \(W \times H\) and templ is \(w \times h\) , then result is \((W-w+1) \times (H-h+1)\) .
7234     * @param method Parameter specifying the comparison method, see #TemplateMatchModes
7235     * @param mask Optional mask. It must have the same size as templ. It must either have the same number
7236     *             of channels as template or only one channel, which is then used for all template and
7237     *             image channels. If the data type is #CV_8U, the mask is interpreted as a binary mask,
7238     *             meaning only elements where mask is nonzero are used and are kept unchanged independent
7239     *             of the actual mask value (weight equals 1). For data tpye #CV_32F, the mask values are
7240     *             used as weights. The exact formulas are documented in #TemplateMatchModes.
7241     */
7242    public static void matchTemplate(Mat image, Mat templ, Mat result, int method, Mat mask) {
7243        matchTemplate_0(image.nativeObj, templ.nativeObj, result.nativeObj, method, mask.nativeObj);
7244    }
7245
7246    /**
7247     * Compares a template against overlapped image regions.
7248     *
7249     * The function slides through image , compares the overlapped patches of size \(w \times h\) against
7250     * templ using the specified method and stores the comparison results in result . #TemplateMatchModes
7251     * describes the formulae for the available comparison methods ( \(I\) denotes image, \(T\)
7252     * template, \(R\) result, \(M\) the optional mask ). The summation is done over template and/or
7253     * the image patch: \(x' = 0...w-1, y' = 0...h-1\)
7254     *
7255     * After the function finishes the comparison, the best matches can be found as global minimums (when
7256     * #TM_SQDIFF was used) or maximums (when #TM_CCORR or #TM_CCOEFF was used) using the
7257     * #minMaxLoc function. In case of a color image, template summation in the numerator and each sum in
7258     * the denominator is done over all of the channels and separate mean values are used for each channel.
7259     * That is, the function can take a color template and a color image. The result will still be a
7260     * single-channel image, which is easier to analyze.
7261     *
7262     * @param image Image where the search is running. It must be 8-bit or 32-bit floating-point.
7263     * @param templ Searched template. It must be not greater than the source image and have the same
7264     * data type.
7265     * @param result Map of comparison results. It must be single-channel 32-bit floating-point. If image
7266     * is \(W \times H\) and templ is \(w \times h\) , then result is \((W-w+1) \times (H-h+1)\) .
7267     * @param method Parameter specifying the comparison method, see #TemplateMatchModes
7268     *             of channels as template or only one channel, which is then used for all template and
7269     *             image channels. If the data type is #CV_8U, the mask is interpreted as a binary mask,
7270     *             meaning only elements where mask is nonzero are used and are kept unchanged independent
7271     *             of the actual mask value (weight equals 1). For data tpye #CV_32F, the mask values are
7272     *             used as weights. The exact formulas are documented in #TemplateMatchModes.
7273     */
7274    public static void matchTemplate(Mat image, Mat templ, Mat result, int method) {
7275        matchTemplate_1(image.nativeObj, templ.nativeObj, result.nativeObj, method);
7276    }
7277
7278
7279    //
7280    // C++:  int cv::connectedComponents(Mat image, Mat& labels, int connectivity, int ltype, int ccltype)
7281    //
7282
7283    /**
7284     * computes the connected components labeled image of boolean image
7285     *
7286     * image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0
7287     * represents the background label. ltype specifies the output label image type, an important
7288     * consideration based on the total number of labels or alternatively the total number of pixels in
7289     * the source image. ccltype specifies the connected components labeling algorithm to use, currently
7290     * Grana (BBDT) and Wu's (SAUF) CITE: Wu2009 algorithms are supported, see the #ConnectedComponentsAlgorithmsTypes
7291     * for details. Note that SAUF algorithm forces a row major ordering of labels while BBDT does not.
7292     * This function uses parallel version of both Grana and Wu's algorithms if at least one allowed
7293     * parallel framework is enabled and if the rows of the image are at least twice the number returned by #getNumberOfCPUs.
7294     *
7295     * @param image the 8-bit single-channel image to be labeled
7296     * @param labels destination labeled image
7297     * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
7298     * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
7299     * @param ccltype connected components algorithm type (see the #ConnectedComponentsAlgorithmsTypes).
7300     * @return automatically generated
7301     */
7302    public static int connectedComponentsWithAlgorithm(Mat image, Mat labels, int connectivity, int ltype, int ccltype) {
7303        return connectedComponentsWithAlgorithm_0(image.nativeObj, labels.nativeObj, connectivity, ltype, ccltype);
7304    }
7305
7306
7307    //
7308    // C++:  int cv::connectedComponents(Mat image, Mat& labels, int connectivity = 8, int ltype = CV_32S)
7309    //
7310
7311    /**
7312     *
7313     *
7314     * @param image the 8-bit single-channel image to be labeled
7315     * @param labels destination labeled image
7316     * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
7317     * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
7318     * @return automatically generated
7319     */
7320    public static int connectedComponents(Mat image, Mat labels, int connectivity, int ltype) {
7321        return connectedComponents_0(image.nativeObj, labels.nativeObj, connectivity, ltype);
7322    }
7323
7324    /**
7325     *
7326     *
7327     * @param image the 8-bit single-channel image to be labeled
7328     * @param labels destination labeled image
7329     * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
7330     * @return automatically generated
7331     */
7332    public static int connectedComponents(Mat image, Mat labels, int connectivity) {
7333        return connectedComponents_1(image.nativeObj, labels.nativeObj, connectivity);
7334    }
7335
7336    /**
7337     *
7338     *
7339     * @param image the 8-bit single-channel image to be labeled
7340     * @param labels destination labeled image
7341     * @return automatically generated
7342     */
7343    public static int connectedComponents(Mat image, Mat labels) {
7344        return connectedComponents_2(image.nativeObj, labels.nativeObj);
7345    }
7346
7347
7348    //
7349    // C++:  int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity, int ltype, int ccltype)
7350    //
7351
7352    /**
7353     * computes the connected components labeled image of boolean image and also produces a statistics output for each label
7354     *
7355     * image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0
7356     * represents the background label. ltype specifies the output label image type, an important
7357     * consideration based on the total number of labels or alternatively the total number of pixels in
7358     * the source image. ccltype specifies the connected components labeling algorithm to use, currently
7359     * Grana's (BBDT) and Wu's (SAUF) CITE: Wu2009 algorithms are supported, see the #ConnectedComponentsAlgorithmsTypes
7360     * for details. Note that SAUF algorithm forces a row major ordering of labels while BBDT does not.
7361     * This function uses parallel version of both Grana and Wu's algorithms (statistics included) if at least one allowed
7362     * parallel framework is enabled and if the rows of the image are at least twice the number returned by #getNumberOfCPUs.
7363     *
7364     * @param image the 8-bit single-channel image to be labeled
7365     * @param labels destination labeled image
7366     * @param stats statistics output for each label, including the background label.
7367     * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
7368     * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.
7369     * @param centroids centroid output for each label, including the background label. Centroids are
7370     * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F.
7371     * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
7372     * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
7373     * @param ccltype connected components algorithm type (see #ConnectedComponentsAlgorithmsTypes).
7374     * @return automatically generated
7375     */
7376    public static int connectedComponentsWithStatsWithAlgorithm(Mat image, Mat labels, Mat stats, Mat centroids, int connectivity, int ltype, int ccltype) {
7377        return connectedComponentsWithStatsWithAlgorithm_0(image.nativeObj, labels.nativeObj, stats.nativeObj, centroids.nativeObj, connectivity, ltype, ccltype);
7378    }
7379
7380
7381    //
7382    // C++:  int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity = 8, int ltype = CV_32S)
7383    //
7384
7385    /**
7386     *
7387     * @param image the 8-bit single-channel image to be labeled
7388     * @param labels destination labeled image
7389     * @param stats statistics output for each label, including the background label.
7390     * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
7391     * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.
7392     * @param centroids centroid output for each label, including the background label. Centroids are
7393     * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F.
7394     * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
7395     * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
7396     * @return automatically generated
7397     */
7398    public static int connectedComponentsWithStats(Mat image, Mat labels, Mat stats, Mat centroids, int connectivity, int ltype) {
7399        return connectedComponentsWithStats_0(image.nativeObj, labels.nativeObj, stats.nativeObj, centroids.nativeObj, connectivity, ltype);
7400    }
7401
7402    /**
7403     *
7404     * @param image the 8-bit single-channel image to be labeled
7405     * @param labels destination labeled image
7406     * @param stats statistics output for each label, including the background label.
7407     * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
7408     * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.
7409     * @param centroids centroid output for each label, including the background label. Centroids are
7410     * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F.
7411     * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
7412     * @return automatically generated
7413     */
7414    public static int connectedComponentsWithStats(Mat image, Mat labels, Mat stats, Mat centroids, int connectivity) {
7415        return connectedComponentsWithStats_1(image.nativeObj, labels.nativeObj, stats.nativeObj, centroids.nativeObj, connectivity);
7416    }
7417
7418    /**
7419     *
7420     * @param image the 8-bit single-channel image to be labeled
7421     * @param labels destination labeled image
7422     * @param stats statistics output for each label, including the background label.
7423     * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
7424     * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.
7425     * @param centroids centroid output for each label, including the background label. Centroids are
7426     * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F.
7427     * @return automatically generated
7428     */
7429    public static int connectedComponentsWithStats(Mat image, Mat labels, Mat stats, Mat centroids) {
7430        return connectedComponentsWithStats_2(image.nativeObj, labels.nativeObj, stats.nativeObj, centroids.nativeObj);
7431    }
7432
7433
7434    //
7435    // C++:  void cv::findContours(Mat image, vector_vector_Point& contours, Mat& hierarchy, int mode, int method, Point offset = Point())
7436    //
7437
7438    /**
7439     * Finds contours in a binary image.
7440     *
7441     * The function retrieves contours from the binary image using the algorithm CITE: Suzuki85 . The contours
7442     * are a useful tool for shape analysis and object detection and recognition. See squares.cpp in the
7443     * OpenCV sample directory.
7444     * <b>Note:</b> Since opencv 3.2 source image is not modified by this function.
7445     *
7446     * @param image Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero
7447     * pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
7448     * #adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
7449     * If mode equals to #RETR_CCOMP or #RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1).
7450     * @param contours Detected contours. Each contour is stored as a vector of points (e.g.
7451     * std::vector&lt;std::vector&lt;cv::Point&gt; &gt;).
7452     * @param hierarchy Optional output vector (e.g. std::vector&lt;cv::Vec4i&gt;), containing information about the image topology. It has
7453     * as many elements as the number of contours. For each i-th contour contours[i], the elements
7454     * hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based indices
7455     * in contours of the next and previous contours at the same hierarchical level, the first child
7456     * contour and the parent contour, respectively. If for the contour i there are no next, previous,
7457     * parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
7458     * @param mode Contour retrieval mode, see #RetrievalModes
7459     * @param method Contour approximation method, see #ContourApproximationModes
7460     * @param offset Optional offset by which every contour point is shifted. This is useful if the
7461     * contours are extracted from the image ROI and then they should be analyzed in the whole image
7462     * context.
7463     */
7464    public static void findContours(Mat image, List<MatOfPoint> contours, Mat hierarchy, int mode, int method, Point offset) {
7465        Mat contours_mat = new Mat();
7466        findContours_0(image.nativeObj, contours_mat.nativeObj, hierarchy.nativeObj, mode, method, offset.x, offset.y);
7467        Converters.Mat_to_vector_vector_Point(contours_mat, contours);
7468        contours_mat.release();
7469    }
7470
7471    /**
7472     * Finds contours in a binary image.
7473     *
7474     * The function retrieves contours from the binary image using the algorithm CITE: Suzuki85 . The contours
7475     * are a useful tool for shape analysis and object detection and recognition. See squares.cpp in the
7476     * OpenCV sample directory.
7477     * <b>Note:</b> Since opencv 3.2 source image is not modified by this function.
7478     *
7479     * @param image Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero
7480     * pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
7481     * #adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
7482     * If mode equals to #RETR_CCOMP or #RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1).
7483     * @param contours Detected contours. Each contour is stored as a vector of points (e.g.
7484     * std::vector&lt;std::vector&lt;cv::Point&gt; &gt;).
7485     * @param hierarchy Optional output vector (e.g. std::vector&lt;cv::Vec4i&gt;), containing information about the image topology. It has
7486     * as many elements as the number of contours. For each i-th contour contours[i], the elements
7487     * hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based indices
7488     * in contours of the next and previous contours at the same hierarchical level, the first child
7489     * contour and the parent contour, respectively. If for the contour i there are no next, previous,
7490     * parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
7491     * @param mode Contour retrieval mode, see #RetrievalModes
7492     * @param method Contour approximation method, see #ContourApproximationModes
7493     * contours are extracted from the image ROI and then they should be analyzed in the whole image
7494     * context.
7495     */
7496    public static void findContours(Mat image, List<MatOfPoint> contours, Mat hierarchy, int mode, int method) {
7497        Mat contours_mat = new Mat();
7498        findContours_1(image.nativeObj, contours_mat.nativeObj, hierarchy.nativeObj, mode, method);
7499        Converters.Mat_to_vector_vector_Point(contours_mat, contours);
7500        contours_mat.release();
7501    }
7502
7503
7504    //
7505    // C++:  void cv::approxPolyDP(vector_Point2f curve, vector_Point2f& approxCurve, double epsilon, bool closed)
7506    //
7507
7508    /**
7509     * Approximates a polygonal curve(s) with the specified precision.
7510     *
7511     * The function cv::approxPolyDP approximates a curve or a polygon with another curve/polygon with less
7512     * vertices so that the distance between them is less or equal to the specified precision. It uses the
7513     * Douglas-Peucker algorithm &lt;http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm&gt;
7514     *
7515     * @param curve Input vector of a 2D point stored in std::vector or Mat
7516     * @param approxCurve Result of the approximation. The type should match the type of the input curve.
7517     * @param epsilon Parameter specifying the approximation accuracy. This is the maximum distance
7518     * between the original curve and its approximation.
7519     * @param closed If true, the approximated curve is closed (its first and last vertices are
7520     * connected). Otherwise, it is not closed.
7521     */
7522    public static void approxPolyDP(MatOfPoint2f curve, MatOfPoint2f approxCurve, double epsilon, boolean closed) {
7523        Mat curve_mat = curve;
7524        Mat approxCurve_mat = approxCurve;
7525        approxPolyDP_0(curve_mat.nativeObj, approxCurve_mat.nativeObj, epsilon, closed);
7526    }
7527
7528
7529    //
7530    // C++:  double cv::arcLength(vector_Point2f curve, bool closed)
7531    //
7532
7533    /**
7534     * Calculates a contour perimeter or a curve length.
7535     *
7536     * The function computes a curve length or a closed contour perimeter.
7537     *
7538     * @param curve Input vector of 2D points, stored in std::vector or Mat.
7539     * @param closed Flag indicating whether the curve is closed or not.
7540     * @return automatically generated
7541     */
7542    public static double arcLength(MatOfPoint2f curve, boolean closed) {
7543        Mat curve_mat = curve;
7544        return arcLength_0(curve_mat.nativeObj, closed);
7545    }
7546
7547
7548    //
7549    // C++:  Rect cv::boundingRect(Mat array)
7550    //
7551
7552    /**
7553     * Calculates the up-right bounding rectangle of a point set or non-zero pixels of gray-scale image.
7554     *
7555     * The function calculates and returns the minimal up-right bounding rectangle for the specified point set or
7556     * non-zero pixels of gray-scale image.
7557     *
7558     * @param array Input gray-scale image or 2D point set, stored in std::vector or Mat.
7559     * @return automatically generated
7560     */
7561    public static Rect boundingRect(Mat array) {
7562        return new Rect(boundingRect_0(array.nativeObj));
7563    }
7564
7565
7566    //
7567    // C++:  double cv::contourArea(Mat contour, bool oriented = false)
7568    //
7569
7570    /**
7571     * Calculates a contour area.
7572     *
7573     * The function computes a contour area. Similarly to moments , the area is computed using the Green
7574     * formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using
7575     * #drawContours or #fillPoly , can be different. Also, the function will most certainly give a wrong
7576     * results for contours with self-intersections.
7577     *
7578     * Example:
7579     * <code>
7580     *     vector&lt;Point&gt; contour;
7581     *     contour.push_back(Point2f(0, 0));
7582     *     contour.push_back(Point2f(10, 0));
7583     *     contour.push_back(Point2f(10, 10));
7584     *     contour.push_back(Point2f(5, 4));
7585     *
7586     *     double area0 = contourArea(contour);
7587     *     vector&lt;Point&gt; approx;
7588     *     approxPolyDP(contour, approx, 5, true);
7589     *     double area1 = contourArea(approx);
7590     *
7591     *     cout &lt;&lt; "area0 =" &lt;&lt; area0 &lt;&lt; endl &lt;&lt;
7592     *             "area1 =" &lt;&lt; area1 &lt;&lt; endl &lt;&lt;
7593     *             "approx poly vertices" &lt;&lt; approx.size() &lt;&lt; endl;
7594     * </code>
7595     * @param contour Input vector of 2D points (contour vertices), stored in std::vector or Mat.
7596     * @param oriented Oriented area flag. If it is true, the function returns a signed area value,
7597     * depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can
7598     * determine orientation of a contour by taking the sign of an area. By default, the parameter is
7599     * false, which means that the absolute value is returned.
7600     * @return automatically generated
7601     */
7602    public static double contourArea(Mat contour, boolean oriented) {
7603        return contourArea_0(contour.nativeObj, oriented);
7604    }
7605
7606    /**
7607     * Calculates a contour area.
7608     *
7609     * The function computes a contour area. Similarly to moments , the area is computed using the Green
7610     * formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using
7611     * #drawContours or #fillPoly , can be different. Also, the function will most certainly give a wrong
7612     * results for contours with self-intersections.
7613     *
7614     * Example:
7615     * <code>
7616     *     vector&lt;Point&gt; contour;
7617     *     contour.push_back(Point2f(0, 0));
7618     *     contour.push_back(Point2f(10, 0));
7619     *     contour.push_back(Point2f(10, 10));
7620     *     contour.push_back(Point2f(5, 4));
7621     *
7622     *     double area0 = contourArea(contour);
7623     *     vector&lt;Point&gt; approx;
7624     *     approxPolyDP(contour, approx, 5, true);
7625     *     double area1 = contourArea(approx);
7626     *
7627     *     cout &lt;&lt; "area0 =" &lt;&lt; area0 &lt;&lt; endl &lt;&lt;
7628     *             "area1 =" &lt;&lt; area1 &lt;&lt; endl &lt;&lt;
7629     *             "approx poly vertices" &lt;&lt; approx.size() &lt;&lt; endl;
7630     * </code>
7631     * @param contour Input vector of 2D points (contour vertices), stored in std::vector or Mat.
7632     * depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can
7633     * determine orientation of a contour by taking the sign of an area. By default, the parameter is
7634     * false, which means that the absolute value is returned.
7635     * @return automatically generated
7636     */
7637    public static double contourArea(Mat contour) {
7638        return contourArea_1(contour.nativeObj);
7639    }
7640
7641
7642    //
7643    // C++:  RotatedRect cv::minAreaRect(vector_Point2f points)
7644    //
7645
7646    /**
7647     * Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
7648     *
7649     * The function calculates and returns the minimum-area bounding rectangle (possibly rotated) for a
7650     * specified point set. Developer should keep in mind that the returned RotatedRect can contain negative
7651     * indices when data is close to the containing Mat element boundary.
7652     *
7653     * @param points Input vector of 2D points, stored in std::vector&lt;&gt; or Mat
7654     * @return automatically generated
7655     */
7656    public static RotatedRect minAreaRect(MatOfPoint2f points) {
7657        Mat points_mat = points;
7658        return new RotatedRect(minAreaRect_0(points_mat.nativeObj));
7659    }
7660
7661
7662    //
7663    // C++:  void cv::boxPoints(RotatedRect box, Mat& points)
7664    //
7665
7666    /**
7667     * Finds the four vertices of a rotated rect. Useful to draw the rotated rectangle.
7668     *
7669     * The function finds the four vertices of a rotated rectangle. This function is useful to draw the
7670     * rectangle. In C++, instead of using this function, you can directly use RotatedRect::points method. Please
7671     * visit the REF: tutorial_bounding_rotated_ellipses "tutorial on Creating Bounding rotated boxes and ellipses for contours" for more information.
7672     *
7673     * @param box The input rotated rectangle. It may be the output of
7674     * @param points The output array of four vertices of rectangles.
7675     */
7676    public static void boxPoints(RotatedRect box, Mat points) {
7677        boxPoints_0(box.center.x, box.center.y, box.size.width, box.size.height, box.angle, points.nativeObj);
7678    }
7679
7680
7681    //
7682    // C++:  void cv::minEnclosingCircle(vector_Point2f points, Point2f& center, float& radius)
7683    //
7684
7685    /**
7686     * Finds a circle of the minimum area enclosing a 2D point set.
7687     *
7688     * The function finds the minimal enclosing circle of a 2D point set using an iterative algorithm.
7689     *
7690     * @param points Input vector of 2D points, stored in std::vector&lt;&gt; or Mat
7691     * @param center Output center of the circle.
7692     * @param radius Output radius of the circle.
7693     */
7694    public static void minEnclosingCircle(MatOfPoint2f points, Point center, float[] radius) {
7695        Mat points_mat = points;
7696        double[] center_out = new double[2];
7697        double[] radius_out = new double[1];
7698        minEnclosingCircle_0(points_mat.nativeObj, center_out, radius_out);
7699        if(center!=null){ center.x = center_out[0]; center.y = center_out[1]; } 
7700        if(radius!=null) radius[0] = (float)radius_out[0];
7701    }
7702
7703
7704    //
7705    // C++:  double cv::minEnclosingTriangle(Mat points, Mat& triangle)
7706    //
7707
7708    /**
7709     * Finds a triangle of minimum area enclosing a 2D point set and returns its area.
7710     *
7711     * The function finds a triangle of minimum area enclosing the given set of 2D points and returns its
7712     * area. The output for a given 2D point set is shown in the image below. 2D points are depicted in
7713     * red* and the enclosing triangle in *yellow*.
7714     *
7715     * ![Sample output of the minimum enclosing triangle function](pics/minenclosingtriangle.png)
7716     *
7717     * The implementation of the algorithm is based on O'Rourke's CITE: ORourke86 and Klee and Laskowski's
7718     * CITE: KleeLaskowski85 papers. O'Rourke provides a \(\theta(n)\) algorithm for finding the minimal
7719     * enclosing triangle of a 2D convex polygon with n vertices. Since the #minEnclosingTriangle function
7720     * takes a 2D point set as input an additional preprocessing step of computing the convex hull of the
7721     * 2D point set is required. The complexity of the #convexHull function is \(O(n log(n))\) which is higher
7722     * than \(\theta(n)\). Thus the overall complexity of the function is \(O(n log(n))\).
7723     *
7724     * @param points Input vector of 2D points with depth CV_32S or CV_32F, stored in std::vector&lt;&gt; or Mat
7725     * @param triangle Output vector of three 2D points defining the vertices of the triangle. The depth
7726     * of the OutputArray must be CV_32F.
7727     * @return automatically generated
7728     */
7729    public static double minEnclosingTriangle(Mat points, Mat triangle) {
7730        return minEnclosingTriangle_0(points.nativeObj, triangle.nativeObj);
7731    }
7732
7733
7734    //
7735    // C++:  double cv::matchShapes(Mat contour1, Mat contour2, int method, double parameter)
7736    //
7737
7738    /**
7739     * Compares two shapes.
7740     *
7741     * The function compares two shapes. All three implemented methods use the Hu invariants (see #HuMoments)
7742     *
7743     * @param contour1 First contour or grayscale image.
7744     * @param contour2 Second contour or grayscale image.
7745     * @param method Comparison method, see #ShapeMatchModes
7746     * @param parameter Method-specific parameter (not supported now).
7747     * @return automatically generated
7748     */
7749    public static double matchShapes(Mat contour1, Mat contour2, int method, double parameter) {
7750        return matchShapes_0(contour1.nativeObj, contour2.nativeObj, method, parameter);
7751    }
7752
7753
7754    //
7755    // C++:  void cv::convexHull(vector_Point points, vector_int& hull, bool clockwise = false,  _hidden_  returnPoints = true)
7756    //
7757
7758    /**
7759     * Finds the convex hull of a point set.
7760     *
7761     * The function cv::convexHull finds the convex hull of a 2D point set using the Sklansky's algorithm CITE: Sklansky82
7762     * that has *O(N logN)* complexity in the current implementation.
7763     *
7764     * @param points Input 2D point set, stored in std::vector or Mat.
7765     * @param hull Output convex hull. It is either an integer vector of indices or vector of points. In
7766     * the first case, the hull elements are 0-based indices of the convex hull points in the original
7767     * array (since the set of convex hull points is a subset of the original point set). In the second
7768     * case, hull elements are the convex hull points themselves.
7769     * @param clockwise Orientation flag. If it is true, the output convex hull is oriented clockwise.
7770     * Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing
7771     * to the right, and its Y axis pointing upwards.
7772     * returns convex hull points. Otherwise, it returns indices of the convex hull points. When the
7773     * output array is std::vector, the flag is ignored, and the output depends on the type of the
7774     * vector: std::vector&lt;int&gt; implies returnPoints=false, std::vector&lt;Point&gt; implies
7775     * returnPoints=true.
7776     *
7777     * <b>Note:</b> {@code points} and {@code hull} should be different arrays, inplace processing isn't supported.
7778     *
7779     * Check REF: tutorial_hull "the corresponding tutorial" for more details.
7780     *
7781     * useful links:
7782     *
7783     * https://www.learnopencv.com/convex-hull-using-opencv-in-python-and-c/
7784     */
7785    public static void convexHull(MatOfPoint points, MatOfInt hull, boolean clockwise) {
7786        Mat points_mat = points;
7787        Mat hull_mat = hull;
7788        convexHull_0(points_mat.nativeObj, hull_mat.nativeObj, clockwise);
7789    }
7790
7791    /**
7792     * Finds the convex hull of a point set.
7793     *
7794     * The function cv::convexHull finds the convex hull of a 2D point set using the Sklansky's algorithm CITE: Sklansky82
7795     * that has *O(N logN)* complexity in the current implementation.
7796     *
7797     * @param points Input 2D point set, stored in std::vector or Mat.
7798     * @param hull Output convex hull. It is either an integer vector of indices or vector of points. In
7799     * the first case, the hull elements are 0-based indices of the convex hull points in the original
7800     * array (since the set of convex hull points is a subset of the original point set). In the second
7801     * case, hull elements are the convex hull points themselves.
7802     * Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing
7803     * to the right, and its Y axis pointing upwards.
7804     * returns convex hull points. Otherwise, it returns indices of the convex hull points. When the
7805     * output array is std::vector, the flag is ignored, and the output depends on the type of the
7806     * vector: std::vector&lt;int&gt; implies returnPoints=false, std::vector&lt;Point&gt; implies
7807     * returnPoints=true.
7808     *
7809     * <b>Note:</b> {@code points} and {@code hull} should be different arrays, inplace processing isn't supported.
7810     *
7811     * Check REF: tutorial_hull "the corresponding tutorial" for more details.
7812     *
7813     * useful links:
7814     *
7815     * https://www.learnopencv.com/convex-hull-using-opencv-in-python-and-c/
7816     */
7817    public static void convexHull(MatOfPoint points, MatOfInt hull) {
7818        Mat points_mat = points;
7819        Mat hull_mat = hull;
7820        convexHull_2(points_mat.nativeObj, hull_mat.nativeObj);
7821    }
7822
7823
7824    //
7825    // C++:  void cv::convexityDefects(vector_Point contour, vector_int convexhull, vector_Vec4i& convexityDefects)
7826    //
7827
7828    /**
7829     * Finds the convexity defects of a contour.
7830     *
7831     * The figure below displays convexity defects of a hand contour:
7832     *
7833     * ![image](pics/defects.png)
7834     *
7835     * @param contour Input contour.
7836     * @param convexhull Convex hull obtained using convexHull that should contain indices of the contour
7837     * points that make the hull.
7838     * @param convexityDefects The output vector of convexity defects. In C++ and the new Python/Java
7839     * interface each convexity defect is represented as 4-element integer vector (a.k.a. #Vec4i):
7840     * (start_index, end_index, farthest_pt_index, fixpt_depth), where indices are 0-based indices
7841     * in the original contour of the convexity defect beginning, end and the farthest point, and
7842     * fixpt_depth is fixed-point approximation (with 8 fractional bits) of the distance between the
7843     * farthest contour point and the hull. That is, to get the floating-point value of the depth will be
7844     * fixpt_depth/256.0.
7845     */
7846    public static void convexityDefects(MatOfPoint contour, MatOfInt convexhull, MatOfInt4 convexityDefects) {
7847        Mat contour_mat = contour;
7848        Mat convexhull_mat = convexhull;
7849        Mat convexityDefects_mat = convexityDefects;
7850        convexityDefects_0(contour_mat.nativeObj, convexhull_mat.nativeObj, convexityDefects_mat.nativeObj);
7851    }
7852
7853
7854    //
7855    // C++:  bool cv::isContourConvex(vector_Point contour)
7856    //
7857
7858    /**
7859     * Tests a contour convexity.
7860     *
7861     * The function tests whether the input contour is convex or not. The contour must be simple, that is,
7862     * without self-intersections. Otherwise, the function output is undefined.
7863     *
7864     * @param contour Input vector of 2D points, stored in std::vector&lt;&gt; or Mat
7865     * @return automatically generated
7866     */
7867    public static boolean isContourConvex(MatOfPoint contour) {
7868        Mat contour_mat = contour;
7869        return isContourConvex_0(contour_mat.nativeObj);
7870    }
7871
7872
7873    //
7874    // C++:  float cv::intersectConvexConvex(Mat _p1, Mat _p2, Mat& _p12, bool handleNested = true)
7875    //
7876
7877    /**
7878     * Finds intersection of two convex polygons
7879     *
7880     * @param _p1 First polygon
7881     * @param _p2 Second polygon
7882     * @param _p12 Output polygon describing the intersecting area
7883     * @param handleNested When true, an intersection is found if one of the polygons is fully enclosed in the other.
7884     * When false, no intersection is found. If the polygons share a side or the vertex of one polygon lies on an edge
7885     * of the other, they are not considered nested and an intersection will be found regardless of the value of handleNested.
7886     *
7887     * @return Absolute value of area of intersecting polygon
7888     *
7889     * <b>Note:</b> intersectConvexConvex doesn't confirm that both polygons are convex and will return invalid results if they aren't.
7890     */
7891    public static float intersectConvexConvex(Mat _p1, Mat _p2, Mat _p12, boolean handleNested) {
7892        return intersectConvexConvex_0(_p1.nativeObj, _p2.nativeObj, _p12.nativeObj, handleNested);
7893    }
7894
7895    /**
7896     * Finds intersection of two convex polygons
7897     *
7898     * @param _p1 First polygon
7899     * @param _p2 Second polygon
7900     * @param _p12 Output polygon describing the intersecting area
7901     * When false, no intersection is found. If the polygons share a side or the vertex of one polygon lies on an edge
7902     * of the other, they are not considered nested and an intersection will be found regardless of the value of handleNested.
7903     *
7904     * @return Absolute value of area of intersecting polygon
7905     *
7906     * <b>Note:</b> intersectConvexConvex doesn't confirm that both polygons are convex and will return invalid results if they aren't.
7907     */
7908    public static float intersectConvexConvex(Mat _p1, Mat _p2, Mat _p12) {
7909        return intersectConvexConvex_1(_p1.nativeObj, _p2.nativeObj, _p12.nativeObj);
7910    }
7911
7912
7913    //
7914    // C++:  RotatedRect cv::fitEllipse(vector_Point2f points)
7915    //
7916
7917    /**
7918     * Fits an ellipse around a set of 2D points.
7919     *
7920     * The function calculates the ellipse that fits (in a least-squares sense) a set of 2D points best of
7921     * all. It returns the rotated rectangle in which the ellipse is inscribed. The first algorithm described by CITE: Fitzgibbon95
7922     * is used. Developer should keep in mind that it is possible that the returned
7923     * ellipse/rotatedRect data contains negative indices, due to the data points being close to the
7924     * border of the containing Mat element.
7925     *
7926     * @param points Input 2D point set, stored in std::vector&lt;&gt; or Mat
7927     * @return automatically generated
7928     */
7929    public static RotatedRect fitEllipse(MatOfPoint2f points) {
7930        Mat points_mat = points;
7931        return new RotatedRect(fitEllipse_0(points_mat.nativeObj));
7932    }
7933
7934
7935    //
7936    // C++:  RotatedRect cv::fitEllipseAMS(Mat points)
7937    //
7938
7939    /**
7940     * Fits an ellipse around a set of 2D points.
7941     *
7942     *  The function calculates the ellipse that fits a set of 2D points.
7943     *  It returns the rotated rectangle in which the ellipse is inscribed.
7944     *  The Approximate Mean Square (AMS) proposed by CITE: Taubin1991 is used.
7945     *
7946     *  For an ellipse, this basis set is \( \chi= \left(x^2, x y, y^2, x, y, 1\right) \),
7947     *  which is a set of six free coefficients \( A^T=\left\{A_{\text{xx}},A_{\text{xy}},A_{\text{yy}},A_x,A_y,A_0\right\} \).
7948     *  However, to specify an ellipse, all that is needed is five numbers; the major and minor axes lengths \( (a,b) \),
7949     *  the position \( (x_0,y_0) \), and the orientation \( \theta \). This is because the basis set includes lines,
7950     *  quadratics, parabolic and hyperbolic functions as well as elliptical functions as possible fits.
7951     *  If the fit is found to be a parabolic or hyperbolic function then the standard #fitEllipse method is used.
7952     *  The AMS method restricts the fit to parabolic, hyperbolic and elliptical curves
7953     *  by imposing the condition that \( A^T ( D_x^T D_x  +   D_y^T D_y) A = 1 \) where
7954     *  the matrices \( Dx \) and \( Dy \) are the partial derivatives of the design matrix \( D \) with
7955     *  respect to x and y. The matrices are formed row by row applying the following to
7956     *  each of the points in the set:
7957     *  \(align*}{
7958     *  D(i,:)&amp;=\left\{x_i^2, x_i y_i, y_i^2, x_i, y_i, 1\right\} &amp;
7959     *  D_x(i,:)&amp;=\left\{2 x_i,y_i,0,1,0,0\right\} &amp;
7960     *  D_y(i,:)&amp;=\left\{0,x_i,2 y_i,0,1,0\right\}
7961     *  \)
7962     *  The AMS method minimizes the cost function
7963     *  \(equation*}{
7964     *  \epsilon ^2=\frac{ A^T D^T D A }{ A^T (D_x^T D_x +  D_y^T D_y) A^T }
7965     *  \)
7966     *
7967     *  The minimum cost is found by solving the generalized eigenvalue problem.
7968     *
7969     *  \(equation*}{
7970     *  D^T D A = \lambda  \left( D_x^T D_x +  D_y^T D_y\right) A
7971     *  \)
7972     *
7973     *  @param points Input 2D point set, stored in std::vector&lt;&gt; or Mat
7974     * @return automatically generated
7975     */
7976    public static RotatedRect fitEllipseAMS(Mat points) {
7977        return new RotatedRect(fitEllipseAMS_0(points.nativeObj));
7978    }
7979
7980
7981    //
7982    // C++:  RotatedRect cv::fitEllipseDirect(Mat points)
7983    //
7984
7985    /**
7986     * Fits an ellipse around a set of 2D points.
7987     *
7988     *  The function calculates the ellipse that fits a set of 2D points.
7989     *  It returns the rotated rectangle in which the ellipse is inscribed.
7990     *  The Direct least square (Direct) method by CITE: Fitzgibbon1999 is used.
7991     *
7992     *  For an ellipse, this basis set is \( \chi= \left(x^2, x y, y^2, x, y, 1\right) \),
7993     *  which is a set of six free coefficients \( A^T=\left\{A_{\text{xx}},A_{\text{xy}},A_{\text{yy}},A_x,A_y,A_0\right\} \).
7994     *  However, to specify an ellipse, all that is needed is five numbers; the major and minor axes lengths \( (a,b) \),
7995     *  the position \( (x_0,y_0) \), and the orientation \( \theta \). This is because the basis set includes lines,
7996     *  quadratics, parabolic and hyperbolic functions as well as elliptical functions as possible fits.
7997     *  The Direct method confines the fit to ellipses by ensuring that \( 4 A_{xx} A_{yy}- A_{xy}^2 &gt; 0 \).
7998     *  The condition imposed is that \( 4 A_{xx} A_{yy}- A_{xy}^2=1 \) which satisfies the inequality
7999     *  and as the coefficients can be arbitrarily scaled is not overly restrictive.
8000     *
8001     *  \(equation*}{
8002     *  \epsilon ^2= A^T D^T D A \quad \text{with} \quad A^T C A =1 \quad \text{and} \quad C=\left(\begin{matrix}
8003     *  0 &amp; 0  &amp; 2  &amp; 0  &amp; 0  &amp;  0  \\
8004     *  0 &amp; -1  &amp; 0  &amp; 0  &amp; 0  &amp;  0 \\
8005     *  2 &amp; 0  &amp; 0  &amp; 0  &amp; 0  &amp;  0 \\
8006     *  0 &amp; 0  &amp; 0  &amp; 0  &amp; 0  &amp;  0 \\
8007     *  0 &amp; 0  &amp; 0  &amp; 0  &amp; 0  &amp;  0 \\
8008     *  0 &amp; 0  &amp; 0  &amp; 0  &amp; 0  &amp;  0
8009     *  \end{matrix} \right)
8010     *  \)
8011     *
8012     *  The minimum cost is found by solving the generalized eigenvalue problem.
8013     *
8014     *  \(equation*}{
8015     *  D^T D A = \lambda  \left( C\right) A
8016     *  \)
8017     *
8018     *  The system produces only one positive eigenvalue \( \lambda\) which is chosen as the solution
8019     *  with its eigenvector \(\mathbf{u}\). These are used to find the coefficients
8020     *
8021     *  \(equation*}{
8022     *  A = \sqrt{\frac{1}{\mathbf{u}^T C \mathbf{u}}}  \mathbf{u}
8023     *  \)
8024     *  The scaling factor guarantees that  \(A^T C A =1\).
8025     *
8026     *  @param points Input 2D point set, stored in std::vector&lt;&gt; or Mat
8027     * @return automatically generated
8028     */
8029    public static RotatedRect fitEllipseDirect(Mat points) {
8030        return new RotatedRect(fitEllipseDirect_0(points.nativeObj));
8031    }
8032
8033
8034    //
8035    // C++:  void cv::fitLine(Mat points, Mat& line, int distType, double param, double reps, double aeps)
8036    //
8037
8038    /**
8039     * Fits a line to a 2D or 3D point set.
8040     *
8041     * The function fitLine fits a line to a 2D or 3D point set by minimizing \(\sum_i \rho(r_i)\) where
8042     * \(r_i\) is a distance between the \(i^{th}\) point, the line and \(\rho(r)\) is a distance function, one
8043     * of the following:
8044     * <ul>
8045     *   <li>
8046     *   DIST_L2
8047     * \(\rho (r) = r^2/2  \quad \text{(the simplest and the fastest least-squares method)}\)
8048     *   </li>
8049     *   <li>
8050     *  DIST_L1
8051     * \(\rho (r) = r\)
8052     *   </li>
8053     *   <li>
8054     *  DIST_L12
8055     * \(\rho (r) = 2  \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\)
8056     *   </li>
8057     *   <li>
8058     *  DIST_FAIR
8059     * \(\rho \left (r \right ) = C^2  \cdot \left (  \frac{r}{C} -  \log{\left(1 + \frac{r}{C}\right)} \right )  \quad \text{where} \quad C=1.3998\)
8060     *   </li>
8061     *   <li>
8062     *  DIST_WELSCH
8063     * \(\rho \left (r \right ) =  \frac{C^2}{2} \cdot \left ( 1 -  \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right )  \quad \text{where} \quad C=2.9846\)
8064     *   </li>
8065     *   <li>
8066     *  DIST_HUBER
8067     * \(\rho (r) =  \fork{r^2/2}{if \(r &lt; C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\)
8068     *   </li>
8069     * </ul>
8070     *
8071     * The algorithm is based on the M-estimator ( &lt;http://en.wikipedia.org/wiki/M-estimator&gt; ) technique
8072     * that iteratively fits the line using the weighted least-squares algorithm. After each iteration the
8073     * weights \(w_i\) are adjusted to be inversely proportional to \(\rho(r_i)\) .
8074     *
8075     * @param points Input vector of 2D or 3D points, stored in std::vector&lt;&gt; or Mat.
8076     * @param line Output line parameters. In case of 2D fitting, it should be a vector of 4 elements
8077     * (like Vec4f) - (vx, vy, x0, y0), where (vx, vy) is a normalized vector collinear to the line and
8078     * (x0, y0) is a point on the line. In case of 3D fitting, it should be a vector of 6 elements (like
8079     * Vec6f) - (vx, vy, vz, x0, y0, z0), where (vx, vy, vz) is a normalized vector collinear to the line
8080     * and (x0, y0, z0) is a point on the line.
8081     * @param distType Distance used by the M-estimator, see #DistanceTypes
8082     * @param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
8083     * is chosen.
8084     * @param reps Sufficient accuracy for the radius (distance between the coordinate origin and the line).
8085     * @param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for reps and aeps.
8086     */
8087    public static void fitLine(Mat points, Mat line, int distType, double param, double reps, double aeps) {
8088        fitLine_0(points.nativeObj, line.nativeObj, distType, param, reps, aeps);
8089    }
8090
8091
8092    //
8093    // C++:  double cv::pointPolygonTest(vector_Point2f contour, Point2f pt, bool measureDist)
8094    //
8095
8096    /**
8097     * Performs a point-in-contour test.
8098     *
8099     * The function determines whether the point is inside a contour, outside, or lies on an edge (or
8100     * coincides with a vertex). It returns positive (inside), negative (outside), or zero (on an edge)
8101     * value, correspondingly. When measureDist=false , the return value is +1, -1, and 0, respectively.
8102     * Otherwise, the return value is a signed distance between the point and the nearest contour edge.
8103     *
8104     * See below a sample output of the function where each image pixel is tested against the contour:
8105     *
8106     * ![sample output](pics/pointpolygon.png)
8107     *
8108     * @param contour Input contour.
8109     * @param pt Point tested against the contour.
8110     * @param measureDist If true, the function estimates the signed distance from the point to the
8111     * nearest contour edge. Otherwise, the function only checks if the point is inside a contour or not.
8112     * @return automatically generated
8113     */
8114    public static double pointPolygonTest(MatOfPoint2f contour, Point pt, boolean measureDist) {
8115        Mat contour_mat = contour;
8116        return pointPolygonTest_0(contour_mat.nativeObj, pt.x, pt.y, measureDist);
8117    }
8118
8119
8120    //
8121    // C++:  int cv::rotatedRectangleIntersection(RotatedRect rect1, RotatedRect rect2, Mat& intersectingRegion)
8122    //
8123
8124    /**
8125     * Finds out if there is any intersection between two rotated rectangles.
8126     *
8127     * If there is then the vertices of the intersecting region are returned as well.
8128     *
8129     * Below are some examples of intersection configurations. The hatched pattern indicates the
8130     * intersecting region and the red vertices are returned by the function.
8131     *
8132     * ![intersection examples](pics/intersection.png)
8133     *
8134     * @param rect1 First rectangle
8135     * @param rect2 Second rectangle
8136     * @param intersectingRegion The output array of the vertices of the intersecting region. It returns
8137     * at most 8 vertices. Stored as std::vector&lt;cv::Point2f&gt; or cv::Mat as Mx1 of type CV_32FC2.
8138     * @return One of #RectanglesIntersectTypes
8139     */
8140    public static int rotatedRectangleIntersection(RotatedRect rect1, RotatedRect rect2, Mat intersectingRegion) {
8141        return rotatedRectangleIntersection_0(rect1.center.x, rect1.center.y, rect1.size.width, rect1.size.height, rect1.angle, rect2.center.x, rect2.center.y, rect2.size.width, rect2.size.height, rect2.angle, intersectingRegion.nativeObj);
8142    }
8143
8144
8145    //
8146    // C++:  Ptr_GeneralizedHoughBallard cv::createGeneralizedHoughBallard()
8147    //
8148
8149    /**
8150     * Creates a smart pointer to a cv::GeneralizedHoughBallard class and initializes it.
8151     * @return automatically generated
8152     */
8153    public static GeneralizedHoughBallard createGeneralizedHoughBallard() {
8154        return GeneralizedHoughBallard.__fromPtr__(createGeneralizedHoughBallard_0());
8155    }
8156
8157
8158    //
8159    // C++:  Ptr_GeneralizedHoughGuil cv::createGeneralizedHoughGuil()
8160    //
8161
8162    /**
8163     * Creates a smart pointer to a cv::GeneralizedHoughGuil class and initializes it.
8164     * @return automatically generated
8165     */
8166    public static GeneralizedHoughGuil createGeneralizedHoughGuil() {
8167        return GeneralizedHoughGuil.__fromPtr__(createGeneralizedHoughGuil_0());
8168    }
8169
8170
8171    //
8172    // C++:  void cv::applyColorMap(Mat src, Mat& dst, int colormap)
8173    //
8174
8175    /**
8176     * Applies a GNU Octave/MATLAB equivalent colormap on a given image.
8177     *
8178     * @param src The source image, grayscale or colored of type CV_8UC1 or CV_8UC3.
8179     * @param dst The result is the colormapped source image. Note: Mat::create is called on dst.
8180     * @param colormap The colormap to apply, see #ColormapTypes
8181     */
8182    public static void applyColorMap(Mat src, Mat dst, int colormap) {
8183        applyColorMap_0(src.nativeObj, dst.nativeObj, colormap);
8184    }
8185
8186
8187    //
8188    // C++:  void cv::applyColorMap(Mat src, Mat& dst, Mat userColor)
8189    //
8190
8191    /**
8192     * Applies a user colormap on a given image.
8193     *
8194     * @param src The source image, grayscale or colored of type CV_8UC1 or CV_8UC3.
8195     * @param dst The result is the colormapped source image. Note: Mat::create is called on dst.
8196     * @param userColor The colormap to apply of type CV_8UC1 or CV_8UC3 and size 256
8197     */
8198    public static void applyColorMap(Mat src, Mat dst, Mat userColor) {
8199        applyColorMap_1(src.nativeObj, dst.nativeObj, userColor.nativeObj);
8200    }
8201
8202
8203    //
8204    // C++:  void cv::line(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
8205    //
8206
8207    /**
8208     * Draws a line segment connecting two points.
8209     *
8210     * The function line draws the line segment between pt1 and pt2 points in the image. The line is
8211     * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected
8212     * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased
8213     * lines are drawn using Gaussian filtering.
8214     *
8215     * @param img Image.
8216     * @param pt1 First point of the line segment.
8217     * @param pt2 Second point of the line segment.
8218     * @param color Line color.
8219     * @param thickness Line thickness.
8220     * @param lineType Type of the line. See #LineTypes.
8221     * @param shift Number of fractional bits in the point coordinates.
8222     */
8223    public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift) {
8224        line_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift);
8225    }
8226
8227    /**
8228     * Draws a line segment connecting two points.
8229     *
8230     * The function line draws the line segment between pt1 and pt2 points in the image. The line is
8231     * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected
8232     * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased
8233     * lines are drawn using Gaussian filtering.
8234     *
8235     * @param img Image.
8236     * @param pt1 First point of the line segment.
8237     * @param pt2 Second point of the line segment.
8238     * @param color Line color.
8239     * @param thickness Line thickness.
8240     * @param lineType Type of the line. See #LineTypes.
8241     */
8242    public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType) {
8243        line_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
8244    }
8245
8246    /**
8247     * Draws a line segment connecting two points.
8248     *
8249     * The function line draws the line segment between pt1 and pt2 points in the image. The line is
8250     * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected
8251     * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased
8252     * lines are drawn using Gaussian filtering.
8253     *
8254     * @param img Image.
8255     * @param pt1 First point of the line segment.
8256     * @param pt2 Second point of the line segment.
8257     * @param color Line color.
8258     * @param thickness Line thickness.
8259     */
8260    public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness) {
8261        line_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
8262    }
8263
8264    /**
8265     * Draws a line segment connecting two points.
8266     *
8267     * The function line draws the line segment between pt1 and pt2 points in the image. The line is
8268     * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected
8269     * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased
8270     * lines are drawn using Gaussian filtering.
8271     *
8272     * @param img Image.
8273     * @param pt1 First point of the line segment.
8274     * @param pt2 Second point of the line segment.
8275     * @param color Line color.
8276     */
8277    public static void line(Mat img, Point pt1, Point pt2, Scalar color) {
8278        line_3(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]);
8279    }
8280
8281
8282    //
8283    // C++:  void cv::arrowedLine(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int line_type = 8, int shift = 0, double tipLength = 0.1)
8284    //
8285
8286    /**
8287     * Draws a arrow segment pointing from the first point to the second one.
8288     *
8289     * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
8290     *
8291     * @param img Image.
8292     * @param pt1 The point the arrow starts from.
8293     * @param pt2 The point the arrow points to.
8294     * @param color Line color.
8295     * @param thickness Line thickness.
8296     * @param line_type Type of the line. See #LineTypes
8297     * @param shift Number of fractional bits in the point coordinates.
8298     * @param tipLength The length of the arrow tip in relation to the arrow length
8299     */
8300    public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int line_type, int shift, double tipLength) {
8301        arrowedLine_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, line_type, shift, tipLength);
8302    }
8303
8304    /**
8305     * Draws a arrow segment pointing from the first point to the second one.
8306     *
8307     * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
8308     *
8309     * @param img Image.
8310     * @param pt1 The point the arrow starts from.
8311     * @param pt2 The point the arrow points to.
8312     * @param color Line color.
8313     * @param thickness Line thickness.
8314     * @param line_type Type of the line. See #LineTypes
8315     * @param shift Number of fractional bits in the point coordinates.
8316     */
8317    public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int line_type, int shift) {
8318        arrowedLine_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, line_type, shift);
8319    }
8320
8321    /**
8322     * Draws a arrow segment pointing from the first point to the second one.
8323     *
8324     * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
8325     *
8326     * @param img Image.
8327     * @param pt1 The point the arrow starts from.
8328     * @param pt2 The point the arrow points to.
8329     * @param color Line color.
8330     * @param thickness Line thickness.
8331     * @param line_type Type of the line. See #LineTypes
8332     */
8333    public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int line_type) {
8334        arrowedLine_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, line_type);
8335    }
8336
8337    /**
8338     * Draws a arrow segment pointing from the first point to the second one.
8339     *
8340     * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
8341     *
8342     * @param img Image.
8343     * @param pt1 The point the arrow starts from.
8344     * @param pt2 The point the arrow points to.
8345     * @param color Line color.
8346     * @param thickness Line thickness.
8347     */
8348    public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness) {
8349        arrowedLine_3(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
8350    }
8351
8352    /**
8353     * Draws a arrow segment pointing from the first point to the second one.
8354     *
8355     * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
8356     *
8357     * @param img Image.
8358     * @param pt1 The point the arrow starts from.
8359     * @param pt2 The point the arrow points to.
8360     * @param color Line color.
8361     */
8362    public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color) {
8363        arrowedLine_4(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]);
8364    }
8365
8366
8367    //
8368    // C++:  void cv::rectangle(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
8369    //
8370
8371    /**
8372     * Draws a simple, thick, or filled up-right rectangle.
8373     *
8374     * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners
8375     * are pt1 and pt2.
8376     *
8377     * @param img Image.
8378     * @param pt1 Vertex of the rectangle.
8379     * @param pt2 Vertex of the rectangle opposite to pt1 .
8380     * @param color Rectangle color or brightness (grayscale image).
8381     * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,
8382     * mean that the function has to draw a filled rectangle.
8383     * @param lineType Type of the line. See #LineTypes
8384     * @param shift Number of fractional bits in the point coordinates.
8385     */
8386    public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift) {
8387        rectangle_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift);
8388    }
8389
8390    /**
8391     * Draws a simple, thick, or filled up-right rectangle.
8392     *
8393     * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners
8394     * are pt1 and pt2.
8395     *
8396     * @param img Image.
8397     * @param pt1 Vertex of the rectangle.
8398     * @param pt2 Vertex of the rectangle opposite to pt1 .
8399     * @param color Rectangle color or brightness (grayscale image).
8400     * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,
8401     * mean that the function has to draw a filled rectangle.
8402     * @param lineType Type of the line. See #LineTypes
8403     */
8404    public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType) {
8405        rectangle_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
8406    }
8407
8408    /**
8409     * Draws a simple, thick, or filled up-right rectangle.
8410     *
8411     * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners
8412     * are pt1 and pt2.
8413     *
8414     * @param img Image.
8415     * @param pt1 Vertex of the rectangle.
8416     * @param pt2 Vertex of the rectangle opposite to pt1 .
8417     * @param color Rectangle color or brightness (grayscale image).
8418     * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,
8419     * mean that the function has to draw a filled rectangle.
8420     */
8421    public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness) {
8422        rectangle_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
8423    }
8424
8425    /**
8426     * Draws a simple, thick, or filled up-right rectangle.
8427     *
8428     * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners
8429     * are pt1 and pt2.
8430     *
8431     * @param img Image.
8432     * @param pt1 Vertex of the rectangle.
8433     * @param pt2 Vertex of the rectangle opposite to pt1 .
8434     * @param color Rectangle color or brightness (grayscale image).
8435     * mean that the function has to draw a filled rectangle.
8436     */
8437    public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color) {
8438        rectangle_3(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]);
8439    }
8440
8441
8442    //
8443    // C++:  void cv::rectangle(Mat& img, Rect rec, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
8444    //
8445
8446    /**
8447     *
8448     *
8449     * use {@code rec} parameter as alternative specification of the drawn rectangle: `r.tl() and
8450     * r.br()-Point(1,1)` are opposite corners
8451     * @param img automatically generated
8452     * @param rec automatically generated
8453     * @param color automatically generated
8454     * @param thickness automatically generated
8455     * @param lineType automatically generated
8456     * @param shift automatically generated
8457     */
8458    public static void rectangle(Mat img, Rect rec, Scalar color, int thickness, int lineType, int shift) {
8459        rectangle_4(img.nativeObj, rec.x, rec.y, rec.width, rec.height, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift);
8460    }
8461
8462    /**
8463     *
8464     *
8465     * use {@code rec} parameter as alternative specification of the drawn rectangle: `r.tl() and
8466     * r.br()-Point(1,1)` are opposite corners
8467     * @param img automatically generated
8468     * @param rec automatically generated
8469     * @param color automatically generated
8470     * @param thickness automatically generated
8471     * @param lineType automatically generated
8472     */
8473    public static void rectangle(Mat img, Rect rec, Scalar color, int thickness, int lineType) {
8474        rectangle_5(img.nativeObj, rec.x, rec.y, rec.width, rec.height, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
8475    }
8476
8477    /**
8478     *
8479     *
8480     * use {@code rec} parameter as alternative specification of the drawn rectangle: `r.tl() and
8481     * r.br()-Point(1,1)` are opposite corners
8482     * @param img automatically generated
8483     * @param rec automatically generated
8484     * @param color automatically generated
8485     * @param thickness automatically generated
8486     */
8487    public static void rectangle(Mat img, Rect rec, Scalar color, int thickness) {
8488        rectangle_6(img.nativeObj, rec.x, rec.y, rec.width, rec.height, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
8489    }
8490
8491    /**
8492     *
8493     *
8494     * use {@code rec} parameter as alternative specification of the drawn rectangle: `r.tl() and
8495     * r.br()-Point(1,1)` are opposite corners
8496     * @param img automatically generated
8497     * @param rec automatically generated
8498     * @param color automatically generated
8499     */
8500    public static void rectangle(Mat img, Rect rec, Scalar color) {
8501        rectangle_7(img.nativeObj, rec.x, rec.y, rec.width, rec.height, color.val[0], color.val[1], color.val[2], color.val[3]);
8502    }
8503
8504
8505    //
8506    // C++:  void cv::circle(Mat& img, Point center, int radius, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
8507    //
8508
8509    /**
8510     * Draws a circle.
8511     *
8512     * The function cv::circle draws a simple or filled circle with a given center and radius.
8513     * @param img Image where the circle is drawn.
8514     * @param center Center of the circle.
8515     * @param radius Radius of the circle.
8516     * @param color Circle color.
8517     * @param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED,
8518     * mean that a filled circle is to be drawn.
8519     * @param lineType Type of the circle boundary. See #LineTypes
8520     * @param shift Number of fractional bits in the coordinates of the center and in the radius value.
8521     */
8522    public static void circle(Mat img, Point center, int radius, Scalar color, int thickness, int lineType, int shift) {
8523        circle_0(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift);
8524    }
8525
8526    /**
8527     * Draws a circle.
8528     *
8529     * The function cv::circle draws a simple or filled circle with a given center and radius.
8530     * @param img Image where the circle is drawn.
8531     * @param center Center of the circle.
8532     * @param radius Radius of the circle.
8533     * @param color Circle color.
8534     * @param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED,
8535     * mean that a filled circle is to be drawn.
8536     * @param lineType Type of the circle boundary. See #LineTypes
8537     */
8538    public static void circle(Mat img, Point center, int radius, Scalar color, int thickness, int lineType) {
8539        circle_1(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
8540    }
8541
8542    /**
8543     * Draws a circle.
8544     *
8545     * The function cv::circle draws a simple or filled circle with a given center and radius.
8546     * @param img Image where the circle is drawn.
8547     * @param center Center of the circle.
8548     * @param radius Radius of the circle.
8549     * @param color Circle color.
8550     * @param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED,
8551     * mean that a filled circle is to be drawn.
8552     */
8553    public static void circle(Mat img, Point center, int radius, Scalar color, int thickness) {
8554        circle_2(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
8555    }
8556
8557    /**
8558     * Draws a circle.
8559     *
8560     * The function cv::circle draws a simple or filled circle with a given center and radius.
8561     * @param img Image where the circle is drawn.
8562     * @param center Center of the circle.
8563     * @param radius Radius of the circle.
8564     * @param color Circle color.
8565     * mean that a filled circle is to be drawn.
8566     */
8567    public static void circle(Mat img, Point center, int radius, Scalar color) {
8568        circle_3(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3]);
8569    }
8570
8571
8572    //
8573    // C++:  void cv::ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
8574    //
8575
8576    /**
8577     * Draws a simple or thick elliptic arc or fills an ellipse sector.
8578     *
8579     * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic
8580     * arc, or a filled ellipse sector. The drawing code uses general parametric form.
8581     * A piecewise-linear curve is used to approximate the elliptic arc
8582     * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using
8583     * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first
8584     * variant of the function and want to draw the whole ellipse, not an arc, pass {@code startAngle=0} and
8585     * {@code endAngle=360}. If {@code startAngle} is greater than {@code endAngle}, they are swapped. The figure below explains
8586     * the meaning of the parameters to draw the blue arc.
8587     *
8588     * ![Parameters of Elliptic Arc](pics/ellipse.svg)
8589     *
8590     * @param img Image.
8591     * @param center Center of the ellipse.
8592     * @param axes Half of the size of the ellipse main axes.
8593     * @param angle Ellipse rotation angle in degrees.
8594     * @param startAngle Starting angle of the elliptic arc in degrees.
8595     * @param endAngle Ending angle of the elliptic arc in degrees.
8596     * @param color Ellipse color.
8597     * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
8598     * a filled ellipse sector is to be drawn.
8599     * @param lineType Type of the ellipse boundary. See #LineTypes
8600     * @param shift Number of fractional bits in the coordinates of the center and values of axes.
8601     */
8602    public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness, int lineType, int shift) {
8603        ellipse_0(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift);
8604    }
8605
8606    /**
8607     * Draws a simple or thick elliptic arc or fills an ellipse sector.
8608     *
8609     * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic
8610     * arc, or a filled ellipse sector. The drawing code uses general parametric form.
8611     * A piecewise-linear curve is used to approximate the elliptic arc
8612     * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using
8613     * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first
8614     * variant of the function and want to draw the whole ellipse, not an arc, pass {@code startAngle=0} and
8615     * {@code endAngle=360}. If {@code startAngle} is greater than {@code endAngle}, they are swapped. The figure below explains
8616     * the meaning of the parameters to draw the blue arc.
8617     *
8618     * ![Parameters of Elliptic Arc](pics/ellipse.svg)
8619     *
8620     * @param img Image.
8621     * @param center Center of the ellipse.
8622     * @param axes Half of the size of the ellipse main axes.
8623     * @param angle Ellipse rotation angle in degrees.
8624     * @param startAngle Starting angle of the elliptic arc in degrees.
8625     * @param endAngle Ending angle of the elliptic arc in degrees.
8626     * @param color Ellipse color.
8627     * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
8628     * a filled ellipse sector is to be drawn.
8629     * @param lineType Type of the ellipse boundary. See #LineTypes
8630     */
8631    public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness, int lineType) {
8632        ellipse_1(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
8633    }
8634
8635    /**
8636     * Draws a simple or thick elliptic arc or fills an ellipse sector.
8637     *
8638     * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic
8639     * arc, or a filled ellipse sector. The drawing code uses general parametric form.
8640     * A piecewise-linear curve is used to approximate the elliptic arc
8641     * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using
8642     * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first
8643     * variant of the function and want to draw the whole ellipse, not an arc, pass {@code startAngle=0} and
8644     * {@code endAngle=360}. If {@code startAngle} is greater than {@code endAngle}, they are swapped. The figure below explains
8645     * the meaning of the parameters to draw the blue arc.
8646     *
8647     * ![Parameters of Elliptic Arc](pics/ellipse.svg)
8648     *
8649     * @param img Image.
8650     * @param center Center of the ellipse.
8651     * @param axes Half of the size of the ellipse main axes.
8652     * @param angle Ellipse rotation angle in degrees.
8653     * @param startAngle Starting angle of the elliptic arc in degrees.
8654     * @param endAngle Ending angle of the elliptic arc in degrees.
8655     * @param color Ellipse color.
8656     * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
8657     * a filled ellipse sector is to be drawn.
8658     */
8659    public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness) {
8660        ellipse_2(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
8661    }
8662
8663    /**
8664     * Draws a simple or thick elliptic arc or fills an ellipse sector.
8665     *
8666     * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic
8667     * arc, or a filled ellipse sector. The drawing code uses general parametric form.
8668     * A piecewise-linear curve is used to approximate the elliptic arc
8669     * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using
8670     * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first
8671     * variant of the function and want to draw the whole ellipse, not an arc, pass {@code startAngle=0} and
8672     * {@code endAngle=360}. If {@code startAngle} is greater than {@code endAngle}, they are swapped. The figure below explains
8673     * the meaning of the parameters to draw the blue arc.
8674     *
8675     * ![Parameters of Elliptic Arc](pics/ellipse.svg)
8676     *
8677     * @param img Image.
8678     * @param center Center of the ellipse.
8679     * @param axes Half of the size of the ellipse main axes.
8680     * @param angle Ellipse rotation angle in degrees.
8681     * @param startAngle Starting angle of the elliptic arc in degrees.
8682     * @param endAngle Ending angle of the elliptic arc in degrees.
8683     * @param color Ellipse color.
8684     * a filled ellipse sector is to be drawn.
8685     */
8686    public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color) {
8687        ellipse_3(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3]);
8688    }
8689
8690
8691    //
8692    // C++:  void cv::ellipse(Mat& img, RotatedRect box, Scalar color, int thickness = 1, int lineType = LINE_8)
8693    //
8694
8695    /**
8696     *
8697     * @param img Image.
8698     * @param box Alternative ellipse representation via RotatedRect. This means that the function draws
8699     * an ellipse inscribed in the rotated rectangle.
8700     * @param color Ellipse color.
8701     * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
8702     * a filled ellipse sector is to be drawn.
8703     * @param lineType Type of the ellipse boundary. See #LineTypes
8704     */
8705    public static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness, int lineType) {
8706        ellipse_4(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
8707    }
8708
8709    /**
8710     *
8711     * @param img Image.
8712     * @param box Alternative ellipse representation via RotatedRect. This means that the function draws
8713     * an ellipse inscribed in the rotated rectangle.
8714     * @param color Ellipse color.
8715     * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
8716     * a filled ellipse sector is to be drawn.
8717     */
8718    public static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness) {
8719        ellipse_5(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
8720    }
8721
8722    /**
8723     *
8724     * @param img Image.
8725     * @param box Alternative ellipse representation via RotatedRect. This means that the function draws
8726     * an ellipse inscribed in the rotated rectangle.
8727     * @param color Ellipse color.
8728     * a filled ellipse sector is to be drawn.
8729     */
8730    public static void ellipse(Mat img, RotatedRect box, Scalar color) {
8731        ellipse_6(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3]);
8732    }
8733
8734
8735    //
8736    // C++:  void cv::drawMarker(Mat& img, Point position, Scalar color, int markerType = MARKER_CROSS, int markerSize = 20, int thickness = 1, int line_type = 8)
8737    //
8738
8739    /**
8740     * Draws a marker on a predefined position in an image.
8741     *
8742     * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
8743     * marker types are supported, see #MarkerTypes for more information.
8744     *
8745     * @param img Image.
8746     * @param position The point where the crosshair is positioned.
8747     * @param color Line color.
8748     * @param markerType The specific type of marker you want to use, see #MarkerTypes
8749     * @param thickness Line thickness.
8750     * @param line_type Type of the line, See #LineTypes
8751     * @param markerSize The length of the marker axis [default = 20 pixels]
8752     */
8753    public static void drawMarker(Mat img, Point position, Scalar color, int markerType, int markerSize, int thickness, int line_type) {
8754        drawMarker_0(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3], markerType, markerSize, thickness, line_type);
8755    }
8756
8757    /**
8758     * Draws a marker on a predefined position in an image.
8759     *
8760     * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
8761     * marker types are supported, see #MarkerTypes for more information.
8762     *
8763     * @param img Image.
8764     * @param position The point where the crosshair is positioned.
8765     * @param color Line color.
8766     * @param markerType The specific type of marker you want to use, see #MarkerTypes
8767     * @param thickness Line thickness.
8768     * @param markerSize The length of the marker axis [default = 20 pixels]
8769     */
8770    public static void drawMarker(Mat img, Point position, Scalar color, int markerType, int markerSize, int thickness) {
8771        drawMarker_1(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3], markerType, markerSize, thickness);
8772    }
8773
8774    /**
8775     * Draws a marker on a predefined position in an image.
8776     *
8777     * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
8778     * marker types are supported, see #MarkerTypes for more information.
8779     *
8780     * @param img Image.
8781     * @param position The point where the crosshair is positioned.
8782     * @param color Line color.
8783     * @param markerType The specific type of marker you want to use, see #MarkerTypes
8784     * @param markerSize The length of the marker axis [default = 20 pixels]
8785     */
8786    public static void drawMarker(Mat img, Point position, Scalar color, int markerType, int markerSize) {
8787        drawMarker_2(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3], markerType, markerSize);
8788    }
8789
8790    /**
8791     * Draws a marker on a predefined position in an image.
8792     *
8793     * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
8794     * marker types are supported, see #MarkerTypes for more information.
8795     *
8796     * @param img Image.
8797     * @param position The point where the crosshair is positioned.
8798     * @param color Line color.
8799     * @param markerType The specific type of marker you want to use, see #MarkerTypes
8800     */
8801    public static void drawMarker(Mat img, Point position, Scalar color, int markerType) {
8802        drawMarker_3(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3], markerType);
8803    }
8804
8805    /**
8806     * Draws a marker on a predefined position in an image.
8807     *
8808     * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
8809     * marker types are supported, see #MarkerTypes for more information.
8810     *
8811     * @param img Image.
8812     * @param position The point where the crosshair is positioned.
8813     * @param color Line color.
8814     */
8815    public static void drawMarker(Mat img, Point position, Scalar color) {
8816        drawMarker_4(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3]);
8817    }
8818
8819
8820    //
8821    // C++:  void cv::fillConvexPoly(Mat& img, vector_Point points, Scalar color, int lineType = LINE_8, int shift = 0)
8822    //
8823
8824    /**
8825     * Fills a convex polygon.
8826     *
8827     * The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the
8828     * function #fillPoly . It can fill not only convex polygons but any monotonic polygon without
8829     * self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line)
8830     * twice at the most (though, its top-most and/or the bottom edge could be horizontal).
8831     *
8832     * @param img Image.
8833     * @param points Polygon vertices.
8834     * @param color Polygon color.
8835     * @param lineType Type of the polygon boundaries. See #LineTypes
8836     * @param shift Number of fractional bits in the vertex coordinates.
8837     */
8838    public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color, int lineType, int shift) {
8839        Mat points_mat = points;
8840        fillConvexPoly_0(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift);
8841    }
8842
8843    /**
8844     * Fills a convex polygon.
8845     *
8846     * The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the
8847     * function #fillPoly . It can fill not only convex polygons but any monotonic polygon without
8848     * self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line)
8849     * twice at the most (though, its top-most and/or the bottom edge could be horizontal).
8850     *
8851     * @param img Image.
8852     * @param points Polygon vertices.
8853     * @param color Polygon color.
8854     * @param lineType Type of the polygon boundaries. See #LineTypes
8855     */
8856    public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color, int lineType) {
8857        Mat points_mat = points;
8858        fillConvexPoly_1(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType);
8859    }
8860
8861    /**
8862     * Fills a convex polygon.
8863     *
8864     * The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the
8865     * function #fillPoly . It can fill not only convex polygons but any monotonic polygon without
8866     * self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line)
8867     * twice at the most (though, its top-most and/or the bottom edge could be horizontal).
8868     *
8869     * @param img Image.
8870     * @param points Polygon vertices.
8871     * @param color Polygon color.
8872     */
8873    public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color) {
8874        Mat points_mat = points;
8875        fillConvexPoly_2(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3]);
8876    }
8877
8878
8879    //
8880    // C++:  void cv::fillPoly(Mat& img, vector_vector_Point pts, Scalar color, int lineType = LINE_8, int shift = 0, Point offset = Point())
8881    //
8882
8883    /**
8884     * Fills the area bounded by one or more polygons.
8885     *
8886     * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill
8887     * complex areas, for example, areas with holes, contours with self-intersections (some of their
8888     * parts), and so forth.
8889     *
8890     * @param img Image.
8891     * @param pts Array of polygons where each polygon is represented as an array of points.
8892     * @param color Polygon color.
8893     * @param lineType Type of the polygon boundaries. See #LineTypes
8894     * @param shift Number of fractional bits in the vertex coordinates.
8895     * @param offset Optional offset of all points of the contours.
8896     */
8897    public static void fillPoly(Mat img, List<MatOfPoint> pts, Scalar color, int lineType, int shift, Point offset) {
8898        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
8899        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
8900        fillPoly_0(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift, offset.x, offset.y);
8901    }
8902
8903    /**
8904     * Fills the area bounded by one or more polygons.
8905     *
8906     * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill
8907     * complex areas, for example, areas with holes, contours with self-intersections (some of their
8908     * parts), and so forth.
8909     *
8910     * @param img Image.
8911     * @param pts Array of polygons where each polygon is represented as an array of points.
8912     * @param color Polygon color.
8913     * @param lineType Type of the polygon boundaries. See #LineTypes
8914     * @param shift Number of fractional bits in the vertex coordinates.
8915     */
8916    public static void fillPoly(Mat img, List<MatOfPoint> pts, Scalar color, int lineType, int shift) {
8917        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
8918        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
8919        fillPoly_1(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift);
8920    }
8921
8922    /**
8923     * Fills the area bounded by one or more polygons.
8924     *
8925     * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill
8926     * complex areas, for example, areas with holes, contours with self-intersections (some of their
8927     * parts), and so forth.
8928     *
8929     * @param img Image.
8930     * @param pts Array of polygons where each polygon is represented as an array of points.
8931     * @param color Polygon color.
8932     * @param lineType Type of the polygon boundaries. See #LineTypes
8933     */
8934    public static void fillPoly(Mat img, List<MatOfPoint> pts, Scalar color, int lineType) {
8935        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
8936        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
8937        fillPoly_2(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType);
8938    }
8939
8940    /**
8941     * Fills the area bounded by one or more polygons.
8942     *
8943     * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill
8944     * complex areas, for example, areas with holes, contours with self-intersections (some of their
8945     * parts), and so forth.
8946     *
8947     * @param img Image.
8948     * @param pts Array of polygons where each polygon is represented as an array of points.
8949     * @param color Polygon color.
8950     */
8951    public static void fillPoly(Mat img, List<MatOfPoint> pts, Scalar color) {
8952        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
8953        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
8954        fillPoly_3(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3]);
8955    }
8956
8957
8958    //
8959    // C++:  void cv::polylines(Mat& img, vector_vector_Point pts, bool isClosed, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
8960    //
8961
8962    /**
8963     * Draws several polygonal curves.
8964     *
8965     * @param img Image.
8966     * @param pts Array of polygonal curves.
8967     * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,
8968     * the function draws a line from the last vertex of each curve to its first vertex.
8969     * @param color Polyline color.
8970     * @param thickness Thickness of the polyline edges.
8971     * @param lineType Type of the line segments. See #LineTypes
8972     * @param shift Number of fractional bits in the vertex coordinates.
8973     *
8974     * The function cv::polylines draws one or more polygonal curves.
8975     */
8976    public static void polylines(Mat img, List<MatOfPoint> pts, boolean isClosed, Scalar color, int thickness, int lineType, int shift) {
8977        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
8978        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
8979        polylines_0(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift);
8980    }
8981
8982    /**
8983     * Draws several polygonal curves.
8984     *
8985     * @param img Image.
8986     * @param pts Array of polygonal curves.
8987     * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,
8988     * the function draws a line from the last vertex of each curve to its first vertex.
8989     * @param color Polyline color.
8990     * @param thickness Thickness of the polyline edges.
8991     * @param lineType Type of the line segments. See #LineTypes
8992     *
8993     * The function cv::polylines draws one or more polygonal curves.
8994     */
8995    public static void polylines(Mat img, List<MatOfPoint> pts, boolean isClosed, Scalar color, int thickness, int lineType) {
8996        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
8997        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
8998        polylines_1(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
8999    }
9000
9001    /**
9002     * Draws several polygonal curves.
9003     *
9004     * @param img Image.
9005     * @param pts Array of polygonal curves.
9006     * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,
9007     * the function draws a line from the last vertex of each curve to its first vertex.
9008     * @param color Polyline color.
9009     * @param thickness Thickness of the polyline edges.
9010     *
9011     * The function cv::polylines draws one or more polygonal curves.
9012     */
9013    public static void polylines(Mat img, List<MatOfPoint> pts, boolean isClosed, Scalar color, int thickness) {
9014        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
9015        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
9016        polylines_2(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
9017    }
9018
9019    /**
9020     * Draws several polygonal curves.
9021     *
9022     * @param img Image.
9023     * @param pts Array of polygonal curves.
9024     * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,
9025     * the function draws a line from the last vertex of each curve to its first vertex.
9026     * @param color Polyline color.
9027     *
9028     * The function cv::polylines draws one or more polygonal curves.
9029     */
9030    public static void polylines(Mat img, List<MatOfPoint> pts, boolean isClosed, Scalar color) {
9031        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
9032        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
9033        polylines_3(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3]);
9034    }
9035
9036
9037    //
9038    // C++:  void cv::drawContours(Mat& image, vector_vector_Point contours, int contourIdx, Scalar color, int thickness = 1, int lineType = LINE_8, Mat hierarchy = Mat(), int maxLevel = INT_MAX, Point offset = Point())
9039    //
9040
9041    /**
9042     * Draws contours outlines or filled contours.
9043     *
9044     * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area
9045     * bounded by the contours if \(\texttt{thickness}&lt;0\) . The example below shows how to retrieve
9046     * connected components from the binary image and label them: :
9047     * INCLUDE: snippets/imgproc_drawContours.cpp
9048     *
9049     * @param image Destination image.
9050     * @param contours All the input contours. Each contour is stored as a point vector.
9051     * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
9052     * @param color Color of the contours.
9053     * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
9054     * thickness=#FILLED ), the contour interiors are drawn.
9055     * @param lineType Line connectivity. See #LineTypes
9056     * @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only
9057     * some of the contours (see maxLevel ).
9058     * @param maxLevel Maximal level for drawn contours. If it is 0, only the specified contour is drawn.
9059     * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function
9060     * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This
9061     * parameter is only taken into account when there is hierarchy available.
9062     * @param offset Optional contour shift parameter. Shift all the drawn contours by the specified
9063     * \(\texttt{offset}=(dx,dy)\) .
9064     * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly
9065     * even when no hierarchy date is provided. This is done by analyzing all the outlines together
9066     * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
9067     * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group
9068     * of contours, or iterate over the collection using contourIdx parameter.
9069     */
9070    public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness, int lineType, Mat hierarchy, int maxLevel, Point offset) {
9071        List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0);
9072        Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm);
9073        drawContours_0(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, hierarchy.nativeObj, maxLevel, offset.x, offset.y);
9074    }
9075
9076    /**
9077     * Draws contours outlines or filled contours.
9078     *
9079     * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area
9080     * bounded by the contours if \(\texttt{thickness}&lt;0\) . The example below shows how to retrieve
9081     * connected components from the binary image and label them: :
9082     * INCLUDE: snippets/imgproc_drawContours.cpp
9083     *
9084     * @param image Destination image.
9085     * @param contours All the input contours. Each contour is stored as a point vector.
9086     * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
9087     * @param color Color of the contours.
9088     * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
9089     * thickness=#FILLED ), the contour interiors are drawn.
9090     * @param lineType Line connectivity. See #LineTypes
9091     * @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only
9092     * some of the contours (see maxLevel ).
9093     * @param maxLevel Maximal level for drawn contours. If it is 0, only the specified contour is drawn.
9094     * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function
9095     * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This
9096     * parameter is only taken into account when there is hierarchy available.
9097     * \(\texttt{offset}=(dx,dy)\) .
9098     * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly
9099     * even when no hierarchy date is provided. This is done by analyzing all the outlines together
9100     * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
9101     * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group
9102     * of contours, or iterate over the collection using contourIdx parameter.
9103     */
9104    public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness, int lineType, Mat hierarchy, int maxLevel) {
9105        List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0);
9106        Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm);
9107        drawContours_1(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, hierarchy.nativeObj, maxLevel);
9108    }
9109
9110    /**
9111     * Draws contours outlines or filled contours.
9112     *
9113     * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area
9114     * bounded by the contours if \(\texttt{thickness}&lt;0\) . The example below shows how to retrieve
9115     * connected components from the binary image and label them: :
9116     * INCLUDE: snippets/imgproc_drawContours.cpp
9117     *
9118     * @param image Destination image.
9119     * @param contours All the input contours. Each contour is stored as a point vector.
9120     * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
9121     * @param color Color of the contours.
9122     * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
9123     * thickness=#FILLED ), the contour interiors are drawn.
9124     * @param lineType Line connectivity. See #LineTypes
9125     * @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only
9126     * some of the contours (see maxLevel ).
9127     * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function
9128     * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This
9129     * parameter is only taken into account when there is hierarchy available.
9130     * \(\texttt{offset}=(dx,dy)\) .
9131     * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly
9132     * even when no hierarchy date is provided. This is done by analyzing all the outlines together
9133     * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
9134     * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group
9135     * of contours, or iterate over the collection using contourIdx parameter.
9136     */
9137    public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness, int lineType, Mat hierarchy) {
9138        List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0);
9139        Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm);
9140        drawContours_2(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, hierarchy.nativeObj);
9141    }
9142
9143    /**
9144     * Draws contours outlines or filled contours.
9145     *
9146     * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area
9147     * bounded by the contours if \(\texttt{thickness}&lt;0\) . The example below shows how to retrieve
9148     * connected components from the binary image and label them: :
9149     * INCLUDE: snippets/imgproc_drawContours.cpp
9150     *
9151     * @param image Destination image.
9152     * @param contours All the input contours. Each contour is stored as a point vector.
9153     * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
9154     * @param color Color of the contours.
9155     * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
9156     * thickness=#FILLED ), the contour interiors are drawn.
9157     * @param lineType Line connectivity. See #LineTypes
9158     * some of the contours (see maxLevel ).
9159     * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function
9160     * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This
9161     * parameter is only taken into account when there is hierarchy available.
9162     * \(\texttt{offset}=(dx,dy)\) .
9163     * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly
9164     * even when no hierarchy date is provided. This is done by analyzing all the outlines together
9165     * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
9166     * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group
9167     * of contours, or iterate over the collection using contourIdx parameter.
9168     */
9169    public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness, int lineType) {
9170        List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0);
9171        Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm);
9172        drawContours_3(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
9173    }
9174
9175    /**
9176     * Draws contours outlines or filled contours.
9177     *
9178     * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area
9179     * bounded by the contours if \(\texttt{thickness}&lt;0\) . The example below shows how to retrieve
9180     * connected components from the binary image and label them: :
9181     * INCLUDE: snippets/imgproc_drawContours.cpp
9182     *
9183     * @param image Destination image.
9184     * @param contours All the input contours. Each contour is stored as a point vector.
9185     * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
9186     * @param color Color of the contours.
9187     * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
9188     * thickness=#FILLED ), the contour interiors are drawn.
9189     * some of the contours (see maxLevel ).
9190     * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function
9191     * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This
9192     * parameter is only taken into account when there is hierarchy available.
9193     * \(\texttt{offset}=(dx,dy)\) .
9194     * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly
9195     * even when no hierarchy date is provided. This is done by analyzing all the outlines together
9196     * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
9197     * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group
9198     * of contours, or iterate over the collection using contourIdx parameter.
9199     */
9200    public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness) {
9201        List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0);
9202        Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm);
9203        drawContours_4(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
9204    }
9205
9206    /**
9207     * Draws contours outlines or filled contours.
9208     *
9209     * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area
9210     * bounded by the contours if \(\texttt{thickness}&lt;0\) . The example below shows how to retrieve
9211     * connected components from the binary image and label them: :
9212     * INCLUDE: snippets/imgproc_drawContours.cpp
9213     *
9214     * @param image Destination image.
9215     * @param contours All the input contours. Each contour is stored as a point vector.
9216     * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
9217     * @param color Color of the contours.
9218     * thickness=#FILLED ), the contour interiors are drawn.
9219     * some of the contours (see maxLevel ).
9220     * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function
9221     * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This
9222     * parameter is only taken into account when there is hierarchy available.
9223     * \(\texttt{offset}=(dx,dy)\) .
9224     * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly
9225     * even when no hierarchy date is provided. This is done by analyzing all the outlines together
9226     * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
9227     * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group
9228     * of contours, or iterate over the collection using contourIdx parameter.
9229     */
9230    public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color) {
9231        List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0);
9232        Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm);
9233        drawContours_5(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3]);
9234    }
9235
9236
9237    //
9238    // C++:  bool cv::clipLine(Rect imgRect, Point& pt1, Point& pt2)
9239    //
9240
9241    /**
9242     *
9243     * @param imgRect Image rectangle.
9244     * @param pt1 First line point.
9245     * @param pt2 Second line point.
9246     * @return automatically generated
9247     */
9248    public static boolean clipLine(Rect imgRect, Point pt1, Point pt2) {
9249        double[] pt1_out = new double[2];
9250        double[] pt2_out = new double[2];
9251        boolean retVal = clipLine_0(imgRect.x, imgRect.y, imgRect.width, imgRect.height, pt1.x, pt1.y, pt1_out, pt2.x, pt2.y, pt2_out);
9252        if(pt1!=null){ pt1.x = pt1_out[0]; pt1.y = pt1_out[1]; } 
9253        if(pt2!=null){ pt2.x = pt2_out[0]; pt2.y = pt2_out[1]; } 
9254        return retVal;
9255    }
9256
9257
9258    //
9259    // C++:  void cv::ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector_Point& pts)
9260    //
9261
9262    /**
9263     * Approximates an elliptic arc with a polyline.
9264     *
9265     * The function ellipse2Poly computes the vertices of a polyline that approximates the specified
9266     * elliptic arc. It is used by #ellipse. If {@code arcStart} is greater than {@code arcEnd}, they are swapped.
9267     *
9268     * @param center Center of the arc.
9269     * @param axes Half of the size of the ellipse main axes. See #ellipse for details.
9270     * @param angle Rotation angle of the ellipse in degrees. See #ellipse for details.
9271     * @param arcStart Starting angle of the elliptic arc in degrees.
9272     * @param arcEnd Ending angle of the elliptic arc in degrees.
9273     * @param delta Angle between the subsequent polyline vertices. It defines the approximation
9274     * accuracy.
9275     * @param pts Output vector of polyline vertices.
9276     */
9277    public static void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, MatOfPoint pts) {
9278        Mat pts_mat = pts;
9279        ellipse2Poly_0(center.x, center.y, axes.width, axes.height, angle, arcStart, arcEnd, delta, pts_mat.nativeObj);
9280    }
9281
9282
9283    //
9284    // C++:  void cv::putText(Mat& img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1, int lineType = LINE_8, bool bottomLeftOrigin = false)
9285    //
9286
9287    /**
9288     * Draws a text string.
9289     *
9290     * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered
9291     * using the specified font are replaced by question marks. See #getTextSize for a text rendering code
9292     * example.
9293     *
9294     * @param img Image.
9295     * @param text Text string to be drawn.
9296     * @param org Bottom-left corner of the text string in the image.
9297     * @param fontFace Font type, see #HersheyFonts.
9298     * @param fontScale Font scale factor that is multiplied by the font-specific base size.
9299     * @param color Text color.
9300     * @param thickness Thickness of the lines used to draw a text.
9301     * @param lineType Line type. See #LineTypes
9302     * @param bottomLeftOrigin When true, the image data origin is at the bottom-left corner. Otherwise,
9303     * it is at the top-left corner.
9304     */
9305    public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness, int lineType, boolean bottomLeftOrigin) {
9306        putText_0(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, bottomLeftOrigin);
9307    }
9308
9309    /**
9310     * Draws a text string.
9311     *
9312     * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered
9313     * using the specified font are replaced by question marks. See #getTextSize for a text rendering code
9314     * example.
9315     *
9316     * @param img Image.
9317     * @param text Text string to be drawn.
9318     * @param org Bottom-left corner of the text string in the image.
9319     * @param fontFace Font type, see #HersheyFonts.
9320     * @param fontScale Font scale factor that is multiplied by the font-specific base size.
9321     * @param color Text color.
9322     * @param thickness Thickness of the lines used to draw a text.
9323     * @param lineType Line type. See #LineTypes
9324     * it is at the top-left corner.
9325     */
9326    public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness, int lineType) {
9327        putText_1(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
9328    }
9329
9330    /**
9331     * Draws a text string.
9332     *
9333     * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered
9334     * using the specified font are replaced by question marks. See #getTextSize for a text rendering code
9335     * example.
9336     *
9337     * @param img Image.
9338     * @param text Text string to be drawn.
9339     * @param org Bottom-left corner of the text string in the image.
9340     * @param fontFace Font type, see #HersheyFonts.
9341     * @param fontScale Font scale factor that is multiplied by the font-specific base size.
9342     * @param color Text color.
9343     * @param thickness Thickness of the lines used to draw a text.
9344     * it is at the top-left corner.
9345     */
9346    public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness) {
9347        putText_2(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
9348    }
9349
9350    /**
9351     * Draws a text string.
9352     *
9353     * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered
9354     * using the specified font are replaced by question marks. See #getTextSize for a text rendering code
9355     * example.
9356     *
9357     * @param img Image.
9358     * @param text Text string to be drawn.
9359     * @param org Bottom-left corner of the text string in the image.
9360     * @param fontFace Font type, see #HersheyFonts.
9361     * @param fontScale Font scale factor that is multiplied by the font-specific base size.
9362     * @param color Text color.
9363     * it is at the top-left corner.
9364     */
9365    public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color) {
9366        putText_3(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3]);
9367    }
9368
9369
9370    //
9371    // C++:  double cv::getFontScaleFromHeight(int fontFace, int pixelHeight, int thickness = 1)
9372    //
9373
9374    /**
9375     * Calculates the font-specific size to use to achieve a given height in pixels.
9376     *
9377     * @param fontFace Font to use, see cv::HersheyFonts.
9378     * @param pixelHeight Pixel height to compute the fontScale for
9379     * @param thickness Thickness of lines used to render the text.See putText for details.
9380     * @return The fontSize to use for cv::putText
9381     *
9382     * SEE: cv::putText
9383     */
9384    public static double getFontScaleFromHeight(int fontFace, int pixelHeight, int thickness) {
9385        return getFontScaleFromHeight_0(fontFace, pixelHeight, thickness);
9386    }
9387
9388    /**
9389     * Calculates the font-specific size to use to achieve a given height in pixels.
9390     *
9391     * @param fontFace Font to use, see cv::HersheyFonts.
9392     * @param pixelHeight Pixel height to compute the fontScale for
9393     * @return The fontSize to use for cv::putText
9394     *
9395     * SEE: cv::putText
9396     */
9397    public static double getFontScaleFromHeight(int fontFace, int pixelHeight) {
9398        return getFontScaleFromHeight_1(fontFace, pixelHeight);
9399    }
9400
9401
9402    //
9403    // C++:  void cv::HoughLinesWithAccumulator(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI)
9404    //
9405
9406    /**
9407     * Finds lines in a binary image using the standard Hough transform and get accumulator.
9408     *
9409     * <b>Note:</b> This function is for bindings use only. Use original function in C++ code
9410     *
9411     * SEE: HoughLines
9412     * @param image automatically generated
9413     * @param lines automatically generated
9414     * @param rho automatically generated
9415     * @param theta automatically generated
9416     * @param threshold automatically generated
9417     * @param srn automatically generated
9418     * @param stn automatically generated
9419     * @param min_theta automatically generated
9420     * @param max_theta automatically generated
9421     */
9422    public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn, double min_theta, double max_theta) {
9423        HoughLinesWithAccumulator_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn, min_theta, max_theta);
9424    }
9425
9426    /**
9427     * Finds lines in a binary image using the standard Hough transform and get accumulator.
9428     *
9429     * <b>Note:</b> This function is for bindings use only. Use original function in C++ code
9430     *
9431     * SEE: HoughLines
9432     * @param image automatically generated
9433     * @param lines automatically generated
9434     * @param rho automatically generated
9435     * @param theta automatically generated
9436     * @param threshold automatically generated
9437     * @param srn automatically generated
9438     * @param stn automatically generated
9439     * @param min_theta automatically generated
9440     */
9441    public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn, double min_theta) {
9442        HoughLinesWithAccumulator_1(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn, min_theta);
9443    }
9444
9445    /**
9446     * Finds lines in a binary image using the standard Hough transform and get accumulator.
9447     *
9448     * <b>Note:</b> This function is for bindings use only. Use original function in C++ code
9449     *
9450     * SEE: HoughLines
9451     * @param image automatically generated
9452     * @param lines automatically generated
9453     * @param rho automatically generated
9454     * @param theta automatically generated
9455     * @param threshold automatically generated
9456     * @param srn automatically generated
9457     * @param stn automatically generated
9458     */
9459    public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn) {
9460        HoughLinesWithAccumulator_2(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn);
9461    }
9462
9463    /**
9464     * Finds lines in a binary image using the standard Hough transform and get accumulator.
9465     *
9466     * <b>Note:</b> This function is for bindings use only. Use original function in C++ code
9467     *
9468     * SEE: HoughLines
9469     * @param image automatically generated
9470     * @param lines automatically generated
9471     * @param rho automatically generated
9472     * @param theta automatically generated
9473     * @param threshold automatically generated
9474     * @param srn automatically generated
9475     */
9476    public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold, double srn) {
9477        HoughLinesWithAccumulator_3(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn);
9478    }
9479
9480    /**
9481     * Finds lines in a binary image using the standard Hough transform and get accumulator.
9482     *
9483     * <b>Note:</b> This function is for bindings use only. Use original function in C++ code
9484     *
9485     * SEE: HoughLines
9486     * @param image automatically generated
9487     * @param lines automatically generated
9488     * @param rho automatically generated
9489     * @param theta automatically generated
9490     * @param threshold automatically generated
9491     */
9492    public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold) {
9493        HoughLinesWithAccumulator_4(image.nativeObj, lines.nativeObj, rho, theta, threshold);
9494    }
9495
9496
9497
9498// C++: Size getTextSize(const String& text, int fontFace, double fontScale, int thickness, int* baseLine);
9499//javadoc:getTextSize(text, fontFace, fontScale, thickness, baseLine)
9500public static Size getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine) {
9501    if(baseLine != null && baseLine.length != 1)
9502        throw new java.lang.IllegalArgumentException("'baseLine' must be 'int[1]' or 'null'.");
9503    Size retVal = new Size(n_getTextSize(text, fontFace, fontScale, thickness, baseLine));
9504    return retVal;
9505}
9506
9507
9508
9509
9510    // C++:  Ptr_LineSegmentDetector cv::createLineSegmentDetector(int _refine = LSD_REFINE_STD, double _scale = 0.8, double _sigma_scale = 0.6, double _quant = 2.0, double _ang_th = 22.5, double _log_eps = 0, double _density_th = 0.7, int _n_bins = 1024)
9511    private static native long createLineSegmentDetector_0(int _refine, double _scale, double _sigma_scale, double _quant, double _ang_th, double _log_eps, double _density_th, int _n_bins);
9512    private static native long createLineSegmentDetector_1(int _refine, double _scale, double _sigma_scale, double _quant, double _ang_th, double _log_eps, double _density_th);
9513    private static native long createLineSegmentDetector_2(int _refine, double _scale, double _sigma_scale, double _quant, double _ang_th, double _log_eps);
9514    private static native long createLineSegmentDetector_3(int _refine, double _scale, double _sigma_scale, double _quant, double _ang_th);
9515    private static native long createLineSegmentDetector_4(int _refine, double _scale, double _sigma_scale, double _quant);
9516    private static native long createLineSegmentDetector_5(int _refine, double _scale, double _sigma_scale);
9517    private static native long createLineSegmentDetector_6(int _refine, double _scale);
9518    private static native long createLineSegmentDetector_7(int _refine);
9519    private static native long createLineSegmentDetector_8();
9520
9521    // C++:  Mat cv::getGaussianKernel(int ksize, double sigma, int ktype = CV_64F)
9522    private static native long getGaussianKernel_0(int ksize, double sigma, int ktype);
9523    private static native long getGaussianKernel_1(int ksize, double sigma);
9524
9525    // C++:  void cv::getDerivKernels(Mat& kx, Mat& ky, int dx, int dy, int ksize, bool normalize = false, int ktype = CV_32F)
9526    private static native void getDerivKernels_0(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize, boolean normalize, int ktype);
9527    private static native void getDerivKernels_1(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize, boolean normalize);
9528    private static native void getDerivKernels_2(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize);
9529
9530    // C++:  Mat cv::getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi = CV_PI*0.5, int ktype = CV_64F)
9531    private static native long getGaborKernel_0(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma, double psi, int ktype);
9532    private static native long getGaborKernel_1(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma, double psi);
9533    private static native long getGaborKernel_2(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma);
9534
9535    // C++:  Mat cv::getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1))
9536    private static native long getStructuringElement_0(int shape, double ksize_width, double ksize_height, double anchor_x, double anchor_y);
9537    private static native long getStructuringElement_1(int shape, double ksize_width, double ksize_height);
9538
9539    // C++:  void cv::medianBlur(Mat src, Mat& dst, int ksize)
9540    private static native void medianBlur_0(long src_nativeObj, long dst_nativeObj, int ksize);
9541
9542    // C++:  void cv::GaussianBlur(Mat src, Mat& dst, Size ksize, double sigmaX, double sigmaY = 0, int borderType = BORDER_DEFAULT)
9543    private static native void GaussianBlur_0(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX, double sigmaY, int borderType);
9544    private static native void GaussianBlur_1(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX, double sigmaY);
9545    private static native void GaussianBlur_2(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX);
9546
9547    // C++:  void cv::bilateralFilter(Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT)
9548    private static native void bilateralFilter_0(long src_nativeObj, long dst_nativeObj, int d, double sigmaColor, double sigmaSpace, int borderType);
9549    private static native void bilateralFilter_1(long src_nativeObj, long dst_nativeObj, int d, double sigmaColor, double sigmaSpace);
9550
9551    // C++:  void cv::boxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), bool normalize = true, int borderType = BORDER_DEFAULT)
9552    private static native void boxFilter_0(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize, int borderType);
9553    private static native void boxFilter_1(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize);
9554    private static native void boxFilter_2(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y);
9555    private static native void boxFilter_3(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height);
9556
9557    // C++:  void cv::sqrBoxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1, -1), bool normalize = true, int borderType = BORDER_DEFAULT)
9558    private static native void sqrBoxFilter_0(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize, int borderType);
9559    private static native void sqrBoxFilter_1(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize);
9560    private static native void sqrBoxFilter_2(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y);
9561    private static native void sqrBoxFilter_3(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height);
9562
9563    // C++:  void cv::blur(Mat src, Mat& dst, Size ksize, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT)
9564    private static native void blur_0(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double anchor_x, double anchor_y, int borderType);
9565    private static native void blur_1(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double anchor_x, double anchor_y);
9566    private static native void blur_2(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height);
9567
9568    // C++:  void cv::filter2D(Mat src, Mat& dst, int ddepth, Mat kernel, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT)
9569    private static native void filter2D_0(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y, double delta, int borderType);
9570    private static native void filter2D_1(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y, double delta);
9571    private static native void filter2D_2(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y);
9572    private static native void filter2D_3(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj);
9573
9574    // C++:  void cv::sepFilter2D(Mat src, Mat& dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT)
9575    private static native void sepFilter2D_0(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y, double delta, int borderType);
9576    private static native void sepFilter2D_1(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y, double delta);
9577    private static native void sepFilter2D_2(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y);
9578    private static native void sepFilter2D_3(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj);
9579
9580    // C++:  void cv::Sobel(Mat src, Mat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
9581    private static native void Sobel_0(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType);
9582    private static native void Sobel_1(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale, double delta);
9583    private static native void Sobel_2(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale);
9584    private static native void Sobel_3(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize);
9585    private static native void Sobel_4(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy);
9586
9587    // C++:  void cv::spatialGradient(Mat src, Mat& dx, Mat& dy, int ksize = 3, int borderType = BORDER_DEFAULT)
9588    private static native void spatialGradient_0(long src_nativeObj, long dx_nativeObj, long dy_nativeObj, int ksize, int borderType);
9589    private static native void spatialGradient_1(long src_nativeObj, long dx_nativeObj, long dy_nativeObj, int ksize);
9590    private static native void spatialGradient_2(long src_nativeObj, long dx_nativeObj, long dy_nativeObj);
9591
9592    // C++:  void cv::Scharr(Mat src, Mat& dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
9593    private static native void Scharr_0(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale, double delta, int borderType);
9594    private static native void Scharr_1(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale, double delta);
9595    private static native void Scharr_2(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale);
9596    private static native void Scharr_3(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy);
9597
9598    // C++:  void cv::Laplacian(Mat src, Mat& dst, int ddepth, int ksize = 1, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
9599    private static native void Laplacian_0(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale, double delta, int borderType);
9600    private static native void Laplacian_1(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale, double delta);
9601    private static native void Laplacian_2(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale);
9602    private static native void Laplacian_3(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize);
9603    private static native void Laplacian_4(long src_nativeObj, long dst_nativeObj, int ddepth);
9604
9605    // C++:  void cv::Canny(Mat image, Mat& edges, double threshold1, double threshold2, int apertureSize = 3, bool L2gradient = false)
9606    private static native void Canny_0(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2, int apertureSize, boolean L2gradient);
9607    private static native void Canny_1(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2, int apertureSize);
9608    private static native void Canny_2(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2);
9609
9610    // C++:  void cv::Canny(Mat dx, Mat dy, Mat& edges, double threshold1, double threshold2, bool L2gradient = false)
9611    private static native void Canny_3(long dx_nativeObj, long dy_nativeObj, long edges_nativeObj, double threshold1, double threshold2, boolean L2gradient);
9612    private static native void Canny_4(long dx_nativeObj, long dy_nativeObj, long edges_nativeObj, double threshold1, double threshold2);
9613
9614    // C++:  void cv::cornerMinEigenVal(Mat src, Mat& dst, int blockSize, int ksize = 3, int borderType = BORDER_DEFAULT)
9615    private static native void cornerMinEigenVal_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, int borderType);
9616    private static native void cornerMinEigenVal_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize);
9617    private static native void cornerMinEigenVal_2(long src_nativeObj, long dst_nativeObj, int blockSize);
9618
9619    // C++:  void cv::cornerHarris(Mat src, Mat& dst, int blockSize, int ksize, double k, int borderType = BORDER_DEFAULT)
9620    private static native void cornerHarris_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, double k, int borderType);
9621    private static native void cornerHarris_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, double k);
9622
9623    // C++:  void cv::cornerEigenValsAndVecs(Mat src, Mat& dst, int blockSize, int ksize, int borderType = BORDER_DEFAULT)
9624    private static native void cornerEigenValsAndVecs_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, int borderType);
9625    private static native void cornerEigenValsAndVecs_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize);
9626
9627    // C++:  void cv::preCornerDetect(Mat src, Mat& dst, int ksize, int borderType = BORDER_DEFAULT)
9628    private static native void preCornerDetect_0(long src_nativeObj, long dst_nativeObj, int ksize, int borderType);
9629    private static native void preCornerDetect_1(long src_nativeObj, long dst_nativeObj, int ksize);
9630
9631    // C++:  void cv::cornerSubPix(Mat image, Mat& corners, Size winSize, Size zeroZone, TermCriteria criteria)
9632    private static native void cornerSubPix_0(long image_nativeObj, long corners_nativeObj, double winSize_width, double winSize_height, double zeroZone_width, double zeroZone_height, int criteria_type, int criteria_maxCount, double criteria_epsilon);
9633
9634    // C++:  void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask = Mat(), int blockSize = 3, bool useHarrisDetector = false, double k = 0.04)
9635    private static native void goodFeaturesToTrack_0(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, boolean useHarrisDetector, double k);
9636    private static native void goodFeaturesToTrack_1(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, boolean useHarrisDetector);
9637    private static native void goodFeaturesToTrack_2(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize);
9638    private static native void goodFeaturesToTrack_3(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj);
9639    private static native void goodFeaturesToTrack_4(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance);
9640
9641    // C++:  void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize, bool useHarrisDetector = false, double k = 0.04)
9642    private static native void goodFeaturesToTrack_5(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, int gradientSize, boolean useHarrisDetector, double k);
9643    private static native void goodFeaturesToTrack_6(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, int gradientSize, boolean useHarrisDetector);
9644    private static native void goodFeaturesToTrack_7(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, int gradientSize);
9645
9646    // C++:  void cv::goodFeaturesToTrack(Mat image, Mat& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat& cornersQuality, int blockSize = 3, int gradientSize = 3, bool useHarrisDetector = false, double k = 0.04)
9647    private static native void goodFeaturesToTrackWithQuality_0(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj, int blockSize, int gradientSize, boolean useHarrisDetector, double k);
9648    private static native void goodFeaturesToTrackWithQuality_1(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj, int blockSize, int gradientSize, boolean useHarrisDetector);
9649    private static native void goodFeaturesToTrackWithQuality_2(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj, int blockSize, int gradientSize);
9650    private static native void goodFeaturesToTrackWithQuality_3(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj, int blockSize);
9651    private static native void goodFeaturesToTrackWithQuality_4(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj);
9652
9653    // C++:  void cv::HoughLines(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI)
9654    private static native void HoughLines_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn, double min_theta, double max_theta);
9655    private static native void HoughLines_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn, double min_theta);
9656    private static native void HoughLines_2(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn);
9657    private static native void HoughLines_3(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn);
9658    private static native void HoughLines_4(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold);
9659
9660    // C++:  void cv::HoughLinesP(Mat image, Mat& lines, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0)
9661    private static native void HoughLinesP_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double minLineLength, double maxLineGap);
9662    private static native void HoughLinesP_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double minLineLength);
9663    private static native void HoughLinesP_2(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold);
9664
9665    // C++:  void cv::HoughLinesPointSet(Mat _point, Mat& _lines, int lines_max, int threshold, double min_rho, double max_rho, double rho_step, double min_theta, double max_theta, double theta_step)
9666    private static native void HoughLinesPointSet_0(long _point_nativeObj, long _lines_nativeObj, int lines_max, int threshold, double min_rho, double max_rho, double rho_step, double min_theta, double max_theta, double theta_step);
9667
9668    // C++:  void cv::HoughCircles(Mat image, Mat& circles, int method, double dp, double minDist, double param1 = 100, double param2 = 100, int minRadius = 0, int maxRadius = 0)
9669    private static native void HoughCircles_0(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1, double param2, int minRadius, int maxRadius);
9670    private static native void HoughCircles_1(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1, double param2, int minRadius);
9671    private static native void HoughCircles_2(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1, double param2);
9672    private static native void HoughCircles_3(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1);
9673    private static native void HoughCircles_4(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist);
9674
9675    // C++:  void cv::erode(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
9676    private static native void erode_0(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
9677    private static native void erode_1(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType);
9678    private static native void erode_2(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations);
9679    private static native void erode_3(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y);
9680    private static native void erode_4(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj);
9681
9682    // C++:  void cv::dilate(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
9683    private static native void dilate_0(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
9684    private static native void dilate_1(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType);
9685    private static native void dilate_2(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations);
9686    private static native void dilate_3(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y);
9687    private static native void dilate_4(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj);
9688
9689    // C++:  void cv::morphologyEx(Mat src, Mat& dst, int op, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
9690    private static native void morphologyEx_0(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
9691    private static native void morphologyEx_1(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType);
9692    private static native void morphologyEx_2(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations);
9693    private static native void morphologyEx_3(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y);
9694    private static native void morphologyEx_4(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj);
9695
9696    // C++:  void cv::resize(Mat src, Mat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR)
9697    private static native void resize_0(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double fx, double fy, int interpolation);
9698    private static native void resize_1(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double fx, double fy);
9699    private static native void resize_2(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double fx);
9700    private static native void resize_3(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height);
9701
9702    // C++:  void cv::warpAffine(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
9703    private static native void warpAffine_0(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
9704    private static native void warpAffine_1(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode);
9705    private static native void warpAffine_2(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags);
9706    private static native void warpAffine_3(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height);
9707
9708    // C++:  void cv::warpPerspective(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
9709    private static native void warpPerspective_0(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
9710    private static native void warpPerspective_1(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode);
9711    private static native void warpPerspective_2(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags);
9712    private static native void warpPerspective_3(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height);
9713
9714    // C++:  void cv::remap(Mat src, Mat& dst, Mat map1, Mat map2, int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
9715    private static native void remap_0(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
9716    private static native void remap_1(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation, int borderMode);
9717    private static native void remap_2(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation);
9718
9719    // C++:  void cv::convertMaps(Mat map1, Mat map2, Mat& dstmap1, Mat& dstmap2, int dstmap1type, bool nninterpolation = false)
9720    private static native void convertMaps_0(long map1_nativeObj, long map2_nativeObj, long dstmap1_nativeObj, long dstmap2_nativeObj, int dstmap1type, boolean nninterpolation);
9721    private static native void convertMaps_1(long map1_nativeObj, long map2_nativeObj, long dstmap1_nativeObj, long dstmap2_nativeObj, int dstmap1type);
9722
9723    // C++:  Mat cv::getRotationMatrix2D(Point2f center, double angle, double scale)
9724    private static native long getRotationMatrix2D_0(double center_x, double center_y, double angle, double scale);
9725
9726    // C++:  void cv::invertAffineTransform(Mat M, Mat& iM)
9727    private static native void invertAffineTransform_0(long M_nativeObj, long iM_nativeObj);
9728
9729    // C++:  Mat cv::getPerspectiveTransform(Mat src, Mat dst, int solveMethod = DECOMP_LU)
9730    private static native long getPerspectiveTransform_0(long src_nativeObj, long dst_nativeObj, int solveMethod);
9731    private static native long getPerspectiveTransform_1(long src_nativeObj, long dst_nativeObj);
9732
9733    // C++:  Mat cv::getAffineTransform(vector_Point2f src, vector_Point2f dst)
9734    private static native long getAffineTransform_0(long src_mat_nativeObj, long dst_mat_nativeObj);
9735
9736    // C++:  void cv::getRectSubPix(Mat image, Size patchSize, Point2f center, Mat& patch, int patchType = -1)
9737    private static native void getRectSubPix_0(long image_nativeObj, double patchSize_width, double patchSize_height, double center_x, double center_y, long patch_nativeObj, int patchType);
9738    private static native void getRectSubPix_1(long image_nativeObj, double patchSize_width, double patchSize_height, double center_x, double center_y, long patch_nativeObj);
9739
9740    // C++:  void cv::logPolar(Mat src, Mat& dst, Point2f center, double M, int flags)
9741    private static native void logPolar_0(long src_nativeObj, long dst_nativeObj, double center_x, double center_y, double M, int flags);
9742
9743    // C++:  void cv::linearPolar(Mat src, Mat& dst, Point2f center, double maxRadius, int flags)
9744    private static native void linearPolar_0(long src_nativeObj, long dst_nativeObj, double center_x, double center_y, double maxRadius, int flags);
9745
9746    // C++:  void cv::warpPolar(Mat src, Mat& dst, Size dsize, Point2f center, double maxRadius, int flags)
9747    private static native void warpPolar_0(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double center_x, double center_y, double maxRadius, int flags);
9748
9749    // C++:  void cv::integral(Mat src, Mat& sum, int sdepth = -1)
9750    private static native void integral_0(long src_nativeObj, long sum_nativeObj, int sdepth);
9751    private static native void integral_1(long src_nativeObj, long sum_nativeObj);
9752
9753    // C++:  void cv::integral(Mat src, Mat& sum, Mat& sqsum, int sdepth = -1, int sqdepth = -1)
9754    private static native void integral2_0(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, int sdepth, int sqdepth);
9755    private static native void integral2_1(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, int sdepth);
9756    private static native void integral2_2(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj);
9757
9758    // C++:  void cv::integral(Mat src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth = -1, int sqdepth = -1)
9759    private static native void integral3_0(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj, int sdepth, int sqdepth);
9760    private static native void integral3_1(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj, int sdepth);
9761    private static native void integral3_2(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj);
9762
9763    // C++:  void cv::accumulate(Mat src, Mat& dst, Mat mask = Mat())
9764    private static native void accumulate_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj);
9765    private static native void accumulate_1(long src_nativeObj, long dst_nativeObj);
9766
9767    // C++:  void cv::accumulateSquare(Mat src, Mat& dst, Mat mask = Mat())
9768    private static native void accumulateSquare_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj);
9769    private static native void accumulateSquare_1(long src_nativeObj, long dst_nativeObj);
9770
9771    // C++:  void cv::accumulateProduct(Mat src1, Mat src2, Mat& dst, Mat mask = Mat())
9772    private static native void accumulateProduct_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj);
9773    private static native void accumulateProduct_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj);
9774
9775    // C++:  void cv::accumulateWeighted(Mat src, Mat& dst, double alpha, Mat mask = Mat())
9776    private static native void accumulateWeighted_0(long src_nativeObj, long dst_nativeObj, double alpha, long mask_nativeObj);
9777    private static native void accumulateWeighted_1(long src_nativeObj, long dst_nativeObj, double alpha);
9778
9779    // C++:  Point2d cv::phaseCorrelate(Mat src1, Mat src2, Mat window = Mat(), double* response = 0)
9780    private static native double[] phaseCorrelate_0(long src1_nativeObj, long src2_nativeObj, long window_nativeObj, double[] response_out);
9781    private static native double[] phaseCorrelate_1(long src1_nativeObj, long src2_nativeObj, long window_nativeObj);
9782    private static native double[] phaseCorrelate_2(long src1_nativeObj, long src2_nativeObj);
9783
9784    // C++:  void cv::createHanningWindow(Mat& dst, Size winSize, int type)
9785    private static native void createHanningWindow_0(long dst_nativeObj, double winSize_width, double winSize_height, int type);
9786
9787    // C++:  double cv::threshold(Mat src, Mat& dst, double thresh, double maxval, int type)
9788    private static native double threshold_0(long src_nativeObj, long dst_nativeObj, double thresh, double maxval, int type);
9789
9790    // C++:  void cv::adaptiveThreshold(Mat src, Mat& dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C)
9791    private static native void adaptiveThreshold_0(long src_nativeObj, long dst_nativeObj, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C);
9792
9793    // C++:  void cv::pyrDown(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT)
9794    private static native void pyrDown_0(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height, int borderType);
9795    private static native void pyrDown_1(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height);
9796    private static native void pyrDown_2(long src_nativeObj, long dst_nativeObj);
9797
9798    // C++:  void cv::pyrUp(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT)
9799    private static native void pyrUp_0(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height, int borderType);
9800    private static native void pyrUp_1(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height);
9801    private static native void pyrUp_2(long src_nativeObj, long dst_nativeObj);
9802
9803    // C++:  void cv::calcHist(vector_Mat images, vector_int channels, Mat mask, Mat& hist, vector_int histSize, vector_float ranges, bool accumulate = false)
9804    private static native void calcHist_0(long images_mat_nativeObj, long channels_mat_nativeObj, long mask_nativeObj, long hist_nativeObj, long histSize_mat_nativeObj, long ranges_mat_nativeObj, boolean accumulate);
9805    private static native void calcHist_1(long images_mat_nativeObj, long channels_mat_nativeObj, long mask_nativeObj, long hist_nativeObj, long histSize_mat_nativeObj, long ranges_mat_nativeObj);
9806
9807    // C++:  void cv::calcBackProject(vector_Mat images, vector_int channels, Mat hist, Mat& dst, vector_float ranges, double scale)
9808    private static native void calcBackProject_0(long images_mat_nativeObj, long channels_mat_nativeObj, long hist_nativeObj, long dst_nativeObj, long ranges_mat_nativeObj, double scale);
9809
9810    // C++:  double cv::compareHist(Mat H1, Mat H2, int method)
9811    private static native double compareHist_0(long H1_nativeObj, long H2_nativeObj, int method);
9812
9813    // C++:  void cv::equalizeHist(Mat src, Mat& dst)
9814    private static native void equalizeHist_0(long src_nativeObj, long dst_nativeObj);
9815
9816    // C++:  Ptr_CLAHE cv::createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8))
9817    private static native long createCLAHE_0(double clipLimit, double tileGridSize_width, double tileGridSize_height);
9818    private static native long createCLAHE_1(double clipLimit);
9819    private static native long createCLAHE_2();
9820
9821    // C++:  float cv::wrapperEMD(Mat signature1, Mat signature2, int distType, Mat cost = Mat(), Ptr_float& lowerBound = Ptr<float>(), Mat& flow = Mat())
9822    private static native float EMD_0(long signature1_nativeObj, long signature2_nativeObj, int distType, long cost_nativeObj, long flow_nativeObj);
9823    private static native float EMD_1(long signature1_nativeObj, long signature2_nativeObj, int distType, long cost_nativeObj);
9824    private static native float EMD_3(long signature1_nativeObj, long signature2_nativeObj, int distType);
9825
9826    // C++:  void cv::watershed(Mat image, Mat& markers)
9827    private static native void watershed_0(long image_nativeObj, long markers_nativeObj);
9828
9829    // C++:  void cv::pyrMeanShiftFiltering(Mat src, Mat& dst, double sp, double sr, int maxLevel = 1, TermCriteria termcrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1))
9830    private static native void pyrMeanShiftFiltering_0(long src_nativeObj, long dst_nativeObj, double sp, double sr, int maxLevel, int termcrit_type, int termcrit_maxCount, double termcrit_epsilon);
9831    private static native void pyrMeanShiftFiltering_1(long src_nativeObj, long dst_nativeObj, double sp, double sr, int maxLevel);
9832    private static native void pyrMeanShiftFiltering_2(long src_nativeObj, long dst_nativeObj, double sp, double sr);
9833
9834    // C++:  void cv::grabCut(Mat img, Mat& mask, Rect rect, Mat& bgdModel, Mat& fgdModel, int iterCount, int mode = GC_EVAL)
9835    private static native void grabCut_0(long img_nativeObj, long mask_nativeObj, int rect_x, int rect_y, int rect_width, int rect_height, long bgdModel_nativeObj, long fgdModel_nativeObj, int iterCount, int mode);
9836    private static native void grabCut_1(long img_nativeObj, long mask_nativeObj, int rect_x, int rect_y, int rect_width, int rect_height, long bgdModel_nativeObj, long fgdModel_nativeObj, int iterCount);
9837
9838    // C++:  void cv::distanceTransform(Mat src, Mat& dst, Mat& labels, int distanceType, int maskSize, int labelType = DIST_LABEL_CCOMP)
9839    private static native void distanceTransformWithLabels_0(long src_nativeObj, long dst_nativeObj, long labels_nativeObj, int distanceType, int maskSize, int labelType);
9840    private static native void distanceTransformWithLabels_1(long src_nativeObj, long dst_nativeObj, long labels_nativeObj, int distanceType, int maskSize);
9841
9842    // C++:  void cv::distanceTransform(Mat src, Mat& dst, int distanceType, int maskSize, int dstType = CV_32F)
9843    private static native void distanceTransform_0(long src_nativeObj, long dst_nativeObj, int distanceType, int maskSize, int dstType);
9844    private static native void distanceTransform_1(long src_nativeObj, long dst_nativeObj, int distanceType, int maskSize);
9845
9846    // C++:  int cv::floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), int flags = 4)
9847    private static native int floodFill_0(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out, double loDiff_val0, double loDiff_val1, double loDiff_val2, double loDiff_val3, double upDiff_val0, double upDiff_val1, double upDiff_val2, double upDiff_val3, int flags);
9848    private static native int floodFill_1(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out, double loDiff_val0, double loDiff_val1, double loDiff_val2, double loDiff_val3, double upDiff_val0, double upDiff_val1, double upDiff_val2, double upDiff_val3);
9849    private static native int floodFill_2(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out, double loDiff_val0, double loDiff_val1, double loDiff_val2, double loDiff_val3);
9850    private static native int floodFill_3(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out);
9851    private static native int floodFill_4(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3);
9852
9853    // C++:  void cv::blendLinear(Mat src1, Mat src2, Mat weights1, Mat weights2, Mat& dst)
9854    private static native void blendLinear_0(long src1_nativeObj, long src2_nativeObj, long weights1_nativeObj, long weights2_nativeObj, long dst_nativeObj);
9855
9856    // C++:  void cv::cvtColor(Mat src, Mat& dst, int code, int dstCn = 0)
9857    private static native void cvtColor_0(long src_nativeObj, long dst_nativeObj, int code, int dstCn);
9858    private static native void cvtColor_1(long src_nativeObj, long dst_nativeObj, int code);
9859
9860    // C++:  void cv::cvtColorTwoPlane(Mat src1, Mat src2, Mat& dst, int code)
9861    private static native void cvtColorTwoPlane_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, int code);
9862
9863    // C++:  void cv::demosaicing(Mat src, Mat& dst, int code, int dstCn = 0)
9864    private static native void demosaicing_0(long src_nativeObj, long dst_nativeObj, int code, int dstCn);
9865    private static native void demosaicing_1(long src_nativeObj, long dst_nativeObj, int code);
9866
9867    // C++:  Moments cv::moments(Mat array, bool binaryImage = false)
9868    private static native double[] moments_0(long array_nativeObj, boolean binaryImage);
9869    private static native double[] moments_1(long array_nativeObj);
9870
9871    // C++:  void cv::HuMoments(Moments m, Mat& hu)
9872    private static native void HuMoments_0(double m_m00, double m_m10, double m_m01, double m_m20, double m_m11, double m_m02, double m_m30, double m_m21, double m_m12, double m_m03, long hu_nativeObj);
9873
9874    // C++:  void cv::matchTemplate(Mat image, Mat templ, Mat& result, int method, Mat mask = Mat())
9875    private static native void matchTemplate_0(long image_nativeObj, long templ_nativeObj, long result_nativeObj, int method, long mask_nativeObj);
9876    private static native void matchTemplate_1(long image_nativeObj, long templ_nativeObj, long result_nativeObj, int method);
9877
9878    // C++:  int cv::connectedComponents(Mat image, Mat& labels, int connectivity, int ltype, int ccltype)
9879    private static native int connectedComponentsWithAlgorithm_0(long image_nativeObj, long labels_nativeObj, int connectivity, int ltype, int ccltype);
9880
9881    // C++:  int cv::connectedComponents(Mat image, Mat& labels, int connectivity = 8, int ltype = CV_32S)
9882    private static native int connectedComponents_0(long image_nativeObj, long labels_nativeObj, int connectivity, int ltype);
9883    private static native int connectedComponents_1(long image_nativeObj, long labels_nativeObj, int connectivity);
9884    private static native int connectedComponents_2(long image_nativeObj, long labels_nativeObj);
9885
9886    // C++:  int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity, int ltype, int ccltype)
9887    private static native int connectedComponentsWithStatsWithAlgorithm_0(long image_nativeObj, long labels_nativeObj, long stats_nativeObj, long centroids_nativeObj, int connectivity, int ltype, int ccltype);
9888
9889    // C++:  int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity = 8, int ltype = CV_32S)
9890    private static native int connectedComponentsWithStats_0(long image_nativeObj, long labels_nativeObj, long stats_nativeObj, long centroids_nativeObj, int connectivity, int ltype);
9891    private static native int connectedComponentsWithStats_1(long image_nativeObj, long labels_nativeObj, long stats_nativeObj, long centroids_nativeObj, int connectivity);
9892    private static native int connectedComponentsWithStats_2(long image_nativeObj, long labels_nativeObj, long stats_nativeObj, long centroids_nativeObj);
9893
9894    // C++:  void cv::findContours(Mat image, vector_vector_Point& contours, Mat& hierarchy, int mode, int method, Point offset = Point())
9895    private static native void findContours_0(long image_nativeObj, long contours_mat_nativeObj, long hierarchy_nativeObj, int mode, int method, double offset_x, double offset_y);
9896    private static native void findContours_1(long image_nativeObj, long contours_mat_nativeObj, long hierarchy_nativeObj, int mode, int method);
9897
9898    // C++:  void cv::approxPolyDP(vector_Point2f curve, vector_Point2f& approxCurve, double epsilon, bool closed)
9899    private static native void approxPolyDP_0(long curve_mat_nativeObj, long approxCurve_mat_nativeObj, double epsilon, boolean closed);
9900
9901    // C++:  double cv::arcLength(vector_Point2f curve, bool closed)
9902    private static native double arcLength_0(long curve_mat_nativeObj, boolean closed);
9903
9904    // C++:  Rect cv::boundingRect(Mat array)
9905    private static native double[] boundingRect_0(long array_nativeObj);
9906
9907    // C++:  double cv::contourArea(Mat contour, bool oriented = false)
9908    private static native double contourArea_0(long contour_nativeObj, boolean oriented);
9909    private static native double contourArea_1(long contour_nativeObj);
9910
9911    // C++:  RotatedRect cv::minAreaRect(vector_Point2f points)
9912    private static native double[] minAreaRect_0(long points_mat_nativeObj);
9913
9914    // C++:  void cv::boxPoints(RotatedRect box, Mat& points)
9915    private static native void boxPoints_0(double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, long points_nativeObj);
9916
9917    // C++:  void cv::minEnclosingCircle(vector_Point2f points, Point2f& center, float& radius)
9918    private static native void minEnclosingCircle_0(long points_mat_nativeObj, double[] center_out, double[] radius_out);
9919
9920    // C++:  double cv::minEnclosingTriangle(Mat points, Mat& triangle)
9921    private static native double minEnclosingTriangle_0(long points_nativeObj, long triangle_nativeObj);
9922
9923    // C++:  double cv::matchShapes(Mat contour1, Mat contour2, int method, double parameter)
9924    private static native double matchShapes_0(long contour1_nativeObj, long contour2_nativeObj, int method, double parameter);
9925
9926    // C++:  void cv::convexHull(vector_Point points, vector_int& hull, bool clockwise = false,  _hidden_  returnPoints = true)
9927    private static native void convexHull_0(long points_mat_nativeObj, long hull_mat_nativeObj, boolean clockwise);
9928    private static native void convexHull_2(long points_mat_nativeObj, long hull_mat_nativeObj);
9929
9930    // C++:  void cv::convexityDefects(vector_Point contour, vector_int convexhull, vector_Vec4i& convexityDefects)
9931    private static native void convexityDefects_0(long contour_mat_nativeObj, long convexhull_mat_nativeObj, long convexityDefects_mat_nativeObj);
9932
9933    // C++:  bool cv::isContourConvex(vector_Point contour)
9934    private static native boolean isContourConvex_0(long contour_mat_nativeObj);
9935
9936    // C++:  float cv::intersectConvexConvex(Mat _p1, Mat _p2, Mat& _p12, bool handleNested = true)
9937    private static native float intersectConvexConvex_0(long _p1_nativeObj, long _p2_nativeObj, long _p12_nativeObj, boolean handleNested);
9938    private static native float intersectConvexConvex_1(long _p1_nativeObj, long _p2_nativeObj, long _p12_nativeObj);
9939
9940    // C++:  RotatedRect cv::fitEllipse(vector_Point2f points)
9941    private static native double[] fitEllipse_0(long points_mat_nativeObj);
9942
9943    // C++:  RotatedRect cv::fitEllipseAMS(Mat points)
9944    private static native double[] fitEllipseAMS_0(long points_nativeObj);
9945
9946    // C++:  RotatedRect cv::fitEllipseDirect(Mat points)
9947    private static native double[] fitEllipseDirect_0(long points_nativeObj);
9948
9949    // C++:  void cv::fitLine(Mat points, Mat& line, int distType, double param, double reps, double aeps)
9950    private static native void fitLine_0(long points_nativeObj, long line_nativeObj, int distType, double param, double reps, double aeps);
9951
9952    // C++:  double cv::pointPolygonTest(vector_Point2f contour, Point2f pt, bool measureDist)
9953    private static native double pointPolygonTest_0(long contour_mat_nativeObj, double pt_x, double pt_y, boolean measureDist);
9954
9955    // C++:  int cv::rotatedRectangleIntersection(RotatedRect rect1, RotatedRect rect2, Mat& intersectingRegion)
9956    private static native int rotatedRectangleIntersection_0(double rect1_center_x, double rect1_center_y, double rect1_size_width, double rect1_size_height, double rect1_angle, double rect2_center_x, double rect2_center_y, double rect2_size_width, double rect2_size_height, double rect2_angle, long intersectingRegion_nativeObj);
9957
9958    // C++:  Ptr_GeneralizedHoughBallard cv::createGeneralizedHoughBallard()
9959    private static native long createGeneralizedHoughBallard_0();
9960
9961    // C++:  Ptr_GeneralizedHoughGuil cv::createGeneralizedHoughGuil()
9962    private static native long createGeneralizedHoughGuil_0();
9963
9964    // C++:  void cv::applyColorMap(Mat src, Mat& dst, int colormap)
9965    private static native void applyColorMap_0(long src_nativeObj, long dst_nativeObj, int colormap);
9966
9967    // C++:  void cv::applyColorMap(Mat src, Mat& dst, Mat userColor)
9968    private static native void applyColorMap_1(long src_nativeObj, long dst_nativeObj, long userColor_nativeObj);
9969
9970    // C++:  void cv::line(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
9971    private static native void line_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
9972    private static native void line_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
9973    private static native void line_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
9974    private static native void line_3(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3);
9975
9976    // C++:  void cv::arrowedLine(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int line_type = 8, int shift = 0, double tipLength = 0.1)
9977    private static native void arrowedLine_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int line_type, int shift, double tipLength);
9978    private static native void arrowedLine_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int line_type, int shift);
9979    private static native void arrowedLine_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int line_type);
9980    private static native void arrowedLine_3(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
9981    private static native void arrowedLine_4(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3);
9982
9983    // C++:  void cv::rectangle(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
9984    private static native void rectangle_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
9985    private static native void rectangle_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
9986    private static native void rectangle_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
9987    private static native void rectangle_3(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3);
9988
9989    // C++:  void cv::rectangle(Mat& img, Rect rec, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
9990    private static native void rectangle_4(long img_nativeObj, int rec_x, int rec_y, int rec_width, int rec_height, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
9991    private static native void rectangle_5(long img_nativeObj, int rec_x, int rec_y, int rec_width, int rec_height, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
9992    private static native void rectangle_6(long img_nativeObj, int rec_x, int rec_y, int rec_width, int rec_height, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
9993    private static native void rectangle_7(long img_nativeObj, int rec_x, int rec_y, int rec_width, int rec_height, double color_val0, double color_val1, double color_val2, double color_val3);
9994
9995    // C++:  void cv::circle(Mat& img, Point center, int radius, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
9996    private static native void circle_0(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
9997    private static native void circle_1(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
9998    private static native void circle_2(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
9999    private static native void circle_3(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3);
10000
10001    // C++:  void cv::ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
10002    private static native void ellipse_0(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
10003    private static native void ellipse_1(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
10004    private static native void ellipse_2(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
10005    private static native void ellipse_3(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3);
10006
10007    // C++:  void cv::ellipse(Mat& img, RotatedRect box, Scalar color, int thickness = 1, int lineType = LINE_8)
10008    private static native void ellipse_4(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
10009    private static native void ellipse_5(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
10010    private static native void ellipse_6(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3);
10011
10012    // C++:  void cv::drawMarker(Mat& img, Point position, Scalar color, int markerType = MARKER_CROSS, int markerSize = 20, int thickness = 1, int line_type = 8)
10013    private static native void drawMarker_0(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3, int markerType, int markerSize, int thickness, int line_type);
10014    private static native void drawMarker_1(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3, int markerType, int markerSize, int thickness);
10015    private static native void drawMarker_2(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3, int markerType, int markerSize);
10016    private static native void drawMarker_3(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3, int markerType);
10017    private static native void drawMarker_4(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3);
10018
10019    // C++:  void cv::fillConvexPoly(Mat& img, vector_Point points, Scalar color, int lineType = LINE_8, int shift = 0)
10020    private static native void fillConvexPoly_0(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift);
10021    private static native void fillConvexPoly_1(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType);
10022    private static native void fillConvexPoly_2(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3);
10023
10024    // C++:  void cv::fillPoly(Mat& img, vector_vector_Point pts, Scalar color, int lineType = LINE_8, int shift = 0, Point offset = Point())
10025    private static native void fillPoly_0(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift, double offset_x, double offset_y);
10026    private static native void fillPoly_1(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift);
10027    private static native void fillPoly_2(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType);
10028    private static native void fillPoly_3(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3);
10029
10030    // C++:  void cv::polylines(Mat& img, vector_vector_Point pts, bool isClosed, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
10031    private static native void polylines_0(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
10032    private static native void polylines_1(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
10033    private static native void polylines_2(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
10034    private static native void polylines_3(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3);
10035
10036    // C++:  void cv::drawContours(Mat& image, vector_vector_Point contours, int contourIdx, Scalar color, int thickness = 1, int lineType = LINE_8, Mat hierarchy = Mat(), int maxLevel = INT_MAX, Point offset = Point())
10037    private static native void drawContours_0(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, long hierarchy_nativeObj, int maxLevel, double offset_x, double offset_y);
10038    private static native void drawContours_1(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, long hierarchy_nativeObj, int maxLevel);
10039    private static native void drawContours_2(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, long hierarchy_nativeObj);
10040    private static native void drawContours_3(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
10041    private static native void drawContours_4(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
10042    private static native void drawContours_5(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3);
10043
10044    // C++:  bool cv::clipLine(Rect imgRect, Point& pt1, Point& pt2)
10045    private static native boolean clipLine_0(int imgRect_x, int imgRect_y, int imgRect_width, int imgRect_height, double pt1_x, double pt1_y, double[] pt1_out, double pt2_x, double pt2_y, double[] pt2_out);
10046
10047    // C++:  void cv::ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector_Point& pts)
10048    private static native void ellipse2Poly_0(double center_x, double center_y, double axes_width, double axes_height, int angle, int arcStart, int arcEnd, int delta, long pts_mat_nativeObj);
10049
10050    // C++:  void cv::putText(Mat& img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1, int lineType = LINE_8, bool bottomLeftOrigin = false)
10051    private static native void putText_0(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, boolean bottomLeftOrigin);
10052    private static native void putText_1(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
10053    private static native void putText_2(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
10054    private static native void putText_3(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3);
10055
10056    // C++:  double cv::getFontScaleFromHeight(int fontFace, int pixelHeight, int thickness = 1)
10057    private static native double getFontScaleFromHeight_0(int fontFace, int pixelHeight, int thickness);
10058    private static native double getFontScaleFromHeight_1(int fontFace, int pixelHeight);
10059
10060    // C++:  void cv::HoughLinesWithAccumulator(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI)
10061    private static native void HoughLinesWithAccumulator_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn, double min_theta, double max_theta);
10062    private static native void HoughLinesWithAccumulator_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn, double min_theta);
10063    private static native void HoughLinesWithAccumulator_2(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn);
10064    private static native void HoughLinesWithAccumulator_3(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn);
10065    private static native void HoughLinesWithAccumulator_4(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold);
10066private static native double[] n_getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine);
10067
10068}