001// 002// This file is auto-generated. Please don't modify it! 003// 004package org.opencv.photo; 005 006import java.util.ArrayList; 007import java.util.List; 008import org.opencv.core.Mat; 009import org.opencv.core.MatOfFloat; 010import org.opencv.core.Point; 011import org.opencv.photo.AlignMTB; 012import org.opencv.photo.CalibrateDebevec; 013import org.opencv.photo.CalibrateRobertson; 014import org.opencv.photo.MergeDebevec; 015import org.opencv.photo.MergeMertens; 016import org.opencv.photo.MergeRobertson; 017import org.opencv.photo.Tonemap; 018import org.opencv.photo.TonemapDrago; 019import org.opencv.photo.TonemapMantiuk; 020import org.opencv.photo.TonemapReinhard; 021import org.opencv.utils.Converters; 022 023// C++: class Photo 024 025public class Photo { 026 027 // C++: enum <unnamed> 028 public static final int 029 INPAINT_NS = 0, 030 INPAINT_TELEA = 1, 031 LDR_SIZE = 256, 032 NORMAL_CLONE = 1, 033 MIXED_CLONE = 2, 034 MONOCHROME_TRANSFER = 3, 035 RECURS_FILTER = 1, 036 NORMCONV_FILTER = 2; 037 038 039 // 040 // C++: void cv::inpaint(Mat src, Mat inpaintMask, Mat& dst, double inpaintRadius, int flags) 041 // 042 043 /** 044 * Restores the selected region in an image using the region neighborhood. 045 * 046 * @param src Input 8-bit, 16-bit unsigned or 32-bit float 1-channel or 8-bit 3-channel image. 047 * @param inpaintMask Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that 048 * needs to be inpainted. 049 * @param dst Output image with the same size and type as src . 050 * @param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered 051 * by the algorithm. 052 * @param flags Inpainting method that could be cv::INPAINT_NS or cv::INPAINT_TELEA 053 * 054 * The function reconstructs the selected image area from the pixel near the area boundary. The 055 * function may be used to remove dust and scratches from a scanned photo, or to remove undesirable 056 * objects from still images or video. See <http://en.wikipedia.org/wiki/Inpainting> for more details. 057 * 058 * <b>Note:</b> 059 * <ul> 060 * <li> 061 * An example using the inpainting technique can be found at 062 * opencv_source_code/samples/cpp/inpaint.cpp 063 * </li> 064 * <li> 065 * (Python) An example using the inpainting technique can be found at 066 * opencv_source_code/samples/python/inpaint.py 067 * </li> 068 * </ul> 069 */ 070 public static void inpaint(Mat src, Mat inpaintMask, Mat dst, double inpaintRadius, int flags) { 071 inpaint_0(src.nativeObj, inpaintMask.nativeObj, dst.nativeObj, inpaintRadius, flags); 072 } 073 074 075 // 076 // C++: void cv::fastNlMeansDenoising(Mat src, Mat& dst, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21) 077 // 078 079 /** 080 * Perform image denoising using Non-local Means Denoising algorithm 081 * <http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational 082 * optimizations. Noise expected to be a gaussian white noise 083 * 084 * @param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image. 085 * @param dst Output image with the same size and type as src . 086 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 087 * Should be odd. Recommended value 7 pixels 088 * @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 089 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 090 * denoising time. Recommended value 21 pixels 091 * @param h Parameter regulating filter strength. Big h value perfectly removes noise but also 092 * removes image details, smaller h value preserves details but also preserves some noise 093 * 094 * This function expected to be applied to grayscale images. For colored images look at 095 * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored 096 * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting 097 * image to CIELAB colorspace and then separately denoise L and AB components with different h 098 * parameter. 099 */ 100 public static void fastNlMeansDenoising(Mat src, Mat dst, float h, int templateWindowSize, int searchWindowSize) { 101 fastNlMeansDenoising_0(src.nativeObj, dst.nativeObj, h, templateWindowSize, searchWindowSize); 102 } 103 104 /** 105 * Perform image denoising using Non-local Means Denoising algorithm 106 * <http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational 107 * optimizations. Noise expected to be a gaussian white noise 108 * 109 * @param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image. 110 * @param dst Output image with the same size and type as src . 111 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 112 * Should be odd. Recommended value 7 pixels 113 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 114 * denoising time. Recommended value 21 pixels 115 * @param h Parameter regulating filter strength. Big h value perfectly removes noise but also 116 * removes image details, smaller h value preserves details but also preserves some noise 117 * 118 * This function expected to be applied to grayscale images. For colored images look at 119 * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored 120 * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting 121 * image to CIELAB colorspace and then separately denoise L and AB components with different h 122 * parameter. 123 */ 124 public static void fastNlMeansDenoising(Mat src, Mat dst, float h, int templateWindowSize) { 125 fastNlMeansDenoising_1(src.nativeObj, dst.nativeObj, h, templateWindowSize); 126 } 127 128 /** 129 * Perform image denoising using Non-local Means Denoising algorithm 130 * <http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational 131 * optimizations. Noise expected to be a gaussian white noise 132 * 133 * @param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image. 134 * @param dst Output image with the same size and type as src . 135 * Should be odd. Recommended value 7 pixels 136 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 137 * denoising time. Recommended value 21 pixels 138 * @param h Parameter regulating filter strength. Big h value perfectly removes noise but also 139 * removes image details, smaller h value preserves details but also preserves some noise 140 * 141 * This function expected to be applied to grayscale images. For colored images look at 142 * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored 143 * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting 144 * image to CIELAB colorspace and then separately denoise L and AB components with different h 145 * parameter. 146 */ 147 public static void fastNlMeansDenoising(Mat src, Mat dst, float h) { 148 fastNlMeansDenoising_2(src.nativeObj, dst.nativeObj, h); 149 } 150 151 /** 152 * Perform image denoising using Non-local Means Denoising algorithm 153 * <http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational 154 * optimizations. Noise expected to be a gaussian white noise 155 * 156 * @param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image. 157 * @param dst Output image with the same size and type as src . 158 * Should be odd. Recommended value 7 pixels 159 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 160 * denoising time. Recommended value 21 pixels 161 * removes image details, smaller h value preserves details but also preserves some noise 162 * 163 * This function expected to be applied to grayscale images. For colored images look at 164 * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored 165 * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting 166 * image to CIELAB colorspace and then separately denoise L and AB components with different h 167 * parameter. 168 */ 169 public static void fastNlMeansDenoising(Mat src, Mat dst) { 170 fastNlMeansDenoising_3(src.nativeObj, dst.nativeObj); 171 } 172 173 174 // 175 // C++: void cv::fastNlMeansDenoising(Mat src, Mat& dst, vector_float h, int templateWindowSize = 7, int searchWindowSize = 21, int normType = NORM_L2) 176 // 177 178 /** 179 * Perform image denoising using Non-local Means Denoising algorithm 180 * <http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational 181 * optimizations. Noise expected to be a gaussian white noise 182 * 183 * @param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 184 * 2-channel, 3-channel or 4-channel image. 185 * @param dst Output image with the same size and type as src . 186 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 187 * Should be odd. Recommended value 7 pixels 188 * @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 189 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 190 * denoising time. Recommended value 21 pixels 191 * @param h Array of parameters regulating filter strength, either one 192 * parameter applied to all channels or one per channel in dst. Big h value 193 * perfectly removes noise but also removes image details, smaller h 194 * value preserves details but also preserves some noise 195 * @param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1 196 * 197 * This function expected to be applied to grayscale images. For colored images look at 198 * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored 199 * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting 200 * image to CIELAB colorspace and then separately denoise L and AB components with different h 201 * parameter. 202 */ 203 public static void fastNlMeansDenoising(Mat src, Mat dst, MatOfFloat h, int templateWindowSize, int searchWindowSize, int normType) { 204 Mat h_mat = h; 205 fastNlMeansDenoising_4(src.nativeObj, dst.nativeObj, h_mat.nativeObj, templateWindowSize, searchWindowSize, normType); 206 } 207 208 /** 209 * Perform image denoising using Non-local Means Denoising algorithm 210 * <http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational 211 * optimizations. Noise expected to be a gaussian white noise 212 * 213 * @param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 214 * 2-channel, 3-channel or 4-channel image. 215 * @param dst Output image with the same size and type as src . 216 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 217 * Should be odd. Recommended value 7 pixels 218 * @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 219 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 220 * denoising time. Recommended value 21 pixels 221 * @param h Array of parameters regulating filter strength, either one 222 * parameter applied to all channels or one per channel in dst. Big h value 223 * perfectly removes noise but also removes image details, smaller h 224 * value preserves details but also preserves some noise 225 * 226 * This function expected to be applied to grayscale images. For colored images look at 227 * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored 228 * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting 229 * image to CIELAB colorspace and then separately denoise L and AB components with different h 230 * parameter. 231 */ 232 public static void fastNlMeansDenoising(Mat src, Mat dst, MatOfFloat h, int templateWindowSize, int searchWindowSize) { 233 Mat h_mat = h; 234 fastNlMeansDenoising_5(src.nativeObj, dst.nativeObj, h_mat.nativeObj, templateWindowSize, searchWindowSize); 235 } 236 237 /** 238 * Perform image denoising using Non-local Means Denoising algorithm 239 * <http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational 240 * optimizations. Noise expected to be a gaussian white noise 241 * 242 * @param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 243 * 2-channel, 3-channel or 4-channel image. 244 * @param dst Output image with the same size and type as src . 245 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 246 * Should be odd. Recommended value 7 pixels 247 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 248 * denoising time. Recommended value 21 pixels 249 * @param h Array of parameters regulating filter strength, either one 250 * parameter applied to all channels or one per channel in dst. Big h value 251 * perfectly removes noise but also removes image details, smaller h 252 * value preserves details but also preserves some noise 253 * 254 * This function expected to be applied to grayscale images. For colored images look at 255 * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored 256 * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting 257 * image to CIELAB colorspace and then separately denoise L and AB components with different h 258 * parameter. 259 */ 260 public static void fastNlMeansDenoising(Mat src, Mat dst, MatOfFloat h, int templateWindowSize) { 261 Mat h_mat = h; 262 fastNlMeansDenoising_6(src.nativeObj, dst.nativeObj, h_mat.nativeObj, templateWindowSize); 263 } 264 265 /** 266 * Perform image denoising using Non-local Means Denoising algorithm 267 * <http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational 268 * optimizations. Noise expected to be a gaussian white noise 269 * 270 * @param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 271 * 2-channel, 3-channel or 4-channel image. 272 * @param dst Output image with the same size and type as src . 273 * Should be odd. Recommended value 7 pixels 274 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 275 * denoising time. Recommended value 21 pixels 276 * @param h Array of parameters regulating filter strength, either one 277 * parameter applied to all channels or one per channel in dst. Big h value 278 * perfectly removes noise but also removes image details, smaller h 279 * value preserves details but also preserves some noise 280 * 281 * This function expected to be applied to grayscale images. For colored images look at 282 * fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored 283 * image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting 284 * image to CIELAB colorspace and then separately denoise L and AB components with different h 285 * parameter. 286 */ 287 public static void fastNlMeansDenoising(Mat src, Mat dst, MatOfFloat h) { 288 Mat h_mat = h; 289 fastNlMeansDenoising_7(src.nativeObj, dst.nativeObj, h_mat.nativeObj); 290 } 291 292 293 // 294 // C++: void cv::fastNlMeansDenoisingColored(Mat src, Mat& dst, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21) 295 // 296 297 /** 298 * Modification of fastNlMeansDenoising function for colored images 299 * 300 * @param src Input 8-bit 3-channel image. 301 * @param dst Output image with the same size and type as src . 302 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 303 * Should be odd. Recommended value 7 pixels 304 * @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 305 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 306 * denoising time. Recommended value 21 pixels 307 * @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly 308 * removes noise but also removes image details, smaller h value preserves details but also preserves 309 * some noise 310 * @param hColor The same as h but for color components. For most images value equals 10 311 * will be enough to remove colored noise and do not distort colors 312 * 313 * The function converts image to CIELAB colorspace and then separately denoise L and AB components 314 * with given h parameters using fastNlMeansDenoising function. 315 */ 316 public static void fastNlMeansDenoisingColored(Mat src, Mat dst, float h, float hColor, int templateWindowSize, int searchWindowSize) { 317 fastNlMeansDenoisingColored_0(src.nativeObj, dst.nativeObj, h, hColor, templateWindowSize, searchWindowSize); 318 } 319 320 /** 321 * Modification of fastNlMeansDenoising function for colored images 322 * 323 * @param src Input 8-bit 3-channel image. 324 * @param dst Output image with the same size and type as src . 325 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 326 * Should be odd. Recommended value 7 pixels 327 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 328 * denoising time. Recommended value 21 pixels 329 * @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly 330 * removes noise but also removes image details, smaller h value preserves details but also preserves 331 * some noise 332 * @param hColor The same as h but for color components. For most images value equals 10 333 * will be enough to remove colored noise and do not distort colors 334 * 335 * The function converts image to CIELAB colorspace and then separately denoise L and AB components 336 * with given h parameters using fastNlMeansDenoising function. 337 */ 338 public static void fastNlMeansDenoisingColored(Mat src, Mat dst, float h, float hColor, int templateWindowSize) { 339 fastNlMeansDenoisingColored_1(src.nativeObj, dst.nativeObj, h, hColor, templateWindowSize); 340 } 341 342 /** 343 * Modification of fastNlMeansDenoising function for colored images 344 * 345 * @param src Input 8-bit 3-channel image. 346 * @param dst Output image with the same size and type as src . 347 * Should be odd. Recommended value 7 pixels 348 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 349 * denoising time. Recommended value 21 pixels 350 * @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly 351 * removes noise but also removes image details, smaller h value preserves details but also preserves 352 * some noise 353 * @param hColor The same as h but for color components. For most images value equals 10 354 * will be enough to remove colored noise and do not distort colors 355 * 356 * The function converts image to CIELAB colorspace and then separately denoise L and AB components 357 * with given h parameters using fastNlMeansDenoising function. 358 */ 359 public static void fastNlMeansDenoisingColored(Mat src, Mat dst, float h, float hColor) { 360 fastNlMeansDenoisingColored_2(src.nativeObj, dst.nativeObj, h, hColor); 361 } 362 363 /** 364 * Modification of fastNlMeansDenoising function for colored images 365 * 366 * @param src Input 8-bit 3-channel image. 367 * @param dst Output image with the same size and type as src . 368 * Should be odd. Recommended value 7 pixels 369 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 370 * denoising time. Recommended value 21 pixels 371 * @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly 372 * removes noise but also removes image details, smaller h value preserves details but also preserves 373 * some noise 374 * will be enough to remove colored noise and do not distort colors 375 * 376 * The function converts image to CIELAB colorspace and then separately denoise L and AB components 377 * with given h parameters using fastNlMeansDenoising function. 378 */ 379 public static void fastNlMeansDenoisingColored(Mat src, Mat dst, float h) { 380 fastNlMeansDenoisingColored_3(src.nativeObj, dst.nativeObj, h); 381 } 382 383 /** 384 * Modification of fastNlMeansDenoising function for colored images 385 * 386 * @param src Input 8-bit 3-channel image. 387 * @param dst Output image with the same size and type as src . 388 * Should be odd. Recommended value 7 pixels 389 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 390 * denoising time. Recommended value 21 pixels 391 * removes noise but also removes image details, smaller h value preserves details but also preserves 392 * some noise 393 * will be enough to remove colored noise and do not distort colors 394 * 395 * The function converts image to CIELAB colorspace and then separately denoise L and AB components 396 * with given h parameters using fastNlMeansDenoising function. 397 */ 398 public static void fastNlMeansDenoisingColored(Mat src, Mat dst) { 399 fastNlMeansDenoisingColored_4(src.nativeObj, dst.nativeObj); 400 } 401 402 403 // 404 // C++: void cv::fastNlMeansDenoisingMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21) 405 // 406 407 /** 408 * Modification of fastNlMeansDenoising function for images sequence where consecutive images have been 409 * captured in small period of time. For example video. This version of the function is for grayscale 410 * images or for manual manipulation with colorspaces. For more details see 411 * <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394> 412 * 413 * @param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or 414 * 4-channel images sequence. All images should have the same type and 415 * size. 416 * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 417 * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 418 * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 419 * imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 420 * srcImgs[imgToDenoiseIndex] image. 421 * @param dst Output image with the same size and type as srcImgs images. 422 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 423 * Should be odd. Recommended value 7 pixels 424 * @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 425 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 426 * denoising time. Recommended value 21 pixels 427 * @param h Parameter regulating filter strength. Bigger h value 428 * perfectly removes noise but also removes image details, smaller h 429 * value preserves details but also preserves some noise 430 */ 431 public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, int templateWindowSize, int searchWindowSize) { 432 Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); 433 fastNlMeansDenoisingMulti_0(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, templateWindowSize, searchWindowSize); 434 } 435 436 /** 437 * Modification of fastNlMeansDenoising function for images sequence where consecutive images have been 438 * captured in small period of time. For example video. This version of the function is for grayscale 439 * images or for manual manipulation with colorspaces. For more details see 440 * <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394> 441 * 442 * @param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or 443 * 4-channel images sequence. All images should have the same type and 444 * size. 445 * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 446 * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 447 * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 448 * imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 449 * srcImgs[imgToDenoiseIndex] image. 450 * @param dst Output image with the same size and type as srcImgs images. 451 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 452 * Should be odd. Recommended value 7 pixels 453 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 454 * denoising time. Recommended value 21 pixels 455 * @param h Parameter regulating filter strength. Bigger h value 456 * perfectly removes noise but also removes image details, smaller h 457 * value preserves details but also preserves some noise 458 */ 459 public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, int templateWindowSize) { 460 Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); 461 fastNlMeansDenoisingMulti_1(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, templateWindowSize); 462 } 463 464 /** 465 * Modification of fastNlMeansDenoising function for images sequence where consecutive images have been 466 * captured in small period of time. For example video. This version of the function is for grayscale 467 * images or for manual manipulation with colorspaces. For more details see 468 * <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394> 469 * 470 * @param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or 471 * 4-channel images sequence. All images should have the same type and 472 * size. 473 * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 474 * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 475 * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 476 * imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 477 * srcImgs[imgToDenoiseIndex] image. 478 * @param dst Output image with the same size and type as srcImgs images. 479 * Should be odd. Recommended value 7 pixels 480 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 481 * denoising time. Recommended value 21 pixels 482 * @param h Parameter regulating filter strength. Bigger h value 483 * perfectly removes noise but also removes image details, smaller h 484 * value preserves details but also preserves some noise 485 */ 486 public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h) { 487 Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); 488 fastNlMeansDenoisingMulti_2(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h); 489 } 490 491 /** 492 * Modification of fastNlMeansDenoising function for images sequence where consecutive images have been 493 * captured in small period of time. For example video. This version of the function is for grayscale 494 * images or for manual manipulation with colorspaces. For more details see 495 * <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394> 496 * 497 * @param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or 498 * 4-channel images sequence. All images should have the same type and 499 * size. 500 * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 501 * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 502 * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 503 * imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 504 * srcImgs[imgToDenoiseIndex] image. 505 * @param dst Output image with the same size and type as srcImgs images. 506 * Should be odd. Recommended value 7 pixels 507 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 508 * denoising time. Recommended value 21 pixels 509 * perfectly removes noise but also removes image details, smaller h 510 * value preserves details but also preserves some noise 511 */ 512 public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize) { 513 Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); 514 fastNlMeansDenoisingMulti_3(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize); 515 } 516 517 518 // 519 // C++: void cv::fastNlMeansDenoisingMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, vector_float h, int templateWindowSize = 7, int searchWindowSize = 21, int normType = NORM_L2) 520 // 521 522 /** 523 * Modification of fastNlMeansDenoising function for images sequence where consecutive images have been 524 * captured in small period of time. For example video. This version of the function is for grayscale 525 * images or for manual manipulation with colorspaces. For more details see 526 * <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394> 527 * 528 * @param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 529 * 2-channel, 3-channel or 4-channel images sequence. All images should 530 * have the same type and size. 531 * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 532 * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 533 * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 534 * imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 535 * srcImgs[imgToDenoiseIndex] image. 536 * @param dst Output image with the same size and type as srcImgs images. 537 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 538 * Should be odd. Recommended value 7 pixels 539 * @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 540 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 541 * denoising time. Recommended value 21 pixels 542 * @param h Array of parameters regulating filter strength, either one 543 * parameter applied to all channels or one per channel in dst. Big h value 544 * perfectly removes noise but also removes image details, smaller h 545 * value preserves details but also preserves some noise 546 * @param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1 547 */ 548 public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, MatOfFloat h, int templateWindowSize, int searchWindowSize, int normType) { 549 Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); 550 Mat h_mat = h; 551 fastNlMeansDenoisingMulti_4(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h_mat.nativeObj, templateWindowSize, searchWindowSize, normType); 552 } 553 554 /** 555 * Modification of fastNlMeansDenoising function for images sequence where consecutive images have been 556 * captured in small period of time. For example video. This version of the function is for grayscale 557 * images or for manual manipulation with colorspaces. For more details see 558 * <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394> 559 * 560 * @param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 561 * 2-channel, 3-channel or 4-channel images sequence. All images should 562 * have the same type and size. 563 * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 564 * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 565 * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 566 * imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 567 * srcImgs[imgToDenoiseIndex] image. 568 * @param dst Output image with the same size and type as srcImgs images. 569 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 570 * Should be odd. Recommended value 7 pixels 571 * @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 572 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 573 * denoising time. Recommended value 21 pixels 574 * @param h Array of parameters regulating filter strength, either one 575 * parameter applied to all channels or one per channel in dst. Big h value 576 * perfectly removes noise but also removes image details, smaller h 577 * value preserves details but also preserves some noise 578 */ 579 public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, MatOfFloat h, int templateWindowSize, int searchWindowSize) { 580 Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); 581 Mat h_mat = h; 582 fastNlMeansDenoisingMulti_5(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h_mat.nativeObj, templateWindowSize, searchWindowSize); 583 } 584 585 /** 586 * Modification of fastNlMeansDenoising function for images sequence where consecutive images have been 587 * captured in small period of time. For example video. This version of the function is for grayscale 588 * images or for manual manipulation with colorspaces. For more details see 589 * <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394> 590 * 591 * @param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 592 * 2-channel, 3-channel or 4-channel images sequence. All images should 593 * have the same type and size. 594 * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 595 * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 596 * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 597 * imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 598 * srcImgs[imgToDenoiseIndex] image. 599 * @param dst Output image with the same size and type as srcImgs images. 600 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 601 * Should be odd. Recommended value 7 pixels 602 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 603 * denoising time. Recommended value 21 pixels 604 * @param h Array of parameters regulating filter strength, either one 605 * parameter applied to all channels or one per channel in dst. Big h value 606 * perfectly removes noise but also removes image details, smaller h 607 * value preserves details but also preserves some noise 608 */ 609 public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, MatOfFloat h, int templateWindowSize) { 610 Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); 611 Mat h_mat = h; 612 fastNlMeansDenoisingMulti_6(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h_mat.nativeObj, templateWindowSize); 613 } 614 615 /** 616 * Modification of fastNlMeansDenoising function for images sequence where consecutive images have been 617 * captured in small period of time. For example video. This version of the function is for grayscale 618 * images or for manual manipulation with colorspaces. For more details see 619 * <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394> 620 * 621 * @param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel, 622 * 2-channel, 3-channel or 4-channel images sequence. All images should 623 * have the same type and size. 624 * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 625 * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 626 * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 627 * imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 628 * srcImgs[imgToDenoiseIndex] image. 629 * @param dst Output image with the same size and type as srcImgs images. 630 * Should be odd. Recommended value 7 pixels 631 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 632 * denoising time. Recommended value 21 pixels 633 * @param h Array of parameters regulating filter strength, either one 634 * parameter applied to all channels or one per channel in dst. Big h value 635 * perfectly removes noise but also removes image details, smaller h 636 * value preserves details but also preserves some noise 637 */ 638 public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, MatOfFloat h) { 639 Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); 640 Mat h_mat = h; 641 fastNlMeansDenoisingMulti_7(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h_mat.nativeObj); 642 } 643 644 645 // 646 // C++: void cv::fastNlMeansDenoisingColoredMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21) 647 // 648 649 /** 650 * Modification of fastNlMeansDenoisingMulti function for colored images sequences 651 * 652 * @param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and 653 * size. 654 * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 655 * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 656 * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 657 * imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 658 * srcImgs[imgToDenoiseIndex] image. 659 * @param dst Output image with the same size and type as srcImgs images. 660 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 661 * Should be odd. Recommended value 7 pixels 662 * @param searchWindowSize Size in pixels of the window that is used to compute weighted average for 663 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 664 * denoising time. Recommended value 21 pixels 665 * @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly 666 * removes noise but also removes image details, smaller h value preserves details but also preserves 667 * some noise. 668 * @param hColor The same as h but for color components. 669 * 670 * The function converts images to CIELAB colorspace and then separately denoise L and AB components 671 * with given h parameters using fastNlMeansDenoisingMulti function. 672 */ 673 public static void fastNlMeansDenoisingColoredMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize, int searchWindowSize) { 674 Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); 675 fastNlMeansDenoisingColoredMulti_0(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, hColor, templateWindowSize, searchWindowSize); 676 } 677 678 /** 679 * Modification of fastNlMeansDenoisingMulti function for colored images sequences 680 * 681 * @param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and 682 * size. 683 * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 684 * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 685 * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 686 * imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 687 * srcImgs[imgToDenoiseIndex] image. 688 * @param dst Output image with the same size and type as srcImgs images. 689 * @param templateWindowSize Size in pixels of the template patch that is used to compute weights. 690 * Should be odd. Recommended value 7 pixels 691 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 692 * denoising time. Recommended value 21 pixels 693 * @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly 694 * removes noise but also removes image details, smaller h value preserves details but also preserves 695 * some noise. 696 * @param hColor The same as h but for color components. 697 * 698 * The function converts images to CIELAB colorspace and then separately denoise L and AB components 699 * with given h parameters using fastNlMeansDenoisingMulti function. 700 */ 701 public static void fastNlMeansDenoisingColoredMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize) { 702 Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); 703 fastNlMeansDenoisingColoredMulti_1(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, hColor, templateWindowSize); 704 } 705 706 /** 707 * Modification of fastNlMeansDenoisingMulti function for colored images sequences 708 * 709 * @param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and 710 * size. 711 * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 712 * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 713 * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 714 * imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 715 * srcImgs[imgToDenoiseIndex] image. 716 * @param dst Output image with the same size and type as srcImgs images. 717 * Should be odd. Recommended value 7 pixels 718 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 719 * denoising time. Recommended value 21 pixels 720 * @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly 721 * removes noise but also removes image details, smaller h value preserves details but also preserves 722 * some noise. 723 * @param hColor The same as h but for color components. 724 * 725 * The function converts images to CIELAB colorspace and then separately denoise L and AB components 726 * with given h parameters using fastNlMeansDenoisingMulti function. 727 */ 728 public static void fastNlMeansDenoisingColoredMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor) { 729 Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); 730 fastNlMeansDenoisingColoredMulti_2(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, hColor); 731 } 732 733 /** 734 * Modification of fastNlMeansDenoisingMulti function for colored images sequences 735 * 736 * @param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and 737 * size. 738 * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 739 * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 740 * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 741 * imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 742 * srcImgs[imgToDenoiseIndex] image. 743 * @param dst Output image with the same size and type as srcImgs images. 744 * Should be odd. Recommended value 7 pixels 745 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 746 * denoising time. Recommended value 21 pixels 747 * @param h Parameter regulating filter strength for luminance component. Bigger h value perfectly 748 * removes noise but also removes image details, smaller h value preserves details but also preserves 749 * some noise. 750 * 751 * The function converts images to CIELAB colorspace and then separately denoise L and AB components 752 * with given h parameters using fastNlMeansDenoisingMulti function. 753 */ 754 public static void fastNlMeansDenoisingColoredMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h) { 755 Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); 756 fastNlMeansDenoisingColoredMulti_3(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h); 757 } 758 759 /** 760 * Modification of fastNlMeansDenoisingMulti function for colored images sequences 761 * 762 * @param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and 763 * size. 764 * @param imgToDenoiseIndex Target image to denoise index in srcImgs sequence 765 * @param temporalWindowSize Number of surrounding images to use for target image denoising. Should 766 * be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to 767 * imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise 768 * srcImgs[imgToDenoiseIndex] image. 769 * @param dst Output image with the same size and type as srcImgs images. 770 * Should be odd. Recommended value 7 pixels 771 * given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater 772 * denoising time. Recommended value 21 pixels 773 * removes noise but also removes image details, smaller h value preserves details but also preserves 774 * some noise. 775 * 776 * The function converts images to CIELAB colorspace and then separately denoise L and AB components 777 * with given h parameters using fastNlMeansDenoisingMulti function. 778 */ 779 public static void fastNlMeansDenoisingColoredMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize) { 780 Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); 781 fastNlMeansDenoisingColoredMulti_4(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize); 782 } 783 784 785 // 786 // C++: void cv::denoise_TVL1(vector_Mat observations, Mat result, double lambda = 1.0, int niters = 30) 787 // 788 789 /** 790 * Primal-dual algorithm is an algorithm for solving special types of variational problems (that is, 791 * finding a function to minimize some functional). As the image denoising, in particular, may be seen 792 * as the variational problem, primal-dual algorithm then can be used to perform denoising and this is 793 * exactly what is implemented. 794 * 795 * It should be noted, that this implementation was taken from the July 2013 blog entry 796 * CITE: MA13 , which also contained (slightly more general) ready-to-use source code on Python. 797 * Subsequently, that code was rewritten on C++ with the usage of openCV by Vadim Pisarevsky at the end 798 * of July 2013 and finally it was slightly adapted by later authors. 799 * 800 * Although the thorough discussion and justification of the algorithm involved may be found in 801 * CITE: ChambolleEtAl, it might make sense to skim over it here, following CITE: MA13 . To begin 802 * with, we consider the 1-byte gray-level images as the functions from the rectangular domain of 803 * pixels (it may be seen as set 804 * \(\left\{(x,y)\in\mathbb{N}\times\mathbb{N}\mid 1\leq x\leq n,\;1\leq y\leq m\right\}\) for some 805 * \(m,\;n\in\mathbb{N}\)) into \(\{0,1,\dots,255\}\). We shall denote the noised images as \(f_i\) and with 806 * this view, given some image \(x\) of the same size, we may measure how bad it is by the formula 807 * 808 * \(\left\|\left\|\nabla x\right\|\right\| + \lambda\sum_i\left\|\left\|x-f_i\right\|\right\|\) 809 * 810 * \(\|\|\cdot\|\|\) here denotes \(L_2\)-norm and as you see, the first addend states that we want our 811 * image to be smooth (ideally, having zero gradient, thus being constant) and the second states that 812 * we want our result to be close to the observations we've got. If we treat \(x\) as a function, this is 813 * exactly the functional what we seek to minimize and here the Primal-Dual algorithm comes into play. 814 * 815 * @param observations This array should contain one or more noised versions of the image that is to 816 * be restored. 817 * @param result Here the denoised image will be stored. There is no need to do pre-allocation of 818 * storage space, as it will be automatically allocated, if necessary. 819 * @param lambda Corresponds to \(\lambda\) in the formulas above. As it is enlarged, the smooth 820 * (blurred) images are treated more favorably than detailed (but maybe more noised) ones. Roughly 821 * speaking, as it becomes smaller, the result will be more blur but more sever outliers will be 822 * removed. 823 * @param niters Number of iterations that the algorithm will run. Of course, as more iterations as 824 * better, but it is hard to quantitatively refine this statement, so just use the default and 825 * increase it if the results are poor. 826 */ 827 public static void denoise_TVL1(List<Mat> observations, Mat result, double lambda, int niters) { 828 Mat observations_mat = Converters.vector_Mat_to_Mat(observations); 829 denoise_TVL1_0(observations_mat.nativeObj, result.nativeObj, lambda, niters); 830 } 831 832 /** 833 * Primal-dual algorithm is an algorithm for solving special types of variational problems (that is, 834 * finding a function to minimize some functional). As the image denoising, in particular, may be seen 835 * as the variational problem, primal-dual algorithm then can be used to perform denoising and this is 836 * exactly what is implemented. 837 * 838 * It should be noted, that this implementation was taken from the July 2013 blog entry 839 * CITE: MA13 , which also contained (slightly more general) ready-to-use source code on Python. 840 * Subsequently, that code was rewritten on C++ with the usage of openCV by Vadim Pisarevsky at the end 841 * of July 2013 and finally it was slightly adapted by later authors. 842 * 843 * Although the thorough discussion and justification of the algorithm involved may be found in 844 * CITE: ChambolleEtAl, it might make sense to skim over it here, following CITE: MA13 . To begin 845 * with, we consider the 1-byte gray-level images as the functions from the rectangular domain of 846 * pixels (it may be seen as set 847 * \(\left\{(x,y)\in\mathbb{N}\times\mathbb{N}\mid 1\leq x\leq n,\;1\leq y\leq m\right\}\) for some 848 * \(m,\;n\in\mathbb{N}\)) into \(\{0,1,\dots,255\}\). We shall denote the noised images as \(f_i\) and with 849 * this view, given some image \(x\) of the same size, we may measure how bad it is by the formula 850 * 851 * \(\left\|\left\|\nabla x\right\|\right\| + \lambda\sum_i\left\|\left\|x-f_i\right\|\right\|\) 852 * 853 * \(\|\|\cdot\|\|\) here denotes \(L_2\)-norm and as you see, the first addend states that we want our 854 * image to be smooth (ideally, having zero gradient, thus being constant) and the second states that 855 * we want our result to be close to the observations we've got. If we treat \(x\) as a function, this is 856 * exactly the functional what we seek to minimize and here the Primal-Dual algorithm comes into play. 857 * 858 * @param observations This array should contain one or more noised versions of the image that is to 859 * be restored. 860 * @param result Here the denoised image will be stored. There is no need to do pre-allocation of 861 * storage space, as it will be automatically allocated, if necessary. 862 * @param lambda Corresponds to \(\lambda\) in the formulas above. As it is enlarged, the smooth 863 * (blurred) images are treated more favorably than detailed (but maybe more noised) ones. Roughly 864 * speaking, as it becomes smaller, the result will be more blur but more sever outliers will be 865 * removed. 866 * better, but it is hard to quantitatively refine this statement, so just use the default and 867 * increase it if the results are poor. 868 */ 869 public static void denoise_TVL1(List<Mat> observations, Mat result, double lambda) { 870 Mat observations_mat = Converters.vector_Mat_to_Mat(observations); 871 denoise_TVL1_1(observations_mat.nativeObj, result.nativeObj, lambda); 872 } 873 874 /** 875 * Primal-dual algorithm is an algorithm for solving special types of variational problems (that is, 876 * finding a function to minimize some functional). As the image denoising, in particular, may be seen 877 * as the variational problem, primal-dual algorithm then can be used to perform denoising and this is 878 * exactly what is implemented. 879 * 880 * It should be noted, that this implementation was taken from the July 2013 blog entry 881 * CITE: MA13 , which also contained (slightly more general) ready-to-use source code on Python. 882 * Subsequently, that code was rewritten on C++ with the usage of openCV by Vadim Pisarevsky at the end 883 * of July 2013 and finally it was slightly adapted by later authors. 884 * 885 * Although the thorough discussion and justification of the algorithm involved may be found in 886 * CITE: ChambolleEtAl, it might make sense to skim over it here, following CITE: MA13 . To begin 887 * with, we consider the 1-byte gray-level images as the functions from the rectangular domain of 888 * pixels (it may be seen as set 889 * \(\left\{(x,y)\in\mathbb{N}\times\mathbb{N}\mid 1\leq x\leq n,\;1\leq y\leq m\right\}\) for some 890 * \(m,\;n\in\mathbb{N}\)) into \(\{0,1,\dots,255\}\). We shall denote the noised images as \(f_i\) and with 891 * this view, given some image \(x\) of the same size, we may measure how bad it is by the formula 892 * 893 * \(\left\|\left\|\nabla x\right\|\right\| + \lambda\sum_i\left\|\left\|x-f_i\right\|\right\|\) 894 * 895 * \(\|\|\cdot\|\|\) here denotes \(L_2\)-norm and as you see, the first addend states that we want our 896 * image to be smooth (ideally, having zero gradient, thus being constant) and the second states that 897 * we want our result to be close to the observations we've got. If we treat \(x\) as a function, this is 898 * exactly the functional what we seek to minimize and here the Primal-Dual algorithm comes into play. 899 * 900 * @param observations This array should contain one or more noised versions of the image that is to 901 * be restored. 902 * @param result Here the denoised image will be stored. There is no need to do pre-allocation of 903 * storage space, as it will be automatically allocated, if necessary. 904 * (blurred) images are treated more favorably than detailed (but maybe more noised) ones. Roughly 905 * speaking, as it becomes smaller, the result will be more blur but more sever outliers will be 906 * removed. 907 * better, but it is hard to quantitatively refine this statement, so just use the default and 908 * increase it if the results are poor. 909 */ 910 public static void denoise_TVL1(List<Mat> observations, Mat result) { 911 Mat observations_mat = Converters.vector_Mat_to_Mat(observations); 912 denoise_TVL1_2(observations_mat.nativeObj, result.nativeObj); 913 } 914 915 916 // 917 // C++: Ptr_Tonemap cv::createTonemap(float gamma = 1.0f) 918 // 919 920 /** 921 * Creates simple linear mapper with gamma correction 922 * 923 * @param gamma positive value for gamma correction. Gamma value of 1.0 implies no correction, gamma 924 * equal to 2.2f is suitable for most displays. 925 * Generally gamma > 1 brightens the image and gamma < 1 darkens it. 926 * @return automatically generated 927 */ 928 public static Tonemap createTonemap(float gamma) { 929 return Tonemap.__fromPtr__(createTonemap_0(gamma)); 930 } 931 932 /** 933 * Creates simple linear mapper with gamma correction 934 * 935 * equal to 2.2f is suitable for most displays. 936 * Generally gamma > 1 brightens the image and gamma < 1 darkens it. 937 * @return automatically generated 938 */ 939 public static Tonemap createTonemap() { 940 return Tonemap.__fromPtr__(createTonemap_1()); 941 } 942 943 944 // 945 // C++: Ptr_TonemapDrago cv::createTonemapDrago(float gamma = 1.0f, float saturation = 1.0f, float bias = 0.85f) 946 // 947 948 /** 949 * Creates TonemapDrago object 950 * 951 * @param gamma gamma value for gamma correction. See createTonemap 952 * @param saturation positive saturation enhancement value. 1.0 preserves saturation, values greater 953 * than 1 increase saturation and values less than 1 decrease it. 954 * @param bias value for bias function in [0, 1] range. Values from 0.7 to 0.9 usually give best 955 * results, default value is 0.85. 956 * @return automatically generated 957 */ 958 public static TonemapDrago createTonemapDrago(float gamma, float saturation, float bias) { 959 return TonemapDrago.__fromPtr__(createTonemapDrago_0(gamma, saturation, bias)); 960 } 961 962 /** 963 * Creates TonemapDrago object 964 * 965 * @param gamma gamma value for gamma correction. See createTonemap 966 * @param saturation positive saturation enhancement value. 1.0 preserves saturation, values greater 967 * than 1 increase saturation and values less than 1 decrease it. 968 * results, default value is 0.85. 969 * @return automatically generated 970 */ 971 public static TonemapDrago createTonemapDrago(float gamma, float saturation) { 972 return TonemapDrago.__fromPtr__(createTonemapDrago_1(gamma, saturation)); 973 } 974 975 /** 976 * Creates TonemapDrago object 977 * 978 * @param gamma gamma value for gamma correction. See createTonemap 979 * than 1 increase saturation and values less than 1 decrease it. 980 * results, default value is 0.85. 981 * @return automatically generated 982 */ 983 public static TonemapDrago createTonemapDrago(float gamma) { 984 return TonemapDrago.__fromPtr__(createTonemapDrago_2(gamma)); 985 } 986 987 /** 988 * Creates TonemapDrago object 989 * 990 * than 1 increase saturation and values less than 1 decrease it. 991 * results, default value is 0.85. 992 * @return automatically generated 993 */ 994 public static TonemapDrago createTonemapDrago() { 995 return TonemapDrago.__fromPtr__(createTonemapDrago_3()); 996 } 997 998 999 // 1000 // C++: Ptr_TonemapReinhard cv::createTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f) 1001 // 1002 1003 /** 1004 * Creates TonemapReinhard object 1005 * 1006 * @param gamma gamma value for gamma correction. See createTonemap 1007 * @param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results. 1008 * @param light_adapt light adaptation in [0, 1] range. If 1 adaptation is based only on pixel 1009 * value, if 0 it's global, otherwise it's a weighted mean of this two cases. 1010 * @param color_adapt chromatic adaptation in [0, 1] range. If 1 channels are treated independently, 1011 * if 0 adaptation level is the same for each channel. 1012 * @return automatically generated 1013 */ 1014 public static TonemapReinhard createTonemapReinhard(float gamma, float intensity, float light_adapt, float color_adapt) { 1015 return TonemapReinhard.__fromPtr__(createTonemapReinhard_0(gamma, intensity, light_adapt, color_adapt)); 1016 } 1017 1018 /** 1019 * Creates TonemapReinhard object 1020 * 1021 * @param gamma gamma value for gamma correction. See createTonemap 1022 * @param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results. 1023 * @param light_adapt light adaptation in [0, 1] range. If 1 adaptation is based only on pixel 1024 * value, if 0 it's global, otherwise it's a weighted mean of this two cases. 1025 * if 0 adaptation level is the same for each channel. 1026 * @return automatically generated 1027 */ 1028 public static TonemapReinhard createTonemapReinhard(float gamma, float intensity, float light_adapt) { 1029 return TonemapReinhard.__fromPtr__(createTonemapReinhard_1(gamma, intensity, light_adapt)); 1030 } 1031 1032 /** 1033 * Creates TonemapReinhard object 1034 * 1035 * @param gamma gamma value for gamma correction. See createTonemap 1036 * @param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results. 1037 * value, if 0 it's global, otherwise it's a weighted mean of this two cases. 1038 * if 0 adaptation level is the same for each channel. 1039 * @return automatically generated 1040 */ 1041 public static TonemapReinhard createTonemapReinhard(float gamma, float intensity) { 1042 return TonemapReinhard.__fromPtr__(createTonemapReinhard_2(gamma, intensity)); 1043 } 1044 1045 /** 1046 * Creates TonemapReinhard object 1047 * 1048 * @param gamma gamma value for gamma correction. See createTonemap 1049 * value, if 0 it's global, otherwise it's a weighted mean of this two cases. 1050 * if 0 adaptation level is the same for each channel. 1051 * @return automatically generated 1052 */ 1053 public static TonemapReinhard createTonemapReinhard(float gamma) { 1054 return TonemapReinhard.__fromPtr__(createTonemapReinhard_3(gamma)); 1055 } 1056 1057 /** 1058 * Creates TonemapReinhard object 1059 * 1060 * value, if 0 it's global, otherwise it's a weighted mean of this two cases. 1061 * if 0 adaptation level is the same for each channel. 1062 * @return automatically generated 1063 */ 1064 public static TonemapReinhard createTonemapReinhard() { 1065 return TonemapReinhard.__fromPtr__(createTonemapReinhard_4()); 1066 } 1067 1068 1069 // 1070 // C++: Ptr_TonemapMantiuk cv::createTonemapMantiuk(float gamma = 1.0f, float scale = 0.7f, float saturation = 1.0f) 1071 // 1072 1073 /** 1074 * Creates TonemapMantiuk object 1075 * 1076 * @param gamma gamma value for gamma correction. See createTonemap 1077 * @param scale contrast scale factor. HVS response is multiplied by this parameter, thus compressing 1078 * dynamic range. Values from 0.6 to 0.9 produce best results. 1079 * @param saturation saturation enhancement value. See createTonemapDrago 1080 * @return automatically generated 1081 */ 1082 public static TonemapMantiuk createTonemapMantiuk(float gamma, float scale, float saturation) { 1083 return TonemapMantiuk.__fromPtr__(createTonemapMantiuk_0(gamma, scale, saturation)); 1084 } 1085 1086 /** 1087 * Creates TonemapMantiuk object 1088 * 1089 * @param gamma gamma value for gamma correction. See createTonemap 1090 * @param scale contrast scale factor. HVS response is multiplied by this parameter, thus compressing 1091 * dynamic range. Values from 0.6 to 0.9 produce best results. 1092 * @return automatically generated 1093 */ 1094 public static TonemapMantiuk createTonemapMantiuk(float gamma, float scale) { 1095 return TonemapMantiuk.__fromPtr__(createTonemapMantiuk_1(gamma, scale)); 1096 } 1097 1098 /** 1099 * Creates TonemapMantiuk object 1100 * 1101 * @param gamma gamma value for gamma correction. See createTonemap 1102 * dynamic range. Values from 0.6 to 0.9 produce best results. 1103 * @return automatically generated 1104 */ 1105 public static TonemapMantiuk createTonemapMantiuk(float gamma) { 1106 return TonemapMantiuk.__fromPtr__(createTonemapMantiuk_2(gamma)); 1107 } 1108 1109 /** 1110 * Creates TonemapMantiuk object 1111 * 1112 * dynamic range. Values from 0.6 to 0.9 produce best results. 1113 * @return automatically generated 1114 */ 1115 public static TonemapMantiuk createTonemapMantiuk() { 1116 return TonemapMantiuk.__fromPtr__(createTonemapMantiuk_3()); 1117 } 1118 1119 1120 // 1121 // C++: Ptr_AlignMTB cv::createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true) 1122 // 1123 1124 /** 1125 * Creates AlignMTB object 1126 * 1127 * @param max_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are 1128 * usually good enough (31 and 63 pixels shift respectively). 1129 * @param exclude_range range for exclusion bitmap that is constructed to suppress noise around the 1130 * median value. 1131 * @param cut if true cuts images, otherwise fills the new regions with zeros. 1132 * @return automatically generated 1133 */ 1134 public static AlignMTB createAlignMTB(int max_bits, int exclude_range, boolean cut) { 1135 return AlignMTB.__fromPtr__(createAlignMTB_0(max_bits, exclude_range, cut)); 1136 } 1137 1138 /** 1139 * Creates AlignMTB object 1140 * 1141 * @param max_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are 1142 * usually good enough (31 and 63 pixels shift respectively). 1143 * @param exclude_range range for exclusion bitmap that is constructed to suppress noise around the 1144 * median value. 1145 * @return automatically generated 1146 */ 1147 public static AlignMTB createAlignMTB(int max_bits, int exclude_range) { 1148 return AlignMTB.__fromPtr__(createAlignMTB_1(max_bits, exclude_range)); 1149 } 1150 1151 /** 1152 * Creates AlignMTB object 1153 * 1154 * @param max_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are 1155 * usually good enough (31 and 63 pixels shift respectively). 1156 * median value. 1157 * @return automatically generated 1158 */ 1159 public static AlignMTB createAlignMTB(int max_bits) { 1160 return AlignMTB.__fromPtr__(createAlignMTB_2(max_bits)); 1161 } 1162 1163 /** 1164 * Creates AlignMTB object 1165 * 1166 * usually good enough (31 and 63 pixels shift respectively). 1167 * median value. 1168 * @return automatically generated 1169 */ 1170 public static AlignMTB createAlignMTB() { 1171 return AlignMTB.__fromPtr__(createAlignMTB_3()); 1172 } 1173 1174 1175 // 1176 // C++: Ptr_CalibrateDebevec cv::createCalibrateDebevec(int samples = 70, float lambda = 10.0f, bool random = false) 1177 // 1178 1179 /** 1180 * Creates CalibrateDebevec object 1181 * 1182 * @param samples number of pixel locations to use 1183 * @param lambda smoothness term weight. Greater values produce smoother results, but can alter the 1184 * response. 1185 * @param random if true sample pixel locations are chosen at random, otherwise they form a 1186 * rectangular grid. 1187 * @return automatically generated 1188 */ 1189 public static CalibrateDebevec createCalibrateDebevec(int samples, float lambda, boolean random) { 1190 return CalibrateDebevec.__fromPtr__(createCalibrateDebevec_0(samples, lambda, random)); 1191 } 1192 1193 /** 1194 * Creates CalibrateDebevec object 1195 * 1196 * @param samples number of pixel locations to use 1197 * @param lambda smoothness term weight. Greater values produce smoother results, but can alter the 1198 * response. 1199 * rectangular grid. 1200 * @return automatically generated 1201 */ 1202 public static CalibrateDebevec createCalibrateDebevec(int samples, float lambda) { 1203 return CalibrateDebevec.__fromPtr__(createCalibrateDebevec_1(samples, lambda)); 1204 } 1205 1206 /** 1207 * Creates CalibrateDebevec object 1208 * 1209 * @param samples number of pixel locations to use 1210 * response. 1211 * rectangular grid. 1212 * @return automatically generated 1213 */ 1214 public static CalibrateDebevec createCalibrateDebevec(int samples) { 1215 return CalibrateDebevec.__fromPtr__(createCalibrateDebevec_2(samples)); 1216 } 1217 1218 /** 1219 * Creates CalibrateDebevec object 1220 * 1221 * response. 1222 * rectangular grid. 1223 * @return automatically generated 1224 */ 1225 public static CalibrateDebevec createCalibrateDebevec() { 1226 return CalibrateDebevec.__fromPtr__(createCalibrateDebevec_3()); 1227 } 1228 1229 1230 // 1231 // C++: Ptr_CalibrateRobertson cv::createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f) 1232 // 1233 1234 /** 1235 * Creates CalibrateRobertson object 1236 * 1237 * @param max_iter maximal number of Gauss-Seidel solver iterations. 1238 * @param threshold target difference between results of two successive steps of the minimization. 1239 * @return automatically generated 1240 */ 1241 public static CalibrateRobertson createCalibrateRobertson(int max_iter, float threshold) { 1242 return CalibrateRobertson.__fromPtr__(createCalibrateRobertson_0(max_iter, threshold)); 1243 } 1244 1245 /** 1246 * Creates CalibrateRobertson object 1247 * 1248 * @param max_iter maximal number of Gauss-Seidel solver iterations. 1249 * @return automatically generated 1250 */ 1251 public static CalibrateRobertson createCalibrateRobertson(int max_iter) { 1252 return CalibrateRobertson.__fromPtr__(createCalibrateRobertson_1(max_iter)); 1253 } 1254 1255 /** 1256 * Creates CalibrateRobertson object 1257 * 1258 * @return automatically generated 1259 */ 1260 public static CalibrateRobertson createCalibrateRobertson() { 1261 return CalibrateRobertson.__fromPtr__(createCalibrateRobertson_2()); 1262 } 1263 1264 1265 // 1266 // C++: Ptr_MergeDebevec cv::createMergeDebevec() 1267 // 1268 1269 /** 1270 * Creates MergeDebevec object 1271 * @return automatically generated 1272 */ 1273 public static MergeDebevec createMergeDebevec() { 1274 return MergeDebevec.__fromPtr__(createMergeDebevec_0()); 1275 } 1276 1277 1278 // 1279 // C++: Ptr_MergeMertens cv::createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f, float exposure_weight = 0.0f) 1280 // 1281 1282 /** 1283 * Creates MergeMertens object 1284 * 1285 * @param contrast_weight contrast measure weight. See MergeMertens. 1286 * @param saturation_weight saturation measure weight 1287 * @param exposure_weight well-exposedness measure weight 1288 * @return automatically generated 1289 */ 1290 public static MergeMertens createMergeMertens(float contrast_weight, float saturation_weight, float exposure_weight) { 1291 return MergeMertens.__fromPtr__(createMergeMertens_0(contrast_weight, saturation_weight, exposure_weight)); 1292 } 1293 1294 /** 1295 * Creates MergeMertens object 1296 * 1297 * @param contrast_weight contrast measure weight. See MergeMertens. 1298 * @param saturation_weight saturation measure weight 1299 * @return automatically generated 1300 */ 1301 public static MergeMertens createMergeMertens(float contrast_weight, float saturation_weight) { 1302 return MergeMertens.__fromPtr__(createMergeMertens_1(contrast_weight, saturation_weight)); 1303 } 1304 1305 /** 1306 * Creates MergeMertens object 1307 * 1308 * @param contrast_weight contrast measure weight. See MergeMertens. 1309 * @return automatically generated 1310 */ 1311 public static MergeMertens createMergeMertens(float contrast_weight) { 1312 return MergeMertens.__fromPtr__(createMergeMertens_2(contrast_weight)); 1313 } 1314 1315 /** 1316 * Creates MergeMertens object 1317 * 1318 * @return automatically generated 1319 */ 1320 public static MergeMertens createMergeMertens() { 1321 return MergeMertens.__fromPtr__(createMergeMertens_3()); 1322 } 1323 1324 1325 // 1326 // C++: Ptr_MergeRobertson cv::createMergeRobertson() 1327 // 1328 1329 /** 1330 * Creates MergeRobertson object 1331 * @return automatically generated 1332 */ 1333 public static MergeRobertson createMergeRobertson() { 1334 return MergeRobertson.__fromPtr__(createMergeRobertson_0()); 1335 } 1336 1337 1338 // 1339 // C++: void cv::decolor(Mat src, Mat& grayscale, Mat& color_boost) 1340 // 1341 1342 /** 1343 * Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized 1344 * black-and-white photograph rendering, and in many single channel image processing applications 1345 * CITE: CL12 . 1346 * 1347 * @param src Input 8-bit 3-channel image. 1348 * @param grayscale Output 8-bit 1-channel image. 1349 * @param color_boost Output 8-bit 3-channel image. 1350 * 1351 * This function is to be applied on color images. 1352 */ 1353 public static void decolor(Mat src, Mat grayscale, Mat color_boost) { 1354 decolor_0(src.nativeObj, grayscale.nativeObj, color_boost.nativeObj); 1355 } 1356 1357 1358 // 1359 // C++: void cv::seamlessClone(Mat src, Mat dst, Mat mask, Point p, Mat& blend, int flags) 1360 // 1361 1362 /** 1363 * Image editing tasks concern either global changes (color/intensity corrections, filters, 1364 * deformations) or local changes concerned to a selection. Here we are interested in achieving local 1365 * changes, ones that are restricted to a region manually selected (ROI), in a seamless and effortless 1366 * manner. The extent of the changes ranges from slight distortions to complete replacement by novel 1367 * content CITE: PM03 . 1368 * 1369 * @param src Input 8-bit 3-channel image. 1370 * @param dst Input 8-bit 3-channel image. 1371 * @param mask Input 8-bit 1 or 3-channel image. 1372 * @param p Point in dst image where object is placed. 1373 * @param blend Output image with the same size and type as dst. 1374 * @param flags Cloning method that could be cv::NORMAL_CLONE, cv::MIXED_CLONE or cv::MONOCHROME_TRANSFER 1375 */ 1376 public static void seamlessClone(Mat src, Mat dst, Mat mask, Point p, Mat blend, int flags) { 1377 seamlessClone_0(src.nativeObj, dst.nativeObj, mask.nativeObj, p.x, p.y, blend.nativeObj, flags); 1378 } 1379 1380 1381 // 1382 // C++: void cv::colorChange(Mat src, Mat mask, Mat& dst, float red_mul = 1.0f, float green_mul = 1.0f, float blue_mul = 1.0f) 1383 // 1384 1385 /** 1386 * Given an original color image, two differently colored versions of this image can be mixed 1387 * seamlessly. 1388 * 1389 * @param src Input 8-bit 3-channel image. 1390 * @param mask Input 8-bit 1 or 3-channel image. 1391 * @param dst Output image with the same size and type as src . 1392 * @param red_mul R-channel multiply factor. 1393 * @param green_mul G-channel multiply factor. 1394 * @param blue_mul B-channel multiply factor. 1395 * 1396 * Multiplication factor is between .5 to 2.5. 1397 */ 1398 public static void colorChange(Mat src, Mat mask, Mat dst, float red_mul, float green_mul, float blue_mul) { 1399 colorChange_0(src.nativeObj, mask.nativeObj, dst.nativeObj, red_mul, green_mul, blue_mul); 1400 } 1401 1402 /** 1403 * Given an original color image, two differently colored versions of this image can be mixed 1404 * seamlessly. 1405 * 1406 * @param src Input 8-bit 3-channel image. 1407 * @param mask Input 8-bit 1 or 3-channel image. 1408 * @param dst Output image with the same size and type as src . 1409 * @param red_mul R-channel multiply factor. 1410 * @param green_mul G-channel multiply factor. 1411 * 1412 * Multiplication factor is between .5 to 2.5. 1413 */ 1414 public static void colorChange(Mat src, Mat mask, Mat dst, float red_mul, float green_mul) { 1415 colorChange_1(src.nativeObj, mask.nativeObj, dst.nativeObj, red_mul, green_mul); 1416 } 1417 1418 /** 1419 * Given an original color image, two differently colored versions of this image can be mixed 1420 * seamlessly. 1421 * 1422 * @param src Input 8-bit 3-channel image. 1423 * @param mask Input 8-bit 1 or 3-channel image. 1424 * @param dst Output image with the same size and type as src . 1425 * @param red_mul R-channel multiply factor. 1426 * 1427 * Multiplication factor is between .5 to 2.5. 1428 */ 1429 public static void colorChange(Mat src, Mat mask, Mat dst, float red_mul) { 1430 colorChange_2(src.nativeObj, mask.nativeObj, dst.nativeObj, red_mul); 1431 } 1432 1433 /** 1434 * Given an original color image, two differently colored versions of this image can be mixed 1435 * seamlessly. 1436 * 1437 * @param src Input 8-bit 3-channel image. 1438 * @param mask Input 8-bit 1 or 3-channel image. 1439 * @param dst Output image with the same size and type as src . 1440 * 1441 * Multiplication factor is between .5 to 2.5. 1442 */ 1443 public static void colorChange(Mat src, Mat mask, Mat dst) { 1444 colorChange_3(src.nativeObj, mask.nativeObj, dst.nativeObj); 1445 } 1446 1447 1448 // 1449 // C++: void cv::illuminationChange(Mat src, Mat mask, Mat& dst, float alpha = 0.2f, float beta = 0.4f) 1450 // 1451 1452 /** 1453 * Applying an appropriate non-linear transformation to the gradient field inside the selection and 1454 * then integrating back with a Poisson solver, modifies locally the apparent illumination of an image. 1455 * 1456 * @param src Input 8-bit 3-channel image. 1457 * @param mask Input 8-bit 1 or 3-channel image. 1458 * @param dst Output image with the same size and type as src. 1459 * @param alpha Value ranges between 0-2. 1460 * @param beta Value ranges between 0-2. 1461 * 1462 * This is useful to highlight under-exposed foreground objects or to reduce specular reflections. 1463 */ 1464 public static void illuminationChange(Mat src, Mat mask, Mat dst, float alpha, float beta) { 1465 illuminationChange_0(src.nativeObj, mask.nativeObj, dst.nativeObj, alpha, beta); 1466 } 1467 1468 /** 1469 * Applying an appropriate non-linear transformation to the gradient field inside the selection and 1470 * then integrating back with a Poisson solver, modifies locally the apparent illumination of an image. 1471 * 1472 * @param src Input 8-bit 3-channel image. 1473 * @param mask Input 8-bit 1 or 3-channel image. 1474 * @param dst Output image with the same size and type as src. 1475 * @param alpha Value ranges between 0-2. 1476 * 1477 * This is useful to highlight under-exposed foreground objects or to reduce specular reflections. 1478 */ 1479 public static void illuminationChange(Mat src, Mat mask, Mat dst, float alpha) { 1480 illuminationChange_1(src.nativeObj, mask.nativeObj, dst.nativeObj, alpha); 1481 } 1482 1483 /** 1484 * Applying an appropriate non-linear transformation to the gradient field inside the selection and 1485 * then integrating back with a Poisson solver, modifies locally the apparent illumination of an image. 1486 * 1487 * @param src Input 8-bit 3-channel image. 1488 * @param mask Input 8-bit 1 or 3-channel image. 1489 * @param dst Output image with the same size and type as src. 1490 * 1491 * This is useful to highlight under-exposed foreground objects or to reduce specular reflections. 1492 */ 1493 public static void illuminationChange(Mat src, Mat mask, Mat dst) { 1494 illuminationChange_2(src.nativeObj, mask.nativeObj, dst.nativeObj); 1495 } 1496 1497 1498 // 1499 // C++: void cv::textureFlattening(Mat src, Mat mask, Mat& dst, float low_threshold = 30, float high_threshold = 45, int kernel_size = 3) 1500 // 1501 1502 /** 1503 * By retaining only the gradients at edge locations, before integrating with the Poisson solver, one 1504 * washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge %Detector is used. 1505 * 1506 * @param src Input 8-bit 3-channel image. 1507 * @param mask Input 8-bit 1 or 3-channel image. 1508 * @param dst Output image with the same size and type as src. 1509 * @param low_threshold %Range from 0 to 100. 1510 * @param high_threshold Value > 100. 1511 * @param kernel_size The size of the Sobel kernel to be used. 1512 * 1513 * <b>Note:</b> 1514 * The algorithm assumes that the color of the source image is close to that of the destination. This 1515 * assumption means that when the colors don't match, the source image color gets tinted toward the 1516 * color of the destination image. 1517 */ 1518 public static void textureFlattening(Mat src, Mat mask, Mat dst, float low_threshold, float high_threshold, int kernel_size) { 1519 textureFlattening_0(src.nativeObj, mask.nativeObj, dst.nativeObj, low_threshold, high_threshold, kernel_size); 1520 } 1521 1522 /** 1523 * By retaining only the gradients at edge locations, before integrating with the Poisson solver, one 1524 * washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge %Detector is used. 1525 * 1526 * @param src Input 8-bit 3-channel image. 1527 * @param mask Input 8-bit 1 or 3-channel image. 1528 * @param dst Output image with the same size and type as src. 1529 * @param low_threshold %Range from 0 to 100. 1530 * @param high_threshold Value > 100. 1531 * 1532 * <b>Note:</b> 1533 * The algorithm assumes that the color of the source image is close to that of the destination. This 1534 * assumption means that when the colors don't match, the source image color gets tinted toward the 1535 * color of the destination image. 1536 */ 1537 public static void textureFlattening(Mat src, Mat mask, Mat dst, float low_threshold, float high_threshold) { 1538 textureFlattening_1(src.nativeObj, mask.nativeObj, dst.nativeObj, low_threshold, high_threshold); 1539 } 1540 1541 /** 1542 * By retaining only the gradients at edge locations, before integrating with the Poisson solver, one 1543 * washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge %Detector is used. 1544 * 1545 * @param src Input 8-bit 3-channel image. 1546 * @param mask Input 8-bit 1 or 3-channel image. 1547 * @param dst Output image with the same size and type as src. 1548 * @param low_threshold %Range from 0 to 100. 1549 * 1550 * <b>Note:</b> 1551 * The algorithm assumes that the color of the source image is close to that of the destination. This 1552 * assumption means that when the colors don't match, the source image color gets tinted toward the 1553 * color of the destination image. 1554 */ 1555 public static void textureFlattening(Mat src, Mat mask, Mat dst, float low_threshold) { 1556 textureFlattening_2(src.nativeObj, mask.nativeObj, dst.nativeObj, low_threshold); 1557 } 1558 1559 /** 1560 * By retaining only the gradients at edge locations, before integrating with the Poisson solver, one 1561 * washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge %Detector is used. 1562 * 1563 * @param src Input 8-bit 3-channel image. 1564 * @param mask Input 8-bit 1 or 3-channel image. 1565 * @param dst Output image with the same size and type as src. 1566 * 1567 * <b>Note:</b> 1568 * The algorithm assumes that the color of the source image is close to that of the destination. This 1569 * assumption means that when the colors don't match, the source image color gets tinted toward the 1570 * color of the destination image. 1571 */ 1572 public static void textureFlattening(Mat src, Mat mask, Mat dst) { 1573 textureFlattening_3(src.nativeObj, mask.nativeObj, dst.nativeObj); 1574 } 1575 1576 1577 // 1578 // C++: void cv::edgePreservingFilter(Mat src, Mat& dst, int flags = 1, float sigma_s = 60, float sigma_r = 0.4f) 1579 // 1580 1581 /** 1582 * Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing 1583 * filters are used in many different applications CITE: EM11 . 1584 * 1585 * @param src Input 8-bit 3-channel image. 1586 * @param dst Output 8-bit 3-channel image. 1587 * @param flags Edge preserving filters: cv::RECURS_FILTER or cv::NORMCONV_FILTER 1588 * @param sigma_s %Range between 0 to 200. 1589 * @param sigma_r %Range between 0 to 1. 1590 */ 1591 public static void edgePreservingFilter(Mat src, Mat dst, int flags, float sigma_s, float sigma_r) { 1592 edgePreservingFilter_0(src.nativeObj, dst.nativeObj, flags, sigma_s, sigma_r); 1593 } 1594 1595 /** 1596 * Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing 1597 * filters are used in many different applications CITE: EM11 . 1598 * 1599 * @param src Input 8-bit 3-channel image. 1600 * @param dst Output 8-bit 3-channel image. 1601 * @param flags Edge preserving filters: cv::RECURS_FILTER or cv::NORMCONV_FILTER 1602 * @param sigma_s %Range between 0 to 200. 1603 */ 1604 public static void edgePreservingFilter(Mat src, Mat dst, int flags, float sigma_s) { 1605 edgePreservingFilter_1(src.nativeObj, dst.nativeObj, flags, sigma_s); 1606 } 1607 1608 /** 1609 * Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing 1610 * filters are used in many different applications CITE: EM11 . 1611 * 1612 * @param src Input 8-bit 3-channel image. 1613 * @param dst Output 8-bit 3-channel image. 1614 * @param flags Edge preserving filters: cv::RECURS_FILTER or cv::NORMCONV_FILTER 1615 */ 1616 public static void edgePreservingFilter(Mat src, Mat dst, int flags) { 1617 edgePreservingFilter_2(src.nativeObj, dst.nativeObj, flags); 1618 } 1619 1620 /** 1621 * Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing 1622 * filters are used in many different applications CITE: EM11 . 1623 * 1624 * @param src Input 8-bit 3-channel image. 1625 * @param dst Output 8-bit 3-channel image. 1626 */ 1627 public static void edgePreservingFilter(Mat src, Mat dst) { 1628 edgePreservingFilter_3(src.nativeObj, dst.nativeObj); 1629 } 1630 1631 1632 // 1633 // C++: void cv::detailEnhance(Mat src, Mat& dst, float sigma_s = 10, float sigma_r = 0.15f) 1634 // 1635 1636 /** 1637 * This filter enhances the details of a particular image. 1638 * 1639 * @param src Input 8-bit 3-channel image. 1640 * @param dst Output image with the same size and type as src. 1641 * @param sigma_s %Range between 0 to 200. 1642 * @param sigma_r %Range between 0 to 1. 1643 */ 1644 public static void detailEnhance(Mat src, Mat dst, float sigma_s, float sigma_r) { 1645 detailEnhance_0(src.nativeObj, dst.nativeObj, sigma_s, sigma_r); 1646 } 1647 1648 /** 1649 * This filter enhances the details of a particular image. 1650 * 1651 * @param src Input 8-bit 3-channel image. 1652 * @param dst Output image with the same size and type as src. 1653 * @param sigma_s %Range between 0 to 200. 1654 */ 1655 public static void detailEnhance(Mat src, Mat dst, float sigma_s) { 1656 detailEnhance_1(src.nativeObj, dst.nativeObj, sigma_s); 1657 } 1658 1659 /** 1660 * This filter enhances the details of a particular image. 1661 * 1662 * @param src Input 8-bit 3-channel image. 1663 * @param dst Output image with the same size and type as src. 1664 */ 1665 public static void detailEnhance(Mat src, Mat dst) { 1666 detailEnhance_2(src.nativeObj, dst.nativeObj); 1667 } 1668 1669 1670 // 1671 // C++: void cv::pencilSketch(Mat src, Mat& dst1, Mat& dst2, float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f) 1672 // 1673 1674 /** 1675 * Pencil-like non-photorealistic line drawing 1676 * 1677 * @param src Input 8-bit 3-channel image. 1678 * @param dst1 Output 8-bit 1-channel image. 1679 * @param dst2 Output image with the same size and type as src. 1680 * @param sigma_s %Range between 0 to 200. 1681 * @param sigma_r %Range between 0 to 1. 1682 * @param shade_factor %Range between 0 to 0.1. 1683 */ 1684 public static void pencilSketch(Mat src, Mat dst1, Mat dst2, float sigma_s, float sigma_r, float shade_factor) { 1685 pencilSketch_0(src.nativeObj, dst1.nativeObj, dst2.nativeObj, sigma_s, sigma_r, shade_factor); 1686 } 1687 1688 /** 1689 * Pencil-like non-photorealistic line drawing 1690 * 1691 * @param src Input 8-bit 3-channel image. 1692 * @param dst1 Output 8-bit 1-channel image. 1693 * @param dst2 Output image with the same size and type as src. 1694 * @param sigma_s %Range between 0 to 200. 1695 * @param sigma_r %Range between 0 to 1. 1696 */ 1697 public static void pencilSketch(Mat src, Mat dst1, Mat dst2, float sigma_s, float sigma_r) { 1698 pencilSketch_1(src.nativeObj, dst1.nativeObj, dst2.nativeObj, sigma_s, sigma_r); 1699 } 1700 1701 /** 1702 * Pencil-like non-photorealistic line drawing 1703 * 1704 * @param src Input 8-bit 3-channel image. 1705 * @param dst1 Output 8-bit 1-channel image. 1706 * @param dst2 Output image with the same size and type as src. 1707 * @param sigma_s %Range between 0 to 200. 1708 */ 1709 public static void pencilSketch(Mat src, Mat dst1, Mat dst2, float sigma_s) { 1710 pencilSketch_2(src.nativeObj, dst1.nativeObj, dst2.nativeObj, sigma_s); 1711 } 1712 1713 /** 1714 * Pencil-like non-photorealistic line drawing 1715 * 1716 * @param src Input 8-bit 3-channel image. 1717 * @param dst1 Output 8-bit 1-channel image. 1718 * @param dst2 Output image with the same size and type as src. 1719 */ 1720 public static void pencilSketch(Mat src, Mat dst1, Mat dst2) { 1721 pencilSketch_3(src.nativeObj, dst1.nativeObj, dst2.nativeObj); 1722 } 1723 1724 1725 // 1726 // C++: void cv::stylization(Mat src, Mat& dst, float sigma_s = 60, float sigma_r = 0.45f) 1727 // 1728 1729 /** 1730 * Stylization aims to produce digital imagery with a wide variety of effects not focused on 1731 * photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low 1732 * contrast while preserving, or enhancing, high-contrast features. 1733 * 1734 * @param src Input 8-bit 3-channel image. 1735 * @param dst Output image with the same size and type as src. 1736 * @param sigma_s %Range between 0 to 200. 1737 * @param sigma_r %Range between 0 to 1. 1738 */ 1739 public static void stylization(Mat src, Mat dst, float sigma_s, float sigma_r) { 1740 stylization_0(src.nativeObj, dst.nativeObj, sigma_s, sigma_r); 1741 } 1742 1743 /** 1744 * Stylization aims to produce digital imagery with a wide variety of effects not focused on 1745 * photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low 1746 * contrast while preserving, or enhancing, high-contrast features. 1747 * 1748 * @param src Input 8-bit 3-channel image. 1749 * @param dst Output image with the same size and type as src. 1750 * @param sigma_s %Range between 0 to 200. 1751 */ 1752 public static void stylization(Mat src, Mat dst, float sigma_s) { 1753 stylization_1(src.nativeObj, dst.nativeObj, sigma_s); 1754 } 1755 1756 /** 1757 * Stylization aims to produce digital imagery with a wide variety of effects not focused on 1758 * photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low 1759 * contrast while preserving, or enhancing, high-contrast features. 1760 * 1761 * @param src Input 8-bit 3-channel image. 1762 * @param dst Output image with the same size and type as src. 1763 */ 1764 public static void stylization(Mat src, Mat dst) { 1765 stylization_2(src.nativeObj, dst.nativeObj); 1766 } 1767 1768 1769 1770 1771 // C++: void cv::inpaint(Mat src, Mat inpaintMask, Mat& dst, double inpaintRadius, int flags) 1772 private static native void inpaint_0(long src_nativeObj, long inpaintMask_nativeObj, long dst_nativeObj, double inpaintRadius, int flags); 1773 1774 // C++: void cv::fastNlMeansDenoising(Mat src, Mat& dst, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21) 1775 private static native void fastNlMeansDenoising_0(long src_nativeObj, long dst_nativeObj, float h, int templateWindowSize, int searchWindowSize); 1776 private static native void fastNlMeansDenoising_1(long src_nativeObj, long dst_nativeObj, float h, int templateWindowSize); 1777 private static native void fastNlMeansDenoising_2(long src_nativeObj, long dst_nativeObj, float h); 1778 private static native void fastNlMeansDenoising_3(long src_nativeObj, long dst_nativeObj); 1779 1780 // C++: void cv::fastNlMeansDenoising(Mat src, Mat& dst, vector_float h, int templateWindowSize = 7, int searchWindowSize = 21, int normType = NORM_L2) 1781 private static native void fastNlMeansDenoising_4(long src_nativeObj, long dst_nativeObj, long h_mat_nativeObj, int templateWindowSize, int searchWindowSize, int normType); 1782 private static native void fastNlMeansDenoising_5(long src_nativeObj, long dst_nativeObj, long h_mat_nativeObj, int templateWindowSize, int searchWindowSize); 1783 private static native void fastNlMeansDenoising_6(long src_nativeObj, long dst_nativeObj, long h_mat_nativeObj, int templateWindowSize); 1784 private static native void fastNlMeansDenoising_7(long src_nativeObj, long dst_nativeObj, long h_mat_nativeObj); 1785 1786 // C++: void cv::fastNlMeansDenoisingColored(Mat src, Mat& dst, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21) 1787 private static native void fastNlMeansDenoisingColored_0(long src_nativeObj, long dst_nativeObj, float h, float hColor, int templateWindowSize, int searchWindowSize); 1788 private static native void fastNlMeansDenoisingColored_1(long src_nativeObj, long dst_nativeObj, float h, float hColor, int templateWindowSize); 1789 private static native void fastNlMeansDenoisingColored_2(long src_nativeObj, long dst_nativeObj, float h, float hColor); 1790 private static native void fastNlMeansDenoisingColored_3(long src_nativeObj, long dst_nativeObj, float h); 1791 private static native void fastNlMeansDenoisingColored_4(long src_nativeObj, long dst_nativeObj); 1792 1793 // C++: void cv::fastNlMeansDenoisingMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21) 1794 private static native void fastNlMeansDenoisingMulti_0(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, int templateWindowSize, int searchWindowSize); 1795 private static native void fastNlMeansDenoisingMulti_1(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, int templateWindowSize); 1796 private static native void fastNlMeansDenoisingMulti_2(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h); 1797 private static native void fastNlMeansDenoisingMulti_3(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize); 1798 1799 // C++: void cv::fastNlMeansDenoisingMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, vector_float h, int templateWindowSize = 7, int searchWindowSize = 21, int normType = NORM_L2) 1800 private static native void fastNlMeansDenoisingMulti_4(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, long h_mat_nativeObj, int templateWindowSize, int searchWindowSize, int normType); 1801 private static native void fastNlMeansDenoisingMulti_5(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, long h_mat_nativeObj, int templateWindowSize, int searchWindowSize); 1802 private static native void fastNlMeansDenoisingMulti_6(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, long h_mat_nativeObj, int templateWindowSize); 1803 private static native void fastNlMeansDenoisingMulti_7(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, long h_mat_nativeObj); 1804 1805 // C++: void cv::fastNlMeansDenoisingColoredMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21) 1806 private static native void fastNlMeansDenoisingColoredMulti_0(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize, int searchWindowSize); 1807 private static native void fastNlMeansDenoisingColoredMulti_1(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize); 1808 private static native void fastNlMeansDenoisingColoredMulti_2(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor); 1809 private static native void fastNlMeansDenoisingColoredMulti_3(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h); 1810 private static native void fastNlMeansDenoisingColoredMulti_4(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize); 1811 1812 // C++: void cv::denoise_TVL1(vector_Mat observations, Mat result, double lambda = 1.0, int niters = 30) 1813 private static native void denoise_TVL1_0(long observations_mat_nativeObj, long result_nativeObj, double lambda, int niters); 1814 private static native void denoise_TVL1_1(long observations_mat_nativeObj, long result_nativeObj, double lambda); 1815 private static native void denoise_TVL1_2(long observations_mat_nativeObj, long result_nativeObj); 1816 1817 // C++: Ptr_Tonemap cv::createTonemap(float gamma = 1.0f) 1818 private static native long createTonemap_0(float gamma); 1819 private static native long createTonemap_1(); 1820 1821 // C++: Ptr_TonemapDrago cv::createTonemapDrago(float gamma = 1.0f, float saturation = 1.0f, float bias = 0.85f) 1822 private static native long createTonemapDrago_0(float gamma, float saturation, float bias); 1823 private static native long createTonemapDrago_1(float gamma, float saturation); 1824 private static native long createTonemapDrago_2(float gamma); 1825 private static native long createTonemapDrago_3(); 1826 1827 // C++: Ptr_TonemapReinhard cv::createTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f) 1828 private static native long createTonemapReinhard_0(float gamma, float intensity, float light_adapt, float color_adapt); 1829 private static native long createTonemapReinhard_1(float gamma, float intensity, float light_adapt); 1830 private static native long createTonemapReinhard_2(float gamma, float intensity); 1831 private static native long createTonemapReinhard_3(float gamma); 1832 private static native long createTonemapReinhard_4(); 1833 1834 // C++: Ptr_TonemapMantiuk cv::createTonemapMantiuk(float gamma = 1.0f, float scale = 0.7f, float saturation = 1.0f) 1835 private static native long createTonemapMantiuk_0(float gamma, float scale, float saturation); 1836 private static native long createTonemapMantiuk_1(float gamma, float scale); 1837 private static native long createTonemapMantiuk_2(float gamma); 1838 private static native long createTonemapMantiuk_3(); 1839 1840 // C++: Ptr_AlignMTB cv::createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true) 1841 private static native long createAlignMTB_0(int max_bits, int exclude_range, boolean cut); 1842 private static native long createAlignMTB_1(int max_bits, int exclude_range); 1843 private static native long createAlignMTB_2(int max_bits); 1844 private static native long createAlignMTB_3(); 1845 1846 // C++: Ptr_CalibrateDebevec cv::createCalibrateDebevec(int samples = 70, float lambda = 10.0f, bool random = false) 1847 private static native long createCalibrateDebevec_0(int samples, float lambda, boolean random); 1848 private static native long createCalibrateDebevec_1(int samples, float lambda); 1849 private static native long createCalibrateDebevec_2(int samples); 1850 private static native long createCalibrateDebevec_3(); 1851 1852 // C++: Ptr_CalibrateRobertson cv::createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f) 1853 private static native long createCalibrateRobertson_0(int max_iter, float threshold); 1854 private static native long createCalibrateRobertson_1(int max_iter); 1855 private static native long createCalibrateRobertson_2(); 1856 1857 // C++: Ptr_MergeDebevec cv::createMergeDebevec() 1858 private static native long createMergeDebevec_0(); 1859 1860 // C++: Ptr_MergeMertens cv::createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f, float exposure_weight = 0.0f) 1861 private static native long createMergeMertens_0(float contrast_weight, float saturation_weight, float exposure_weight); 1862 private static native long createMergeMertens_1(float contrast_weight, float saturation_weight); 1863 private static native long createMergeMertens_2(float contrast_weight); 1864 private static native long createMergeMertens_3(); 1865 1866 // C++: Ptr_MergeRobertson cv::createMergeRobertson() 1867 private static native long createMergeRobertson_0(); 1868 1869 // C++: void cv::decolor(Mat src, Mat& grayscale, Mat& color_boost) 1870 private static native void decolor_0(long src_nativeObj, long grayscale_nativeObj, long color_boost_nativeObj); 1871 1872 // C++: void cv::seamlessClone(Mat src, Mat dst, Mat mask, Point p, Mat& blend, int flags) 1873 private static native void seamlessClone_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj, double p_x, double p_y, long blend_nativeObj, int flags); 1874 1875 // C++: void cv::colorChange(Mat src, Mat mask, Mat& dst, float red_mul = 1.0f, float green_mul = 1.0f, float blue_mul = 1.0f) 1876 private static native void colorChange_0(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float red_mul, float green_mul, float blue_mul); 1877 private static native void colorChange_1(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float red_mul, float green_mul); 1878 private static native void colorChange_2(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float red_mul); 1879 private static native void colorChange_3(long src_nativeObj, long mask_nativeObj, long dst_nativeObj); 1880 1881 // C++: void cv::illuminationChange(Mat src, Mat mask, Mat& dst, float alpha = 0.2f, float beta = 0.4f) 1882 private static native void illuminationChange_0(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float alpha, float beta); 1883 private static native void illuminationChange_1(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float alpha); 1884 private static native void illuminationChange_2(long src_nativeObj, long mask_nativeObj, long dst_nativeObj); 1885 1886 // C++: void cv::textureFlattening(Mat src, Mat mask, Mat& dst, float low_threshold = 30, float high_threshold = 45, int kernel_size = 3) 1887 private static native void textureFlattening_0(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float low_threshold, float high_threshold, int kernel_size); 1888 private static native void textureFlattening_1(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float low_threshold, float high_threshold); 1889 private static native void textureFlattening_2(long src_nativeObj, long mask_nativeObj, long dst_nativeObj, float low_threshold); 1890 private static native void textureFlattening_3(long src_nativeObj, long mask_nativeObj, long dst_nativeObj); 1891 1892 // C++: void cv::edgePreservingFilter(Mat src, Mat& dst, int flags = 1, float sigma_s = 60, float sigma_r = 0.4f) 1893 private static native void edgePreservingFilter_0(long src_nativeObj, long dst_nativeObj, int flags, float sigma_s, float sigma_r); 1894 private static native void edgePreservingFilter_1(long src_nativeObj, long dst_nativeObj, int flags, float sigma_s); 1895 private static native void edgePreservingFilter_2(long src_nativeObj, long dst_nativeObj, int flags); 1896 private static native void edgePreservingFilter_3(long src_nativeObj, long dst_nativeObj); 1897 1898 // C++: void cv::detailEnhance(Mat src, Mat& dst, float sigma_s = 10, float sigma_r = 0.15f) 1899 private static native void detailEnhance_0(long src_nativeObj, long dst_nativeObj, float sigma_s, float sigma_r); 1900 private static native void detailEnhance_1(long src_nativeObj, long dst_nativeObj, float sigma_s); 1901 private static native void detailEnhance_2(long src_nativeObj, long dst_nativeObj); 1902 1903 // C++: void cv::pencilSketch(Mat src, Mat& dst1, Mat& dst2, float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f) 1904 private static native void pencilSketch_0(long src_nativeObj, long dst1_nativeObj, long dst2_nativeObj, float sigma_s, float sigma_r, float shade_factor); 1905 private static native void pencilSketch_1(long src_nativeObj, long dst1_nativeObj, long dst2_nativeObj, float sigma_s, float sigma_r); 1906 private static native void pencilSketch_2(long src_nativeObj, long dst1_nativeObj, long dst2_nativeObj, float sigma_s); 1907 private static native void pencilSketch_3(long src_nativeObj, long dst1_nativeObj, long dst2_nativeObj); 1908 1909 // C++: void cv::stylization(Mat src, Mat& dst, float sigma_s = 60, float sigma_r = 0.45f) 1910 private static native void stylization_0(long src_nativeObj, long dst_nativeObj, float sigma_s, float sigma_r); 1911 private static native void stylization_1(long src_nativeObj, long dst_nativeObj, float sigma_s); 1912 private static native void stylization_2(long src_nativeObj, long dst_nativeObj); 1913 1914}