arrayfire/image/mod.rs

use super::core::{
    af_array, dim_t, AfError, Array, BorderType, CannyThresholdType, ColorSpace, ConfidenceCCInput,
    Connectivity, DeconvInput, DiffusionEq, EdgeComputable, FloatingPoint, FluxFn,
    GrayRGBConvertible, HasAfEnum, ImageFilterType, ImageNativeType, InterpType, InverseDeconvAlgo,
    IterativeDeconvAlgo, MomentType, MomentsComputable, RealFloating, RealNumber, YCCStd,
    HANDLE_ERROR,
};

use libc::{c_char, c_double, c_float, c_int, c_uint};
use std::ffi::CString;

// unused functions from image.h header
// TODO add later when requested
// af_load_image_memory
// af_save_image_memory
// af_delete_image_memory

extern "C" {
    fn af_cast(out: *mut af_array, arr: af_array, aftype: c_uint) -> c_int;
    fn af_gradient(dx: *mut af_array, dy: *mut af_array, arr: af_array) -> c_int;
    fn af_load_image(out: *mut af_array, filename: *const c_char, iscolor: bool) -> c_int;
    fn af_save_image(filename: *const c_char, input: af_array) -> c_int;
    fn af_load_image_native(out: *mut af_array, filename: *const c_char) -> c_int;
    fn af_save_image_native(filename: *const c_char, input: af_array) -> c_int;

    fn af_resize(
        out: *mut af_array,
        input: af_array,
        odim0: dim_t,
        odim1: dim_t,
        method: c_uint,
    ) -> c_int;

    fn af_transform(
        out: *mut af_array,
        input: af_array,
        trans: af_array,
        odim0: dim_t,
        odim1: dim_t,
        method: c_uint,
        is_inverse: bool,
    ) -> c_int;

    fn af_rotate(
        out: *mut af_array,
        input: af_array,
        theta: c_float,
        crop: bool,
        method: c_uint,
    ) -> c_int;

    fn af_translate(
        out: *mut af_array,
        input: af_array,
        trans0: c_float,
        trans1: c_float,
        odim0: dim_t,
        odim1: dim_t,
        method: c_uint,
    ) -> c_int;

    fn af_scale(
        out: *mut af_array,
        input: af_array,
        scale0: c_float,
        scale1: c_float,
        odim0: dim_t,
        odim1: dim_t,
        method: c_uint,
    ) -> c_int;

    fn af_skew(
        out: *mut af_array,
        input: af_array,
        skew0: c_float,
        skew1: c_float,
        odim0: dim_t,
        odim1: dim_t,
        method: c_uint,
        is_inverse: bool,
    ) -> c_int;

    fn af_histogram(
        out: *mut af_array,
        input: af_array,
        nbins: c_uint,
        minval: c_double,
        maxval: c_double,
    ) -> c_int;

    fn af_dilate(out: *mut af_array, input: af_array, mask: af_array) -> c_int;
    fn af_dilate3(out: *mut af_array, input: af_array, mask: af_array) -> c_int;
    fn af_erode(out: *mut af_array, input: af_array, mask: af_array) -> c_int;
    fn af_erode3(out: *mut af_array, input: af_array, mask: af_array) -> c_int;
    fn af_regions(out: *mut af_array, input: af_array, conn: c_uint, aftype: c_uint) -> c_int;
    fn af_sobel_operator(dx: *mut af_array, dy: *mut af_array, i: af_array, ksize: c_uint)
        -> c_int;
    fn af_rgb2gray(
        out: *mut af_array,
        input: af_array,
        r: c_float,
        g: c_float,
        b: c_float,
    ) -> c_int;
    fn af_gray2rgb(
        out: *mut af_array,
        input: af_array,
        r: c_float,
        g: c_float,
        b: c_float,
    ) -> c_int;
    fn af_hist_equal(out: *mut af_array, input: af_array, hist: af_array) -> c_int;
    fn af_hsv2rgb(out: *mut af_array, input: af_array) -> c_int;
    fn af_rgb2hsv(out: *mut af_array, input: af_array) -> c_int;

    fn af_bilateral(
        out: *mut af_array,
        input: af_array,
        sp_sig: c_float,
        ch_sig: c_float,
        iscolor: bool,
    ) -> c_int;

    fn af_mean_shift(
        out: *mut af_array,
        input: af_array,
        sp_sig: c_float,
        ch_sig: c_float,
        iter: c_uint,
        iscolor: bool,
    ) -> c_int;

    fn af_medfilt(
        out: *mut af_array,
        input: af_array,
        wlen: dim_t,
        wwid: dim_t,
        etype: c_uint,
    ) -> c_int;

    fn af_medfilt1(out: *mut af_array, input: af_array, wlen: dim_t, etype: c_uint) -> c_int;

    fn af_minfilt(
        out: *mut af_array,
        input: af_array,
        wlen: dim_t,
        wwid: dim_t,
        etype: c_uint,
    ) -> c_int;

    fn af_maxfilt(
        out: *mut af_array,
        input: af_array,
        wlen: dim_t,
        wwid: dim_t,
        etype: c_uint,
    ) -> c_int;

    fn af_gaussian_kernel(
        out: *mut af_array,
        rows: c_int,
        cols: c_int,
        sigma_r: c_double,
        sigma_c: c_double,
    ) -> c_int;

    fn af_color_space(
        out: *mut af_array,
        input: af_array,
        tospace: c_uint,
        fromspace: c_uint,
    ) -> c_int;

    fn af_unwrap(
        out: *mut af_array,
        input: af_array,
        wx: dim_t,
        wy: dim_t,
        sx: dim_t,
        sy: dim_t,
        px: dim_t,
        py: dim_t,
        is_column: bool,
    ) -> c_int;

    fn af_wrap(
        out: *mut af_array,
        input: af_array,
        ox: dim_t,
        oy: dim_t,
        wx: dim_t,
        wy: dim_t,
        sx: dim_t,
        sy: dim_t,
        px: dim_t,
        py: dim_t,
        is_column: bool,
    ) -> c_int;

    fn af_sat(out: *mut af_array, input: af_array) -> c_int;

    fn af_ycbcr2rgb(out: *mut af_array, input: af_array, stnd: c_uint) -> c_int;
    fn af_rgb2ycbcr(out: *mut af_array, input: af_array, stnd: c_uint) -> c_int;
    fn af_is_image_io_available(out: *mut bool) -> c_int;
    fn af_transform_coordinates(
        out: *mut af_array,
        tf: af_array,
        d0: c_float,
        d1: c_float,
    ) -> c_int;

    fn af_moments(out: *mut af_array, input: af_array, moment: c_uint) -> c_int;
    fn af_moments_all(out: *mut c_double, input: af_array, moment: c_uint) -> c_int;

    fn af_canny(
        out: *mut af_array,
        input: af_array,
        thres_type: c_int,
        low: c_float,
        high: c_float,
        swindow: c_uint,
        is_fast: bool,
    ) -> c_int;
    fn af_anisotropic_diffusion(
        out: *mut af_array,
        input: af_array,
        dt: c_float,
        K: c_float,
        iters: c_uint,
        fftype: c_uint,
        diff_kind: c_uint,
    ) -> c_int;
    fn af_confidence_cc(
        out: *mut af_array,
        input: af_array,
        seedx: af_array,
        seedy: af_array,
        radius: c_uint,
        multiplier: c_uint,
        iterations: c_int,
        seg_val: c_double,
    ) -> c_int;
    fn af_iterative_deconv(
        out: *mut af_array,
        input: af_array,
        ker: af_array,
        iterations: c_uint,
        rfactor: c_float,
        algo: c_uint,
    ) -> c_int;
    fn af_inverse_deconv(
        out: *mut af_array,
        input: af_array,
        ker: af_array,
        gamma: c_float,
        algo: c_uint,
    ) -> c_int;
}

/// Calculate the gradients
///
/// The gradients along the first and second dimensions are calculated simultaneously.
///
/// # Parameters
///
/// - `input` is the input Array
///
/// # Return Values
///
/// A tuple of Arrays.
///
/// The first Array is `dx` which is the gradient along the 1st dimension.
///
/// The second Array is `dy` which is the gradient along the 2nd dimension.
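///
/// # Examples
///
/// A minimal usage sketch added for illustration (the image contents and dimensions are
/// arbitrary, not taken from the crate's own examples):
///
/// ```no_run
/// use arrayfire::{gradient, randu, Dim4};
///
/// // A 128x128 single-channel image with random values in [0, 1]
/// let img = randu::<f32>(Dim4::new(&[128, 128, 1, 1]));
/// // dx and dy have the same dimensions as the input
/// let (dx, dy) = gradient(&img);
/// ```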
pub fn gradient<T>(input: &Array<T>) -> (Array<T>, Array<T>)
where
    T: HasAfEnum + FloatingPoint,
{
    unsafe {
        let mut dx: af_array = std::ptr::null_mut();
        let mut dy: af_array = std::ptr::null_mut();
        let err_val = af_gradient(
            &mut dx as *mut af_array,
            &mut dy as *mut af_array,
            input.get(),
        );
        HANDLE_ERROR(AfError::from(err_val));
        (dx.into(), dy.into())
    }
}

/// Load Image into Array
///
/// Only images with 8/16/32 bits per channel can be loaded using this function.
///
/// # Parameters
///
/// - `filename` is the absolute path of the image to be loaded.
/// - `is_color` indicates if the image file at the given path is color or grayscale.
///
/// # Return Arrays
///
/// An Array with pixel values loaded from the image
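///
/// # Examples
///
/// A small usage sketch added for illustration; the file paths are placeholders and the pixel
/// type is chosen arbitrarily:
///
/// ```no_run
/// use arrayfire::{load_image, save_image, Array};
///
/// // Load a color image as f32 and write it back out unchanged.
/// let img: Array<f32> = load_image("input.png".to_string(), true);
/// save_image("output.png".to_string(), &img);
/// ```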
#[allow(clippy::match_wild_err_arm)]
pub fn load_image<T>(filename: String, is_color: bool) -> Array<T>
where
    T: HasAfEnum + RealNumber,
{
    let cstr_param = match CString::new(filename) {
        Ok(cstr) => cstr,
        Err(_) => panic!("CString creation from input filename failed"),
    };
    let trgt_type = T::get_af_dtype();
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err1 = af_load_image(&mut temp as *mut af_array, cstr_param.as_ptr(), is_color);
        HANDLE_ERROR(AfError::from(err1));

        let mut img: af_array = std::ptr::null_mut();
        let err2 = af_cast(&mut img as *mut af_array, temp, trgt_type as c_uint);
        HANDLE_ERROR(AfError::from(err2));

        img.into()
    }
}

/// Load Image into Array in its native type
///
/// This load image function allows you to load images as U8, U16 or F32
/// depending on the type of input image as shown by the table below.
///
///  Bits per Color (Gray/RGB/RGBA Bits Per Pixel) | Array Type  | Range
/// -----------------------------------------------|-------------|---------------
///   8 ( 8/24/32  BPP)                            | u8          | 0 - 255
///  16 (16/48/64  BPP)                            | u16         | 0 - 65535
///  32 (32/96/128 BPP)                            | f32         | 0 - 1
///
/// # Parameters
///
/// - `filename` is the name of the file to be loaded
///
/// # Return Arrays
///
/// An Array with pixel values loaded from the image
#[allow(clippy::match_wild_err_arm)]
pub fn load_image_native<T>(filename: String) -> Array<T>
where
    T: HasAfEnum + ImageNativeType,
{
    let cstr_param = match CString::new(filename) {
        Ok(cstr) => cstr,
        Err(_) => panic!("CString creation from input filename failed"),
    };
    let trgt_type = T::get_af_dtype();
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err1 = af_load_image_native(&mut temp as *mut af_array, cstr_param.as_ptr());
        HANDLE_ERROR(AfError::from(err1));

        let mut img: af_array = std::ptr::null_mut();
        let err2 = af_cast(&mut img as *mut af_array, temp, trgt_type as c_uint);
        HANDLE_ERROR(AfError::from(err2));

        img.into()
    }
}

/// Save an Array to an image file
///
/// # Parameters
///
/// - `filename` is the absolute path (including the filename) at which the input Array is going to be saved
/// - `input` is the Array to be stored into the image file
#[allow(clippy::match_wild_err_arm)]
pub fn save_image<T>(filename: String, input: &Array<T>)
where
    T: HasAfEnum + RealNumber,
{
    let cstr_param = match CString::new(filename) {
        Ok(cstr) => cstr,
        Err(_) => panic!("CString creation from input filename failed"),
    };
    unsafe {
        let err_val = af_save_image(cstr_param.as_ptr(), input.get());
        HANDLE_ERROR(AfError::from(err_val));
    }
}

/// Save an Array without modifications to an image file
///
/// This function only accepts U8, U16, F32 arrays. These arrays are saved to images without any modifications. Note that not all image formats support 16 or 32 bit images. The best options for 16 bit images are PNG, PPM and TIFF. The best option for 32 bit images is TIFF. These allow lossless storage.
///
/// The images stored have the following properties:
///
///  Array Type  | Bits per Color (Gray/RGB/RGBA Bits Per Pixel) | Range
/// -------------|-----------------------------------------------|---------------
///  U8          |  8 ( 8/24/32  BPP)                            | 0 - 255
///  U16         | 16 (16/48/64  BPP)                            | 0 - 65535
///  F32         | 32 (32/96/128 BPP)                            | 0 - 1
///
/// # Parameters
///
/// - `filename` is the name of the file to be saved
/// - `input` is the Array to be saved. Should be U8 for saving an 8-bit image, U16 for a 16-bit image, and F32 for a 32-bit image.
#[allow(clippy::match_wild_err_arm)]
pub fn save_image_native<T>(filename: String, input: &Array<T>)
where
    T: HasAfEnum + ImageNativeType,
{
    let cstr_param = match CString::new(filename) {
        Ok(cstr) => cstr,
        Err(_) => panic!("CString creation from input filename failed"),
    };
    unsafe {
        let err_val = af_save_image_native(cstr_param.as_ptr(), input.get());
        HANDLE_ERROR(AfError::from(err_val));
    }
}

/// Resize an Image
///
/// Resizing an input image can be done using either NEAREST or BILINEAR interpolations.
/// Nearest interpolation will pick the nearest value to the location, whereas bilinear
/// interpolation will compute a weighted average of the neighboring values.
///
/// This function does not differentiate between images and data. As long as the array is defined
/// and the output dimensions are not 0, it will resize any type or size of array.
///
/// # Parameters
///
/// - `input` is the image to be resized
/// - `odim0` is the output height
/// - `odim1` is the output width
/// - `method` indicates which interpolation method to use for resizing. It uses enum
/// [InterpType](./enum.InterpType.html) to identify the interpolation method.
///
/// # Return Values
///
/// Resized Array
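///
/// # Examples
///
/// A small usage sketch added for illustration; the dimensions are arbitrary:
///
/// ```no_run
/// use arrayfire::{randu, resize, Dim4, InterpType};
///
/// let img = randu::<f32>(Dim4::new(&[480, 640, 1, 1]));
/// // Downscale to half the original size using bilinear interpolation.
/// let half = resize(&img, 240, 320, InterpType::BILINEAR);
/// ```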
pub fn resize<T: HasAfEnum>(
    input: &Array<T>,
    odim0: i64,
    odim1: i64,
    method: InterpType,
) -> Array<T> {
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_resize(
            &mut temp as *mut af_array,
            input.get(),
            odim0 as dim_t,
            odim1 as dim_t,
            method as c_uint,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Transform(Affine) an Image
///
/// The transform function uses an affine transform matrix to transform an input image into a new
/// one. The transform matrix tf is a 3x2 matrix of type float. The matrix operation is applied to each
/// location (x, y) that is then transformed to (x', y') of the new array. Hence the transformation
/// is an element-wise operation.
///
/// The operation is as below: tf = [r00 r10 r01 r11 t0 t1]
///
/// x' = x * r00 + y * r01 + t0; y' = x * r10 + y * r11 + t1;
///
/// Interpolation types of NEAREST, LINEAR, BILINEAR and CUBIC are allowed. Affine transforms can be used for various purposes. [translate](./fn.translate.html), [scale](./fn.scale.html) and [skew](./fn.skew.html) are
/// specializations of the transform function.
///
/// This function can also handle batch operations.
///
/// # Parameters
///
/// - `input` is the image to be transformed
/// - `trans` is the transformation matrix to be used for image transformation
/// - `odim0` is the output height
/// - `odim1` is the output width
/// - `method` indicates which interpolation method to use. It uses enum
/// [InterpType](./enum.InterpType.html) to identify the interpolation method.
/// - `is_inverse` indicates whether to apply the inverse or the forward transform
///
/// # Return Values
///
/// Transformed Array
pub fn transform<T: HasAfEnum>(
    input: &Array<T>,
    trans: &Array<f32>,
    odim0: i64,
    odim1: i64,
    method: InterpType,
    is_inverse: bool,
) -> Array<T> {
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_transform(
            &mut temp as *mut af_array,
            input.get(),
            trans.get(),
            odim0 as dim_t,
            odim1 as dim_t,
            method as c_uint,
            is_inverse,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Rotate an Image
///
/// Rotating an input image can be done using either NEAREST or BILINEAR interpolations.
/// Nearest interpolation will pick the nearest value to the location, whereas bilinear
/// interpolation will compute a weighted average of the neighboring values.
///
/// This function does not differentiate between images and data. As long as the array is defined,
/// it will rotate any type or size of array.
///
/// The crop option allows you to choose whether to resize the image. If crop is set to false,
/// the entire rotated image will be a part of the array and the new array size will be greater
/// than or equal to the input array size. If crop is set to true, then the new array size is same
/// as the input array size and the data that falls outside the boundaries of the array is
/// discarded.
///
/// Any location of the rotated array that does not map to a location of the input array is set to
/// 0.
///
/// # Parameters
///
/// - `input` is the input image
/// - `theta` is the amount of angle (in radians) image should be rotated
/// - `crop` indicates if the rotated image has to be cropped to original size
/// - `method` indicates which interpolation method to use for rotating the image. It uses enum
/// [InterpType](./enum.InterpType.html) to identify the interpolation method.
///
/// # Return Values
///
/// Rotated Array
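///
/// # Examples
///
/// A small usage sketch added for illustration; the image and angle are arbitrary:
///
/// ```no_run
/// use arrayfire::{randu, rotate, Dim4, InterpType};
/// use std::f64::consts::PI;
///
/// let img = randu::<f32>(Dim4::new(&[256, 256, 1, 1]));
/// // Rotate by 45 degrees and keep the original canvas size (crop = true).
/// let rotated = rotate(&img, PI / 4.0, true, InterpType::NEAREST);
/// ```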
pub fn rotate<T: HasAfEnum>(
    input: &Array<T>,
    theta: f64,
    crop: bool,
    method: InterpType,
) -> Array<T> {
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_rotate(
            &mut temp as *mut af_array,
            input.get(),
            theta as c_float,
            crop,
            method as c_uint,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Translate an Image
///
/// Translating an image is moving it along the 1st and 2nd dimensions by trans0 and trans1.
/// Positive values of trans0 and trans1 will move the data towards negative x and negative y,
/// whereas negative values will move it towards positive x and positive y. See the example below
/// for more.
///
/// To specify an output dimension, use the odim0 and odim1 for dim0 and dim1 respectively. The
/// size of the 2nd and 3rd dimensions is the same as the input. If odim0 and odim1 are not defined, then the
/// output dimensions are same as the input dimensions and the data out of bounds will be
/// discarded.
///
/// All new values that do not map to a location of the input array are set to 0.
///
/// Translate is a special case of the [transform](./fn.transform.html) function.
///
/// # Parameters
///
/// - `input` is input image
/// - `trans0` is amount by which the first dimension is translated
/// - `trans1` is amount by which the second dimension is translated
/// - `odim0` is the first output dimension
/// - `odim1` is the second output dimension
/// - `method` is the interpolation type (Nearest by default)
///
/// # Return Values
///
/// Translated Image(Array).
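///
/// # Examples
///
/// A small usage sketch added for illustration; the image, shifts and output size are arbitrary:
///
/// ```no_run
/// use arrayfire::{randu, translate, Dim4, InterpType};
///
/// let img = randu::<f32>(Dim4::new(&[100, 100, 1, 1]));
/// // Shift by 10 pixels along dim0 and 20 pixels along dim1, keeping the input size.
/// let shifted = translate(&img, 10.0, 20.0, 100, 100, InterpType::NEAREST);
/// ```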
pub fn translate<T: HasAfEnum>(
    input: &Array<T>,
    trans0: f32,
    trans1: f32,
    odim0: i64,
    odim1: i64,
    method: InterpType,
) -> Array<T> {
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_translate(
            &mut temp as *mut af_array,
            input.get(),
            trans0,
            trans1,
            odim0 as dim_t,
            odim1 as dim_t,
            method as c_uint,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Scale an Image
///
/// Scale is the same functionality as [resize](./fn.resize.html) except that the scale function uses the transform kernels. The other difference is that scale does not set boundary values to be the boundary of the input array. Instead these are set to 0.
///
/// Scale is a special case of the [transform](./fn.transform.html) function.
///
/// # Parameters
///
/// - `input` is input image
/// - `scale0` is the factor by which the first dimension is scaled
/// - `scale1` is the factor by which the second dimension is scaled
/// - `odim0` is the first output dimension
/// - `odim1` is the second output dimension
/// - `method` is the interpolation type (Nearest by default)
///
/// # Return Values
///
/// Scaled Image(Array).
pub fn scale<T: HasAfEnum>(
    input: &Array<T>,
    scale0: f32,
    scale1: f32,
    odim0: i64,
    odim1: i64,
    method: InterpType,
) -> Array<T> {
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_scale(
            &mut temp as *mut af_array,
            input.get(),
            scale0,
            scale1,
            odim0 as dim_t,
            odim1 as dim_t,
            method as c_uint,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Skew an image
///
/// The skew function skews the input array along dim0 by skew0 and along dim1 by skew1. The skew
/// arguments are in radians. Skewing the data means the data remains parallel along one dimension
/// while the other dimension is shifted based on the angle. If both skew0 and skew1 are
/// specified, then the data will be skewed along both directions. Explicit output dimensions
/// can be specified using odim0 and odim1. All new values that do not map to a location of the input array are set to 0.
///
/// Skew is a special case of the [transform](./fn.transform.html) function.
///
/// # Parameters
///
/// - `input` is the image to be skewed
/// - `skew0` is the factor by which data is skewed along first dimension
/// - `skew1` is the factor by which data is skewed along second dimension
/// - `odim0` is the output length along first dimension
/// - `odim1` is the output length along second dimension
/// - `method` indicates which interpolation method to use for skewing the image. It uses enum
/// [InterpType](./enum.InterpType.html) to identify the interpolation method.
/// - `is_inverse` indicates whether to apply the inverse or the forward transform
///
/// # Return Values
///
/// Skewed Image
pub fn skew<T: HasAfEnum>(
    input: &Array<T>,
    skew0: f32,
    skew1: f32,
    odim0: i64,
    odim1: i64,
    method: InterpType,
    is_inverse: bool,
) -> Array<T> {
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_skew(
            &mut temp as *mut af_array,
            input.get(),
            skew0,
            skew1,
            odim0 as dim_t,
            odim1 as dim_t,
            method as c_uint,
            is_inverse,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Compute Histogram of an Array
///
/// A histogram is a representation of the distribution of given data. This representation is
/// essentially a graph consisting of the data range or domain on one axis and frequency of
/// occurrence on the other axis. All the data in the domain is counted in the appropriate bin. The
/// total number of elements belonging to each bin is known as the bin's frequency.
///
/// The regular histogram function creates bins of equal size between the minimum and maximum of
/// the input data (min and max are calculated internally). The histogram min-max function takes
/// input parameters minimum and maximum, and divides the bins into equal sizes within the range
/// specified by min and max parameters. All values less than min in the data range are placed in
/// the first (min) bin and all values greater than max will be placed in the last (max) bin.
///
/// # Parameters
///
/// - `input` is the Array whose histogram has to be computed
/// - `nbins` is the number of bins the input data has to be categorized into.
/// - `minval` is the minimum value of bin ordering
/// - `maxval` is the maximum value of bin ordering
///
/// # Return Values
///
/// Histogram of input Array
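///
/// # Examples
///
/// A small usage sketch added for illustration; the data and bin settings are arbitrary:
///
/// ```no_run
/// use arrayfire::{histogram, randu, Dim4};
///
/// let data = randu::<f32>(Dim4::new(&[10000, 1, 1, 1]));
/// // 16 equally sized bins covering the value range [0, 1]
/// let hist = histogram(&data, 16, 0.0, 1.0);
/// ```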
pub fn histogram<T>(input: &Array<T>, nbins: u32, minval: f64, maxval: f64) -> Array<u32>
where
    T: HasAfEnum + RealNumber,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_histogram(
            &mut temp as *mut af_array,
            input.get(),
            nbins,
            minval,
            maxval,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Dilate an Image
///
/// The dilation function takes two pieces of data as inputs. The first is the input image to be
/// morphed, and the second is the mask indicating the neighborhood around each pixel to match.
///
/// In dilation, for each pixel, the mask is centered at the pixel. If the center pixel of the mask
/// matches the corresponding pixel on the image, then the mask is accepted. If the center pixels
/// do not match, then the mask is ignored and no changes are made.
///
/// For further reference, see [here](https://en.wikipedia.org/wiki/Dilation_(morphology)).
///
/// # Parameters
///
/// - `input` is the input image
/// - `mask` is the morphological operation mask
///
/// # Return Values
///
/// Dilated Image(Array)
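///
/// # Examples
///
/// A small usage sketch added for illustration; the image and structuring element are arbitrary:
///
/// ```no_run
/// use arrayfire::{constant, dilate, randu, Dim4};
///
/// let img = randu::<f32>(Dim4::new(&[128, 128, 1, 1]));
/// // A 3x3 structuring element of ones
/// let mask = constant(1.0_f32, Dim4::new(&[3, 3, 1, 1]));
/// let dilated = dilate(&img, &mask);
/// ```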
pub fn dilate<T>(input: &Array<T>, mask: &Array<T>) -> Array<T>
where
    T: HasAfEnum + ImageFilterType,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_dilate(&mut temp as *mut af_array, input.get(), mask.get());
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Erode an Image
///
/// The erosion function is a morphological transformation on an image that requires two inputs.
/// The first is the image to be morphed, and the second is the mask indicating neighborhood that
/// must be white in order to preserve each pixel.
///
/// In erode, for each pixel, the mask is centered at the pixel. If each pixel of the mask matches
/// the corresponding pixel on the image, then no change is made. If there is at least one
/// mismatch, then pixels are changed to the background color (black).
///
/// For further reference, see [here](https://en.wikipedia.org/wiki/Erosion_(morphology)).
///
/// # Parameters
///
/// - `input` is the input image
/// - `mask` is the morphological operation mask
///
/// # Return Values
///
/// Eroded Image(Array)
pub fn erode<T>(input: &Array<T>, mask: &Array<T>) -> Array<T>
where
    T: HasAfEnum + ImageFilterType,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_erode(&mut temp as *mut af_array, input.get(), mask.get());
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Dilate a Volume
///
/// Dilation for a volume is similar to the way dilation works on an image. The only difference is
/// that the masking operation is performed on a volume instead of a rectangular region.
///
/// # Parameters
///
/// - `input` is the input volume
/// - `mask` is the morphological operation mask
///
/// # Return Values
///
/// Dilated Volume(Array)
pub fn dilate3<T>(input: &Array<T>, mask: &Array<T>) -> Array<T>
where
    T: HasAfEnum + ImageFilterType,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_dilate3(&mut temp as *mut af_array, input.get(), mask.get());
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Erode a Volume
///
/// Erosion for a volume is similar to the way erosion works on an image. The only difference is
/// that the masking operation is performed on a volume instead of a rectangular region.
///
/// # Parameters
///
/// - `input` is the input volume
/// - `mask` is the morphological operation mask
///
/// # Return Values
///
/// Eroded Volume(Array)
pub fn erode3<T>(input: &Array<T>, mask: &Array<T>) -> Array<T>
where
    T: HasAfEnum + ImageFilterType,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_erode3(&mut temp as *mut af_array, input.get(), mask.get());
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Bilateral Filter.
///
/// A bilateral filter is an edge-preserving filter that reduces noise in an image. The intensity of
/// each pixel is replaced by a weighted average of the intensities of nearby pixels. The weights
/// follow a Gaussian distribution and depend on the distance as well as the color distance.
///
/// The bilateral filter requires the size of the filter (in pixels) and the upper bound on color
/// values, N, where pixel values range from 0 to N inclusive.
///
/// # Parameters
///
/// - `input` array is the input image
/// - `spatial_sigma` is the spatial variance parameter that decides the filter window
/// - `chromatic_sigma` is the chromatic variance parameter
/// - `iscolor` indicates if the input is a color image or grayscale
///
/// # Return Values
///
/// Filtered Image - Array
pub fn bilateral<T>(
    input: &Array<T>,
    spatial_sigma: f32,
    chromatic_sigma: f32,
    iscolor: bool,
) -> Array<T::AbsOutType>
where
    T: HasAfEnum + ImageFilterType,
    T::AbsOutType: HasAfEnum,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_bilateral(
            &mut temp as *mut af_array,
            input.get(),
            spatial_sigma,
            chromatic_sigma,
            iscolor,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Meanshift Filter.
///
/// A meanshift filter is an edge-preserving smoothing filter commonly used in object tracking and
/// image segmentation.
///
/// This filter replaces each pixel in the image with the mean of the values within a given
/// color and spatial radius. The meanshift filter is an iterative algorithm that continues until a
/// maximum number of iterations is met or until the value of the means no longer changes.
///
/// # Parameters
///
/// - `input` array is the input image
/// - `spatial_sigma` is the spatial variance parameter that decides the filter window
/// - `chromatic_sigma` is the chromatic variance parameter
/// - `iter` is the number of iterations the filter operation is performed
/// - `iscolor` indicates if the input is a color image or grayscale
///
/// # Return Values
///
/// Filtered Image - Array
pub fn mean_shift<T>(
    input: &Array<T>,
    spatial_sigma: f32,
    chromatic_sigma: f32,
    iter: u32,
    iscolor: bool,
) -> Array<T>
where
    T: HasAfEnum + RealNumber,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_mean_shift(
            &mut temp as *mut af_array,
            input.get(),
            spatial_sigma,
            chromatic_sigma,
            iter,
            iscolor,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

macro_rules! filt_func_def {
    ($doc_str: expr, $fn_name: ident, $ffi_name: ident) => {
        #[doc=$doc_str]
        ///
        ///# Parameters
        ///
        /// - `input` is the input image(Array)
        /// - `wlen` is the horizontal length of the filter
        /// - `wwid` is the vertical length of the filter
        /// - `etype` is enum of type [BorderType](./enum.BorderType.html)
        ///
        ///# Return Values
        ///
        /// An Array with filtered image data.
        pub fn $fn_name<T>(input: &Array<T>, wlen: u64, wwid: u64, etype: BorderType) -> Array<T>
        where
            T: HasAfEnum + ImageFilterType,
        {
            unsafe {
                let mut temp: af_array = std::ptr::null_mut();
                let err_val = $ffi_name(
                    &mut temp as *mut af_array,
                    input.get(),
                    wlen as dim_t,
                    wwid as dim_t,
                    etype as c_uint,
                );
                HANDLE_ERROR(AfError::from(err_val));
                temp.into()
            }
        }
    };
}

filt_func_def!("Median filter", medfilt, af_medfilt);
filt_func_def!(
    "Box filter with minimum as box operation",
    minfilt,
    af_minfilt
);
filt_func_def!(
    "Box filter with maximum as box operation",
    maxfilt,
    af_maxfilt
);

/// Creates a Gaussian Kernel.
///
/// This function creates a kernel of a specified size that contains a Gaussian distribution. This
/// distribution is normalized to one. This is most commonly used when performing a Gaussian blur
/// on an image. The function takes two sets of arguments, the size of the kernel (width and height
/// in pixels) and the sigma parameters (for row and column) which affect the distribution of the
/// weights in the y and x directions, respectively.
///
/// Changing sigma causes the weights in each direction to vary. Sigma is calculated internally as
/// (0.25 * rows + 0.75) for rows and similarly for columns.
///
/// # Parameters
///
/// - `rows` is number of rows of kernel
/// - `cols` is number of cols of kernel
/// - `sigma_r` is standard deviation of rows
/// - `sigma_c` is standard deviation of cols
///
/// # Return Values
///
/// An Array with gaussian kernel values
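///
/// # Examples
///
/// A small usage sketch added for illustration; the kernel size and sigmas are arbitrary:
///
/// ```no_run
/// use arrayfire::gaussian_kernel;
///
/// // A 5x5 kernel with a standard deviation of 1.0 along both rows and columns
/// let kernel = gaussian_kernel(5, 5, 1.0, 1.0);
/// ```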
pub fn gaussian_kernel(rows: i32, cols: i32, sigma_r: f64, sigma_c: f64) -> Array<f32> {
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_gaussian_kernel(&mut temp as *mut af_array, rows, cols, sigma_r, sigma_c);
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Color space conversion
///
/// Following are the supported conversions
///
/// - RGB => GRAY
/// - GRAY => RGB
/// - RGB => HSV
/// - HSV => RGB
/// - YCbCr => RGB
/// - RGB => YCbCr
///
/// RGB (Red, Green, Blue) is the most common format used in computer imaging. RGB stores
/// individual values for red, green and blue, and hence the 3 values per pixel. A combination of
/// these three values produces the gamut of unique colors.
///
/// HSV (Hue, Saturation, Value), also known as HSB (hue, saturation, brightness), is often used by
/// artists because it is more natural to think about a color in terms of hue and saturation than
/// in terms of additive or subtractive color components (as in RGB). HSV is a transformation of
/// RGB colorspace; its components and colorimetry are relative to the RGB colorspace from which it
/// was derived. Like RGB, HSV also uses 3 values per pixel.
///
/// GRAY is a single channel color space where pixel value ranges from 0 to 1. Zero represents
/// black, one represents white, and any value between zero and one is a gray value.
///
/// # Parameters
///
/// - `input` is the input image
/// - `tospace` is the target color space. Takes values of [ColorSpace](./enum.ColorSpace.html)
/// - `fromspace` is the source image color space. Takes values of
/// [ColorSpace](./enum.ColorSpace.html)
///
/// # Return Values
///
/// An Array with input image values in target color space
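///
/// # Examples
///
/// A small usage sketch added for illustration; the random array stands in for real RGB data:
///
/// ```no_run
/// use arrayfire::{color_space, randu, ColorSpace, Dim4};
///
/// // A 3-channel image with values in [0, 1]
/// let rgb = randu::<f32>(Dim4::new(&[480, 640, 3, 1]));
/// let gray = color_space(&rgb, ColorSpace::GRAY, ColorSpace::RGB);
/// ```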
pub fn color_space<T>(input: &Array<T>, tospace: ColorSpace, fromspace: ColorSpace) -> Array<T>
where
    T: HasAfEnum + RealNumber,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_color_space(
            &mut temp as *mut af_array,
            input.get(),
            tospace as c_uint,
            fromspace as c_uint,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Find blobs in given image.
///
/// Given a binary image (with zero representing background pixels), regions computes a floating
/// point image where each connected component is labeled from 1 to N, the total number of
/// components in the image.
///
/// A component is defined as one or more nonzero pixels that are connected by the specified
/// connectivity (either [`Connectivity::FOUR`](./enum.Connectivity.html) or [`Connectivity::EIGHT`](./enum.Connectivity.html)) in two dimensions.
///
/// # Parameters
///
/// - `input` is the input image
/// - `conn` can take one of the values of [Connectivity](./enum.Connectivity.html)
///
/// # Return Values
///
/// Array with labels indicating different regions
pub fn regions<OutType>(input: &Array<bool>, conn: Connectivity) -> Array<OutType>
where
    OutType: HasAfEnum + RealNumber,
{
    let otype = OutType::get_af_dtype();
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_regions(
            &mut temp as *mut af_array,
            input.get(),
            conn as c_uint,
            otype as c_uint,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Sobel Operator
///
/// Sobel operators perform a 2-D spatial gradient measurement on an image to emphasize the regions
/// of high spatial frequency, namely edges. A more in depth discussion on it can be found [here](https://en.wikipedia.org/wiki/Sobel_operator).
///
/// # Parameters
///
/// - `input` is the input image
/// - `ker_size` is the kernel size of sobel operator
///
/// # Return Values
///
/// A tuple of Arrays.
///
/// The first Array has derivatives along horizontal direction
///
/// The second Array has derivatives along vertical direction
pub fn sobel<T>(input: &Array<T>, ker_size: u32) -> (Array<T::SobelOutType>, Array<T::SobelOutType>)
where
    T: HasAfEnum + ImageFilterType,
    T::SobelOutType: HasAfEnum,
{
    unsafe {
        let mut dx: af_array = std::ptr::null_mut();
        let mut dy: af_array = std::ptr::null_mut();
        let err_val = af_sobel_operator(
            &mut dx as *mut af_array,
            &mut dy as *mut af_array,
            input.get(),
            ker_size,
        );
        HANDLE_ERROR(AfError::from(err_val));
        (dx.into(), dy.into())
    }
}

/// Histogram Equalization
///
/// # Parameters
///
/// - `input` is the input Array to be equalized
/// - `hist` is the Array to be used for equalizing input
///
/// # Return Values
/// Equalized Array
pub fn hist_equal<T>(input: &Array<T>, hist: &Array<u32>) -> Array<T>
where
    T: HasAfEnum + RealNumber,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_hist_equal(&mut temp as *mut af_array, input.get(), hist.get());
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

macro_rules! grayrgb_func_def {
    ($doc_str: expr, $fn_name: ident, $ffi_name: ident) => {
        #[doc=$doc_str]
        ///
        ///# Parameters
        ///
        /// - `r` is fraction of red channel to appear in output
        /// - `g` is fraction of green channel to appear in output
        /// - `b` is fraction of blue channel to appear in output
        ///
        ///# Return Values
        ///
        /// An Array with image data in target color space
        pub fn $fn_name<T>(input: &Array<T>, r: f32, g: f32, b: f32) -> Array<T>
        where
            T: HasAfEnum + GrayRGBConvertible,
        {
            unsafe {
                let mut temp: af_array = std::ptr::null_mut();
                let err_val = $ffi_name(&mut temp as *mut af_array, input.get(), r, g, b);
                HANDLE_ERROR(AfError::from(err_val));
                temp.into()
            }
        }
    };
}

grayrgb_func_def!("Color(RGB) to Grayscale conversion", rgb2gray, af_rgb2gray);
grayrgb_func_def!("Grayscale to Color(RGB) conversion", gray2rgb, af_gray2rgb);

macro_rules! hsvrgb_func_def {
    ($doc_str: expr, $fn_name: ident, $ffi_name: ident) => {
        #[doc=$doc_str]
        pub fn $fn_name<T>(input: &Array<T>) -> Array<T>
        where
            T: HasAfEnum + RealFloating,
        {
            unsafe {
                let mut temp: af_array = std::ptr::null_mut();
                let err_val = $ffi_name(&mut temp as *mut af_array, input.get());
                HANDLE_ERROR(AfError::from(err_val));
                temp.into()
            }
        }
    };
}

hsvrgb_func_def!("HSV to RGB color space conversion", hsv2rgb, af_hsv2rgb);
hsvrgb_func_def!("RGB to HSV color space conversion", rgb2hsv, af_rgb2hsv);

/// Generate an array with image windows as columns
///
/// unwrap takes in an input image along with the window sizes wx and wy, strides sx and sy, and
/// padding px and py. This function then generates a matrix where each window is an independent
/// column.
///
/// The number of columns (rows if is_column is true) in the output array is governed by the
/// number of windows that can be fit along x and y directions. Padding is applied along all 4
/// sides of the matrix with px defining the height of the padding along dim 0 and py defining the
/// width of the padding along dim 1.
///
/// The first column window is always at the top left corner of the input including padding. If a
/// window cannot fit before the end of the matrix + padding, it is skipped from the generated
/// matrix.
///
/// Padding can take a maximum value of window - 1 respectively for x and y.
///
/// For multiple channels (3rd and 4th dimension), the generated matrix contains the same number of
/// channels as the input matrix. Each channel of the output matrix corresponds to the same channel
/// of the input.
///
/// # Parameters
///
/// - `input` is the input image
/// - `wx` is the block window size along 0th-dimension between \[1, input.dims\[0\] + px\]
/// - `wy` is the block window size along 1st-dimension between \[1, input.dims\[1\] + py\]
/// - `sx` is the stride along 0th-dimension
/// - `sy` is the stride along 1st-dimension
/// - `px` is the padding along 0th-dimension between [0, wx). Padding is applied both before and after.
/// - `py` is the padding along 1st-dimension between [0, wy). Padding is applied both before and after.
/// - `is_column` specifies the layout for the unwrapped patch. If is_column is false, the unwrapped patch is laid out as a row.
///
/// # Return Values
///
/// An Array with image windows as columns
///
/// # Examples
///
/// ```text
/// A [5 5 1 1]
/// 10 15 20 25 30
/// 11 16 21 26 31
/// 12 17 22 27 32
/// 13 18 23 28 33
/// 14 19 24 29 34
///
/// // Window 3x3, strides 1x1, padding 0x0
/// unwrap(A, 3, 3, 1, 1, 0, 0, False) [9 9 1 1]
/// 10 11 12 15 16 17 20 21 22
/// 11 12 13 16 17 18 21 22 23
/// 12 13 14 17 18 19 22 23 24
/// 15 16 17 20 21 22 25 26 27
/// 16 17 18 21 22 23 26 27 28
/// 17 18 19 22 23 24 27 28 29
/// 20 21 22 25 26 27 30 31 32
/// 21 22 23 26 27 28 31 32 33
/// 22 23 24 27 28 29 32 33 34
///
/// // Window 3x3, strides 1x1, padding 1x1
/// unwrap(A, 3, 3, 1, 1, 1, 1, False) [9 25 1 1]
///  0  0  0  0  0  0 10 11 12 13  0 15 16 17 18  0 20 21 22 23  0 25 26 27 28
///  0  0  0  0  0 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29
///  0  0  0  0  0 11 12 13 14  0 16 17 18 19  0 21 22 23 24  0 26 27 28 29  0
///  0 10 11 12 13  0 15 16 17 18  0 20 21 22 23  0 25 26 27 28  0 30 31 32 33
/// 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34
/// 11 12 13 14  0 16 17 18 19  0 21 22 23 24  0 26 27 28 29  0 31 32 33 34  0
///  0 15 16 17 18  0 20 21 22 23  0 25 26 27 28  0 30 31 32 33  0  0  0  0  0
/// 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34  0  0  0  0  0
/// 16 17 18 19  0 21 22 23 24  0 26 27 28 29  0 31 32 33 34  0  0  0  0  0  0
/// ```
#[allow(clippy::too_many_arguments)]
pub fn unwrap<T: HasAfEnum>(
    input: &Array<T>,
    wx: i64,
    wy: i64,
    sx: i64,
    sy: i64,
    px: i64,
    py: i64,
    is_column: bool,
) -> Array<T> {
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_unwrap(
            &mut temp as *mut af_array,
            input.get(),
            wx,
            wy,
            sx,
            sy,
            px,
            py,
            is_column,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Converts unwrapped image to an image
///
/// Wrap takes an unwrapped image (see unwrap()) and converts it back to an image.
///
/// The inputs to this function should be the same as the inputs used to generate the unwrapped
/// image.
///
/// # Parameters
///
/// - `input` is the output of unwrap function call
/// - `ox` is the 0th-dimension of output image
/// - `oy` is the 1st-dimension of output image
/// - `wx` is the block window size along 0th-dimension
/// - `wy` is the block window size along 1st-dimension
/// - `sx` is the stride along 0th-dimension
/// - `sy` is the stride along 1st-dimension
/// - `px` is the padding used along 0th-dimension between [0, wx).
/// - `py` is the padding used along 1st-dimension between [0, wy).
/// - `is_column` specifies the layout for the unwrapped patch. If is_column is false, the rows are treated as the patches
///
/// # Return Values
///
/// Image(Array) created from unwrapped Image(Array)
#[allow(clippy::too_many_arguments)]
pub fn wrap<T: HasAfEnum>(
    input: &Array<T>,
    ox: i64,
    oy: i64,
    wx: i64,
    wy: i64,
    sx: i64,
    sy: i64,
    px: i64,
    py: i64,
    is_column: bool,
) -> Array<T> {
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_wrap(
            &mut temp as *mut af_array,
            input.get(),
            ox,
            oy,
            wx,
            wy,
            sx,
            sy,
            px,
            py,
            is_column,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Summed area table of an Image
///
/// # Parameters
///
/// - `input` is the input image
///
/// # Return Values
///
/// Summed area table (a.k.a Integral Image) of the input image.
pub fn sat<T>(input: &Array<T>) -> Array<T::AggregateOutType>
where
    T: HasAfEnum + RealNumber,
    T::AggregateOutType: HasAfEnum,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_sat(&mut temp as *mut af_array, input.get());
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// RGB to YCbCr colorspace converter.
///
/// RGB (Red, Green, Blue) is the most common format used in computer imaging. RGB stores
/// individual values for red, green and blue, and hence the 3 values per pixel. A combination of
/// these three values produces the gamut of unique colors.
///
/// YCbCr is a family of color spaces used as a part of the color image pipeline in video and
/// digital photography systems where Y is luma component and Cb & Cr are the blue-difference and
/// red-difference chroma components.
///
/// Input array to this function should be of real data in the range [0,1].
///
/// # Parameters
///
/// - `input` is the input image in RGB color space
/// - `standard` is the target color space - [YCbCr standard](./enum.YCCStd.html)
///
/// # Return Values
///
/// Image(Array) in YCbCr color space
pub fn rgb2ycbcr<T>(input: &Array<T>, standard: YCCStd) -> Array<T>
where
    T: HasAfEnum + RealFloating,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_rgb2ycbcr(&mut temp as *mut af_array, input.get(), standard as c_uint);
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// YCbCr to RGB colorspace converter.
///
/// YCbCr is a family of color spaces used as a part of the color image pipeline in video and
/// digital photography systems where Y is luma component and Cb & Cr are the blue-difference and
/// red-difference chroma components.
///
/// RGB (Red, Green, Blue) is the most common format used in computer imaging. RGB stores
/// individual values for red, green and blue, and hence the 3 values per pixel. A combination of
/// these three values produces the gamut of unique colors.
///
/// Input array to this function should be of real data with the following range in their
/// respective channels.
///
/// - Y  -> [16,219]
/// - Cb -> [16,240]
/// - Cr -> [16,240]
///
/// # Parameters
///
/// - `input` is the input image in YCbCr color space
/// - `standard` is the [YCbCr standard](./enum.YCCStd.html) in which input image color space is
/// present.
///
/// # Return Values
///
/// Image(Array) in RGB color space
pub fn ycbcr2rgb<T>(input: &Array<T>, standard: YCCStd) -> Array<T>
where
    T: HasAfEnum + RealFloating,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_ycbcr2rgb(&mut temp as *mut af_array, input.get(), standard as c_uint);
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Function to check if Image I/O is available
///
/// # Parameters
///
/// None
///
/// # Return Values
///
/// Return a boolean indicating if ArrayFire was compiled with Image I/O support
pub fn is_imageio_available() -> bool {
    let mut temp: bool = false;
    unsafe {
        af_is_image_io_available(&mut temp as *mut bool);
    }
    temp
}

/// Transform input coordinates
///
/// The transform function uses a perspective transform matrix to transform input coordinates
/// (given as two dimensions) into a coordinates matrix.
///
/// The output is a 4x2 matrix, indicating the coordinates of the 4 bidimensional transformed
/// points.
///
/// # Parameters
///
/// - `tf` is the transformation matrix
/// - `d0` is the first input dimension
/// - `d1` is the second input dimension
///
/// # Return Values
///
/// Transformed coordinates
pub fn transform_coords<T>(tf: &Array<T>, d0: f32, d1: f32) -> Array<T>
where
    T: HasAfEnum + RealFloating,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_transform_coordinates(&mut temp as *mut af_array, tf.get(), d0, d1);
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Find Image moments
///
/// # Parameters
///
/// - `input` is the input image
/// - `moment` is the type of moment to be computed, takes a value of
/// [enum](./enum.MomentType.html)
///
/// # Return Values
///
/// Moments Array
pub fn moments<T>(input: &Array<T>, moment: MomentType) -> Array<f32>
where
    T: HasAfEnum + MomentsComputable,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_moments(&mut temp as *mut af_array, input.get(), moment as c_uint);
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Find Image moment for whole image
///
/// # Parameters
///
/// - `input` is the input image
/// - `moment` is the type of moment to be computed, takes a value of
/// [enum](./enum.MomentType.html)
///
/// # Return Values
///
/// Moment value of the whole image
pub fn moments_all<T>(input: &Array<T>, moment: MomentType) -> f64
where
    T: HasAfEnum + MomentsComputable,
{
    let mut temp: f64 = 0.0;
    unsafe {
        let err_val = af_moments_all(&mut temp as *mut c_double, input.get(), moment as c_uint);
        HANDLE_ERROR(AfError::from(err_val));
    }
    temp
}

/// One dimensional median filter on image
///
/// # Parameters
///
///  - `input` is the input image(Array)
///  - `wlen` is the horizontal length of the filter
///  - `etype` is enum of type [BorderType](./enum.BorderType.html)
///
/// # Return Values
///
/// An Array with filtered image data.
pub fn medfilt1<T>(input: &Array<T>, wlen: u64, etype: BorderType) -> Array<T>
where
    T: HasAfEnum + ImageFilterType,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_medfilt1(
            &mut temp as *mut af_array,
            input.get(),
            wlen as dim_t,
            etype as c_uint,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Canny edge detection operator
///
/// The Canny edge detector is an edge detection operator that uses a multi-stage algorithm to detect a wide range of edges in images. A more in-depth discussion of it can be found [here](https://en.wikipedia.org/wiki/Canny_edge_detector).
///
/// # Parameters
///
/// - `input` is the input image
/// - `threshold_type` determines whether the user-set high threshold is used or derived automatically. It can take values defined by the enum [CannyThresholdType](./enum.CannyThresholdType.html)
/// - `low` is the lower threshold, as a % of the maximum value (or of the auto-derived high threshold)
/// - `high` is the higher threshold, as a % of the maximum value in the gradient image, used in the hysteresis procedure. This value is ignored if [CannyThresholdType::OTSU](./enum.CannyThresholdType.html) is chosen.
/// - `sobel_window` is the window size of the Sobel kernel used for computing gradient direction and magnitude.
/// - `is_fast` indicates whether the L<SUB>1</SUB> norm (faster but less accurate) is used to compute the image gradient magnitude instead of the L<SUB>2</SUB> norm.
///
/// # Return Values
///
/// An Array of binary type [DType::B8](./enum.DType.html) indicating edges (all pixels with
/// non-zero values are edges).
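///
/// # Examples
///
/// A minimal sketch using manual thresholds; the threshold percentages and Sobel
/// window size below are illustrative choices, not recommendations:
///
/// ```rust,no_run
/// use arrayfire::{canny, randu, CannyThresholdType, Dim4};
/// // Stand-in grayscale image; in practice this would come from load_image.
/// let img = randu::<f32>(Dim4::new(&[480, 640, 1, 1]));
/// let edges = canny(&img, CannyThresholdType::MANUAL, 0.2, 0.8, 3, false);
/// // `edges` is a boolean Array; non-zero pixels mark detected edges.
/// ```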
pub fn canny<T>(
    input: &Array<T>,
    threshold_type: CannyThresholdType,
    low: f32,
    high: f32,
    sobel_window: u32,
    is_fast: bool,
) -> Array<bool>
where
    T: HasAfEnum + EdgeComputable,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_canny(
            &mut temp as *mut af_array,
            input.get(),
            threshold_type as c_int,
            low,
            high,
            sobel_window as c_uint,
            is_fast,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Anisotropic smoothing filter
///
/// The anisotropic diffusion algorithm aims at removing noise from images
/// while preserving important features such as edges. The algorithm
/// essentially creates a scale-space representation of the original
/// image, where the image from the previous step is used to create a new,
/// blurred version via the diffusion process. Standard isotropic diffusion
/// methods, such as Gaussian blur, don't take the local content (the smaller
/// neighborhood of the pixel currently being processed) into account while
/// removing noise. Anisotropic diffusion uses the flux equations given below
/// to achieve that. The flux equation is the formula used by the diffusion
/// process to determine how much a pixel in the neighborhood should contribute
/// to the blurring operation being done at the current pixel at a given iteration.
///
/// The flux function can be either exponential or quadratic.
///
/// <table>
/// <caption id="multi row">Available Flux Functions</caption>
/// <tr>
///     <td align="center" style="vertical-align:middle;">
///       AF_FLUX_QUADRATIC
///     </td>
///     <td align="center">
///       \begin{equation}
///         \frac{1}{1 + (\frac{\| \nabla I\|}{K})^2}
///       \end{equation}
///     </td>
/// </tr>
/// <tr>
///     <td align="center" style="vertical-align:middle;">
///       AF_FLUX_EXPONENTIAL
///     </td>
///     <td align="center">
///       \begin{equation}
///         \exp{-(\frac{\| \nabla I\|}{K})^2}
///       \end{equation}
///     </td>
/// </tr>
/// </table>
///
/// Please be cautious when choosing the time step parameter for the function.
/// Appropriate time steps for solving this type of PDE depend on
/// the dimensionality of the image and the order of the equation.
/// Stable values for most 2D and 3D functions are 0.125 and 0.0625,
/// respectively. The time step values are automatically constrained
/// to the stable value.
///
/// Another input parameter to be cautious about is the conductance
/// parameter: lower values strongly preserve image features and
/// vice versa. For human vision, this value ranges from 0.5 to 2.0.
///
/// # Parameters
///
/// - `img` is the noisy input image
/// - `dt` is the time step for the diffusion equation
/// - `k` is the conductance parameter for diffusion
/// - `iters` is the number of iterations of diffusion performed
/// - `fftype` dictates the type of flux flow and it is an
///    [enum](./enum.FluxFn.html)
/// - `diff_kind` dictates the type of diffusion and it is an
///   [enum](./enum.DiffusionEq.html)
///
/// # Return Values
///
/// Returns an anisotropically smoothed, noise-reduced image
///
/// ### References
///
///  - Pietro Perona and Jitendra Malik, `Scale-space and edge detection
///    using anisotropic diffusion,` IEEE Transactions on Pattern Analysis
///    and Machine Intelligence, vol. 12, pp. 629-639, 1990.
///  - R. Whitaker and X. Xue, `Variable-Conductance, Level-Set Curvature
///    for Image Denoising`, International Conference on Image Processing,
///    2001, pp. 142-145, Vol. 3.
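///
/// # Examples
///
/// A minimal sketch using the stable 2D time step mentioned above; the variants
/// `FluxFn::QUADRATIC` and `DiffusionEq::GRAD` are assumed names for the quadratic
/// flux function and gradient-based diffusion, and the conductance and iteration
/// count are illustrative:
///
/// ```rust,no_run
/// use arrayfire::{anisotropic_diffusion, randu, DiffusionEq, Dim4, FluxFn};
/// let noisy = randu::<f32>(Dim4::new(&[480, 640, 1, 1]));
/// // dt = 0.125 (stable for 2D), conductance k = 1.0, 64 iterations.
/// let smooth = anisotropic_diffusion(&noisy, 0.125, 1.0, 64, FluxFn::QUADRATIC, DiffusionEq::GRAD);
/// ```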
pub fn anisotropic_diffusion<T>(
    img: &Array<T>,
    dt: f32,
    k: f32,
    iters: u32,
    fftype: FluxFn,
    diff_kind: DiffusionEq,
) -> Array<T::AbsOutType>
where
    T: HasAfEnum + EdgeComputable,
    T::AbsOutType: HasAfEnum,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_anisotropic_diffusion(
            &mut temp as *mut af_array,
            img.get(),
            dt,
            k,
            iters,
            fftype as c_uint,
            diff_kind as c_uint,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Segment image based on similar pixel characteristics
///
/// This filter is similar to [regions](./fn.regions.html) with additional criteria for
/// segmentation. In regions, all connected pixels are considered to be a single component.
/// In this variation of connected components, pixels having similar pixel statistics in the
/// neighborhoods around a given set of seed points are grouped together.
///
/// The parameter `radius` determines the size of the neighborhood around a seed point.
///
/// Mean and variance are the pixel statistics that are computed across all neighborhoods around
/// the given set of seed points. The pixels which are connected to seed points and lie in the
/// confidence interval are grouped together. The confidence interval is given below.
///
/// \begin{equation}
///     [\mu - \alpha * \sigma, \mu + \alpha * \sigma]
/// \end{equation}
/// where
///
/// - $ \mu $ is the mean of the pixels in the seed neighborhood
/// - $ \sigma^2 $ is the variance of the pixels in the seed neighborhood
/// - $ \alpha $ is the multiplier used to control the width of the confidence interval.
///
/// This filter follows an iterative approach for fine-tuning the segmentation. An initial
/// segmentation followed by a finite number `iterations` of segmentations is performed.
/// The user-provided parameter `iterations` is only a request, and the algorithm can preempt
/// the execution if the variance approaches zero. The initial segmentation uses the mean and
/// variance calculated from the neighborhoods of all the seed points. For subsequent
/// segmentations, all pixels in the previous segmentation are used to re-calculate the mean
/// and variance (as opposed to using the pixels in the neighborhood of the seed point).
///
/// # Parameters
///
/// - `input` is the input image
/// - `seedx` contains the x coordinates of seeds in image coordinates
/// - `seedy` contains the y coordinates of seeds in image coordinates
/// - `radius` is the neighborhood region to be considered around each seed point
/// - `multiplier` controls the threshold range computed from the mean and variance of seed point neighborhoods
/// - `iterations` is the number of times the segmentation is performed
/// - `segmented_val` is the value to which valid pixels of the output array are set
///
/// # Return Values
///
/// Segmented image (Array), based on pixel characteristics, with regions surrounding the seed points
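///
/// # Examples
///
/// A minimal sketch with a single seed point; the radius, multiplier, iteration
/// count and segmented value below are illustrative choices:
///
/// ```rust,no_run
/// use arrayfire::{confidence_cc, randu, Array, Dim4};
/// let img = randu::<f32>(Dim4::new(&[480, 640, 1, 1]));
/// // One seed at image coordinates (x, y) = (320, 240).
/// let seedx = Array::new(&[320u32], Dim4::new(&[1, 1, 1, 1]));
/// let seedy = Array::new(&[240u32], Dim4::new(&[1, 1, 1, 1]));
/// let segmented = confidence_cc(&img, &seedx, &seedy, 3, 3, 25, 1.0);
/// ```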
pub fn confidence_cc<InOutType>(
    input: &Array<InOutType>,
    seedx: &Array<u32>,
    seedy: &Array<u32>,
    radius: u32,
    multiplier: u32,
    iterations: u32,
    segmented_val: f64,
) -> Array<InOutType>
where
    InOutType: ConfidenceCCInput,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_confidence_cc(
            &mut temp as *mut af_array,
            input.get(),
            seedx.get(),
            seedy.get(),
            radius,
            multiplier,
            iterations as i32,
            segmented_val,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Iterative Deconvolution
///
/// The following table shows the iteration update equations of the respective
/// deconvolution algorithms.
///
/// <table>
/// <tr><th>Algorithm</th><th>Update Equation</th></tr>
/// <tr>
///     <td>Landweber</td>
///     <td>
///         $ \hat{I}_{n} = \hat{I}_{n-1} + \alpha * P^T \otimes (I - P \otimes \hat{I}_{n-1}) $
///     </td>
/// </tr>
/// <tr>
///   <td>Richardson-Lucy</td>
///   <td>
///     $ \hat{I}_{n} = \hat{I}_{n-1} \cdot ( \frac{I}{\hat{I}_{n-1} \otimes P} \otimes P^T ) $
///   </td>
/// </tr>
/// </table>
///
/// where
///
/// - $ I $ is the observed (input/blurred) image
/// - $ P $ is the point spread function
/// - $ P^T $ is the transpose of the point spread function
/// - $ \hat{I}_{n} $ is the current iteration's updated image estimate
/// - $ \hat{I}_{n-1} $ is the previous iteration's image estimate
/// - $ \alpha $ is the relaxation factor
/// - $ \otimes $ indicates the convolution operator
///
/// # Parameters
///
/// - `input` is the observed (blurred) image
/// - `kernel` is the point spread function $ P $
/// - `iterations` is the number of iterations performed
/// - `relaxation_factor` is the relaxation factor $ \alpha $ used in the Landweber update
/// - `algo` takes a value of the [IterativeDeconvAlgo](./enum.IterativeDeconvAlgo.html) enum
///
/// # Return Values
///
/// The output Array will be of type f64 if the input array type is f64; for all other
/// types, the output will be f32. Should the caller want to save the image to disk or
/// require the output values to lie in a fixed range, that should be done by the caller
/// explicitly.
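///
/// # Examples
///
/// A minimal sketch assuming the crate's `gaussian_kernel` helper as a stand-in
/// point spread function and `IterativeDeconvAlgo::LANDWEBER` as the variant name;
/// the iteration count and relaxation factor are illustrative:
///
/// ```rust,no_run
/// use arrayfire::{gaussian_kernel, iterative_deconv, randu, Dim4, IterativeDeconvAlgo};
/// let blurred = randu::<f32>(Dim4::new(&[480, 640, 1, 1])); // stand-in for a blurred image
/// let psf = gaussian_kernel(13, 13, 2.5, 2.5); // assumed PSF used for the blur
/// let restored = iterative_deconv(&blurred, &psf, 10, 0.05, IterativeDeconvAlgo::LANDWEBER);
/// ```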
pub fn iterative_deconv<T>(
    input: &Array<T>,
    kernel: &Array<f32>,
    iterations: u32,
    relaxation_factor: f32,
    algo: IterativeDeconvAlgo,
) -> Array<T::AbsOutType>
where
    T: DeconvInput,
    T::AbsOutType: HasAfEnum,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_iterative_deconv(
            &mut temp as *mut af_array,
            input.get(),
            kernel.get(),
            iterations,
            relaxation_factor,
            algo as c_uint,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Inverse deconvolution
///
/// These are linear algorithms, i.e. they are non-iterative in
/// nature and usually faster than iterative deconvolution algorithms.
///
/// Depending on the value passed to `algo` of the enum type [InverseDeconvAlgo](./enum.InverseDeconvAlgo.html),
/// different equations are used to compute the final result.
///
/// #### Tikhonov's Deconvolution Method:
///
/// The update equation for this algorithm is as follows:
///
/// <div>
/// \begin{equation}
/// \hat{I}_{\omega} = \frac{ I_{\omega} * P^{*}_{\omega} } { |P_{\omega}|^2 + \gamma }
/// \end{equation}
/// </div>
///
/// where
///
/// - $ I_{\omega} $ is the observed (input/blurred) image in the frequency domain
/// - $ P_{\omega} $ is the point spread function in the frequency domain
/// - $ P^{*}_{\omega} $ is the complex conjugate of the point spread function in the frequency domain
/// - $ \gamma $ is a user-defined regularization constant
///
/// # Parameters
///
/// - `input` is the observed (blurred) image
/// - `kernel` is the point spread function
/// - `gamma` is the regularization constant $ \gamma $
/// - `algo` takes a value of the [InverseDeconvAlgo](./enum.InverseDeconvAlgo.html) enum
///
/// # Return Values
///
/// The output Array will be of type f64 if the input array type is f64; for all other
/// types, the output will be f32. Should the caller want to save the image to disk or
/// require the output values to lie in a fixed range, that should be done by the caller
/// explicitly.
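///
/// # Examples
///
/// A minimal sketch of Tikhonov inverse deconvolution; the regularization constant
/// and the `gaussian_kernel` stand-in PSF are illustrative, and
/// `InverseDeconvAlgo::TIKHONOV` is the assumed variant name:
///
/// ```rust,no_run
/// use arrayfire::{gaussian_kernel, inverse_deconv, randu, Dim4, InverseDeconvAlgo};
/// let blurred = randu::<f32>(Dim4::new(&[480, 640, 1, 1])); // stand-in for a blurred image
/// let psf = gaussian_kernel(13, 13, 2.5, 2.5);
/// let restored = inverse_deconv(&blurred, &psf, 0.05, InverseDeconvAlgo::TIKHONOV);
/// ```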
pub fn inverse_deconv<T>(
    input: &Array<T>,
    kernel: &Array<f32>,
    gamma: f32,
    algo: InverseDeconvAlgo,
) -> Array<T::AbsOutType>
where
    T: DeconvInput,
    T::AbsOutType: HasAfEnum,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_inverse_deconv(
            &mut temp as *mut af_array,
            input.get(),
            kernel.get(),
            gamma,
            algo as c_uint,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}