arrayfire/vision/mod.rs
use super::core::{
    af_array, af_features, dim_t, AfError, Array, HasAfEnum, HomographyType, ImageFilterType,
    MatchType, RealFloating, HANDLE_ERROR,
};

use libc::{c_float, c_int, c_uint};
use std::mem;

// af_sift and af_gloh use patented algorithms, so they are not added here;
// they are NOT built in installer builds

extern "C" {
    fn af_create_features(feat: *mut af_features, num: dim_t) -> c_int;
    fn af_retain_features(feat: *mut af_features, feat: af_features) -> c_int;
    fn af_get_features_num(num: *mut dim_t, feat: af_features) -> c_int;
    fn af_get_features_xpos(out: *mut af_array, feat: af_features) -> c_int;
    fn af_get_features_ypos(out: *mut af_array, feat: af_features) -> c_int;
    fn af_get_features_score(out: *mut af_array, feat: af_features) -> c_int;
    fn af_get_features_orientation(out: *mut af_array, feat: af_features) -> c_int;
    fn af_get_features_size(out: *mut af_array, feat: af_features) -> c_int;
    fn af_release_features(feat: af_features) -> c_int;

    fn af_fast(
        out: *mut af_features,
        input: af_array,
        thr: c_float,
        arc_len: c_uint,
        non_max: bool,
        feature_ratio: c_float,
        edge: c_uint,
    ) -> c_int;

    fn af_harris(
        out: *mut af_features,
        input: af_array,
        m: c_uint,
        r: c_float,
        s: c_float,
        bs: c_uint,
        k: c_float,
    ) -> c_int;

    fn af_orb(
        out: *mut af_features,
        desc: *mut af_array,
        arr: af_array,
        fast_thr: c_float,
        max_feat: c_uint,
        scl_fctr: c_float,
        levels: c_uint,
        blur_img: bool,
    ) -> c_int;

    fn af_hamming_matcher(
        idx: *mut af_array,
        dist: *mut af_array,
        query: af_array,
        train: af_array,
        dist_dim: dim_t,
        n_dist: c_uint,
    ) -> c_int;

    fn af_nearest_neighbour(
        idx: *mut af_array,
        dist: *mut af_array,
        q: af_array,
        t: af_array,
        dist_dim: dim_t,
        n_dist: c_uint,
        dist_type: c_int,
    ) -> c_int;

    fn af_match_template(
        out: *mut af_array,
        search_img: af_array,
        template_img: af_array,
        mtype: c_uint,
    ) -> c_int;

    fn af_susan(
        feat: *mut af_features,
        i: af_array,
        r: c_uint,
        d: c_float,
        g: c_float,
        f: c_float,
        e: c_uint,
    ) -> c_int;

    fn af_dog(out: *mut af_array, i: af_array, r1: c_int, r2: c_int) -> c_int;

    fn af_homography(
        H: *mut af_array,
        inliers: *mut c_int,
        x_src: af_array,
        y_src: af_array,
        x_dst: af_array,
        y_dst: af_array,
        htype: c_uint,
        inlier_thr: c_float,
        iterations: c_uint,
        otype: c_uint,
    ) -> c_int;
}

/// A set of Array objects (usually used in a computer vision context)
///
/// The `Features` struct is used by computer vision functions
/// to return the outcome of their operation. Typically, such output
/// has the following Arrays:
///
/// - X positions of the features
/// - Y positions of the features
/// - Scores of the features
/// - Orientations of the features
/// - Sizes of the features
///
/// ## Sharing Across Threads
///
/// When sharing this object with other threads, there is no need to wrap
/// it in an Arc unless only a single instance of it must exist.
/// This is because the ArrayFire internals pointed to by the features handle
/// are reference counted in a thread-safe manner. However, if the features
/// are to be modified, please do wrap the object in a Mutex or Read-Write lock.
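///
/// # Examples
///
/// A minimal sketch of the sharing behaviour described above; the random
/// input image and the parameter values are only illustrative (assumes the
/// usual crate-root re-exports):
///
/// ```rust,no_run
/// use arrayfire::{fast, randu, Dim4};
/// use std::thread;
///
/// // Placeholder input; a real application would load a grayscale image.
/// let img = randu::<f32>(Dim4::new(&[128, 128, 1, 1]));
/// let feats = fast(&img, 20.0, 9, true, 0.05, 3);
///
/// // Clone only retains the underlying handle (it is reference counted),
/// // so a copy can be moved to another thread without an Arc.
/// let feats2 = feats.clone();
/// let worker = thread::spawn(move || {
///     println!("features seen from worker: {}", feats2.num_features());
/// });
/// worker.join().unwrap();
/// ```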
pub struct Features {
    feat: af_features,
}

unsafe impl Send for Features {}
unsafe impl Sync for Features {}

macro_rules! feat_func_def {
    ($doc_str: expr, $fn_name: ident, $ffi_name: ident) => (
        #[doc=$doc_str]
        pub fn $fn_name(&self) -> Array<f32> {
            unsafe {
                let mut temp: af_array = std::ptr::null_mut();
                let err_val = $ffi_name(&mut temp as *mut af_array, self.feat);
                HANDLE_ERROR(AfError::from(err_val));

                let temp_array: Array<f32> = temp.into();
                let retained = temp_array.clone();
                mem::forget(temp_array);

                retained
            }
        }
    )
}

impl Features {
    /// Create and return an object of type Features
    ///
    /// This object is basically a bunch of Arrays.
    pub fn new(n: u64) -> Self {
        unsafe {
            let mut temp: af_features = std::ptr::null_mut();
            let err_val = af_create_features(&mut temp as *mut af_features, n as dim_t);
            HANDLE_ERROR(AfError::from(err_val));
            Self { feat: temp }
        }
    }

    /// Get total number of features found
    pub fn num_features(&self) -> i64 {
        let mut temp: i64 = 0;
        unsafe {
            let err_val = af_get_features_num(
                &mut temp as *mut dim_t,
                self.feat as *const dim_t as af_features,
            );
            HANDLE_ERROR(AfError::from(err_val));
        }
        temp
    }

    feat_func_def!("Get x coordinates Array", xpos, af_get_features_xpos);
    feat_func_def!("Get y coordinates Array", ypos, af_get_features_ypos);
    feat_func_def!("Get score Array", score, af_get_features_score);
    feat_func_def!(
        "Get orientation Array",
        orientation,
        af_get_features_orientation
    );
    feat_func_def!("Get features size Array", size, af_get_features_size);

    /// Get the internal handle for [Features](./struct.Features.html) object
    pub unsafe fn get(&self) -> af_features {
        self.feat
    }
}

impl Clone for Features {
    fn clone(&self) -> Self {
        unsafe {
            let mut temp: af_features = std::ptr::null_mut();
            let ret_val = af_retain_features(&mut temp as *mut af_features, self.feat);
            HANDLE_ERROR(AfError::from(ret_val));
            Self { feat: temp }
        }
    }
}

impl Drop for Features {
    fn drop(&mut self) {
        unsafe {
            let ret_val = af_release_features(self.feat);
            HANDLE_ERROR(AfError::from(ret_val));
        }
    }
}

/// FAST feature detector
///
/// A circle of radius 3 pixels, translating into a total of 16 pixels, is checked for sequential
/// segments of pixels much brighter or much darker than the central one. For a pixel p to be
/// considered a feature, there must exist a sequential segment of arc_length pixels in the circle
/// around it such that all are greater than (p + thr) or smaller than (p - thr). After all
/// features in the image are detected, if nonmax is true, non-maximal suppression is applied:
/// each detected feature is checked against the features detected in its 8-neighborhood and
/// discarded if its score is not maximal.
///
/// # Parameters
///
/// - `input` - the input image Array
/// - `thr` - FAST threshold for which a pixel of the circle around the center pixel is considered
/// to be brighter or darker
/// - `arc_len` - length of arc (or sequential segment) to be tested, must be within range [9-16]
/// - `non_max` - performs non-maximal suppression if true
/// - `feat_ratio` - maximum ratio of features to detect, the maximum number of features is
/// calculated by `feature_ratio * num of elements`. The maximum number of features is not based on
/// the score; instead, features detected after the limit is reached are discarded.
/// - `edge` - is the length of the edges in the image to be discarded by FAST (minimum is 3, as the
/// radius of the circle)
///
/// # Return Values
///
/// This function returns an object of struct [Features](./struct.Features.html) containing Arrays
/// for x and y coordinates and score, while the orientation Array is set to 0 as FAST does not
/// compute orientation. Size is set to 1 as FAST does not compute multiple scales.
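///
/// # Examples
///
/// A minimal sketch with a random placeholder image and illustrative
/// parameter values (assumes the usual crate-root re-exports):
///
/// ```rust,no_run
/// use arrayfire::{fast, randu, Dim4};
///
/// // A real use case would load a grayscale image instead.
/// let img = randu::<f32>(Dim4::new(&[480, 640, 1, 1]));
///
/// // threshold 20, arc length 9, non-maximal suppression on,
/// // up to 5% of the pixels as features, minimum edge margin of 3.
/// let feats = fast(&img, 20.0, 9, true, 0.05, 3);
/// println!("FAST found {} corners", feats.num_features());
/// let scores = feats.score(); // per-feature FAST score
/// ```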
pub fn fast<T>(
    input: &Array<T>,
    thr: f32,
    arc_len: u32,
    non_max: bool,
    feat_ratio: f32,
    edge: u32,
) -> Features
where
    T: HasAfEnum + ImageFilterType,
{
    unsafe {
        let mut temp: af_features = std::ptr::null_mut();
        let err_val = af_fast(
            &mut temp as *mut af_features,
            input.get(),
            thr,
            arc_len,
            non_max,
            feat_ratio,
            edge,
        );
        HANDLE_ERROR(AfError::from(err_val));
        Features { feat: temp }
    }
}

/// Harris corner detector.
///
/// Compute corners using the Harris corner detector approach. For each pixel, a small window is
/// used to calculate the determinant and trace of such a window, from which a response is
/// calculated. Pixels are considered corners if they are local maxima and have a high positive
/// response.
///
/// # Parameters
///
/// - `input` is the array containing a grayscale image (color images are not supported)
/// - `max_corners` is the maximum number of corners to keep, only retains those with highest
/// Harris responses
/// - `min_response` is the minimum response in order for a corner to be retained, only used if
/// `max_corners` = 0
/// - `sigma` is the standard deviation of a circular window (its dimensions will be calculated
/// according to the standard deviation), the covariance matrix will be calculated over a circular
/// neighborhood of this standard deviation (only used when `block_size` == 0, must be >= 0.5 and
/// <= 5.0)
/// - `block_size` is the square window size, the covariance matrix will be calculated over a
/// square neighborhood of this size (must be >= 3 and <= 31)
/// - `k_thr` is the Harris constant, usually set empirically to 0.04 (must be >= 0.01)
///
/// # Return Values
///
/// This function returns an object of struct [Features](./struct.Features.html) containing Arrays
/// for x and y coordinates and score, while the orientation and size Arrays are set to 0 and 1,
/// respectively, since Harris does not compute that information.
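///
/// # Examples
///
/// A minimal sketch with a random placeholder image and illustrative
/// parameter values (assumes the usual crate-root re-exports):
///
/// ```rust,no_run
/// use arrayfire::{harris, randu, Dim4};
///
/// // A real use case would load a grayscale image instead.
/// let img = randu::<f32>(Dim4::new(&[480, 640, 1, 1]));
///
/// // Keep the 500 strongest corners, 3x3 block window, k = 0.04.
/// let feats = harris(&img, 500, 1e5, 1.0, 3, 0.04);
/// println!("Harris found {} corners", feats.num_features());
/// ```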
pub fn harris<T>(
    input: &Array<T>,
    max_corners: u32,
    min_response: f32,
    sigma: f32,
    block_size: u32,
    k_thr: f32,
) -> Features
where
    T: HasAfEnum + RealFloating,
{
    unsafe {
        let mut temp: af_features = std::ptr::null_mut();
        let err_val = af_harris(
            &mut temp as *mut af_features,
            input.get(),
            max_corners,
            min_response,
            sigma,
            block_size,
            k_thr,
        );
        HANDLE_ERROR(AfError::from(err_val));
        Features { feat: temp }
    }
}

/// ORB feature descriptor
///
/// Extract ORB descriptors from FAST features that hold higher Harris responses. FAST does not
/// compute orientation, thus, the orientation of features is calculated using the intensity
/// centroid. As FAST is also not multi-scale enabled, a multi-scale pyramid is calculated by
/// downsampling the input image multiple times followed by FAST feature detection on each scale.
///
/// # Parameters
///
/// - `input` - the input image Array
/// - `fast_thr` - FAST threshold for which a pixel of the circle around the central pixel is
/// considered to be brighter or darker
/// - `max_feat` - maximum number of features to hold
/// - `scl_fctr` - factor to downsample the input image, meaning that each level will hold the
/// prior level's dimensions divided by `scl_fctr`
/// - `levels` - number of levels to be computed for the image pyramid
/// - `blur_img` - if true, blur the image with a Gaussian filter with sigma=2 before computing
/// descriptors to increase robustness against noise
///
/// # Return Values
///
/// This function returns a tuple of [`Features`](./struct.Features.html) and
/// [`Array`](./struct.Array.html). The Features object is composed of Arrays for x and y
/// coordinates, score, orientation and size of the selected features. The Array object is a
/// two dimensional Array of size Nx8 where N is the number of selected features.
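///
/// # Examples
///
/// A minimal sketch with a random placeholder image and illustrative
/// parameter values (assumes the usual crate-root re-exports):
///
/// ```rust,no_run
/// use arrayfire::{orb, randu, Dim4};
///
/// // A real use case would load a grayscale image instead.
/// let img = randu::<f32>(Dim4::new(&[480, 640, 1, 1]));
///
/// // FAST threshold 20, at most 400 features, 1.5x downsampling over 4 levels,
/// // blur the image before computing descriptors.
/// let (feats, descriptors) = orb(&img, 20.0, 400, 1.5, 4, true);
/// println!("ORB kept {} features", feats.num_features());
/// // `descriptors` is an Nx8 Array, one row per kept feature.
/// ```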
pub fn orb<T>(
    input: &Array<T>,
    fast_thr: f32,
    max_feat: u32,
    scl_fctr: f32,
    levels: u32,
    blur_img: bool,
) -> (Features, Array<T>)
where
    T: HasAfEnum + RealFloating,
{
    unsafe {
        let mut f: af_features = std::ptr::null_mut();
        let mut d: af_array = std::ptr::null_mut();
        let err_val = af_orb(
            &mut f as *mut af_features,
            &mut d as *mut af_array,
            input.get(),
            fast_thr,
            max_feat,
            scl_fctr,
            levels,
            blur_img,
        );
        HANDLE_ERROR(AfError::from(err_val));
        (Features { feat: f }, d.into())
    }
}

/// Hamming feature matcher
///
/// Calculates Hamming distances between two 2-dimensional arrays containing features, one of the
/// arrays containing the training data and the other the query data. One dimension of both arrays
/// must be of equal length, identifying the length of each feature. The other dimension indicates
/// the total number of features in each of the training and query arrays. Two 1-dimensional
/// arrays are created as results, one containing the smallest N distances of the query array and
/// another containing the indices of these distances in the training array. The resulting
/// 1-dimensional arrays have length equal to the number of features contained in the query array.
///
/// # Parameters
///
/// - `query` - Array containing the data to be queried
/// - `train` - Array containing the data to be used as training data
/// - `dist_dims` - indicates the dimension to analyze for distance (the dimension indicated here
/// must be of equal length for both query and train arrays)
/// - `n_dist` - is the number of smallest distances to return (currently, only values <= 256 are
/// supported)
///
/// # Return Values
///
/// This function returns a tuple of [Array](./struct.Array.html)s.
///
/// The first Array is of MxN size, where M is equal to the number of query features and N is
/// equal to `n_dist`. The value at position IxJ indicates the index in the train data array of
/// the Jth smallest distance to the Ith query value.
///
/// The second Array is of MxN size, where M is equal to the number of query features and N is
/// equal to `n_dist`. The value at position IxJ indicates the Hamming distance of the Jth
/// smallest distance to the Ith query value in the train data array.
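///
/// # Examples
///
/// A minimal sketch with hypothetical random descriptors in place of real
/// ones; the u32 element type, dimensions and values are illustrative
/// (assumes the usual crate-root re-exports):
///
/// ```rust,no_run
/// use arrayfire::{hamming_matcher, randu, Dim4};
///
/// // 8-element descriptors laid out along dimension 0:
/// // 100 train features and 20 query features.
/// let train = randu::<u32>(Dim4::new(&[8, 100, 1, 1]));
/// let query = randu::<u32>(Dim4::new(&[8, 20, 1, 1]));
///
/// // For each query feature, fetch the single closest train feature.
/// let (idx, dist) = hamming_matcher(&query, &train, 0, 1);
/// // `idx` holds indices into `train`, `dist` the corresponding Hamming distances.
/// ```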
pub fn hamming_matcher<T>(
    query: &Array<T>,
    train: &Array<T>,
    dist_dims: i64,
    n_dist: u32,
) -> (Array<u32>, Array<T::AggregateOutType>)
where
    T: HasAfEnum + ImageFilterType,
    T::AggregateOutType: HasAfEnum,
{
    unsafe {
        let mut idx: af_array = std::ptr::null_mut();
        let mut dist: af_array = std::ptr::null_mut();
        let err_val = af_hamming_matcher(
            &mut idx as *mut af_array,
            &mut dist as *mut af_array,
            query.get(),
            train.get(),
            dist_dims,
            n_dist,
        );
        HANDLE_ERROR(AfError::from(err_val));
        (idx.into(), dist.into())
    }
}

/// Nearest Neighbour.
///
/// Calculates nearest distances between two 2-dimensional arrays containing features based on the
/// type of distance computation chosen. Currently, AF_SAD (sum of absolute differences), AF_SSD
/// (sum of squared differences) and AF_SHD (hamming distance) are supported. One of the arrays
/// contains the training data and the other the query data. One dimension of both arrays must be
/// of equal length, identifying the length of each feature. The other dimension indicates the
/// total number of features in each of the training and query arrays. Two 1-dimensional arrays
/// are created as results, one containing the smallest N distances of the query array and another
/// containing the indices of these distances in the training array. The resulting 1-dimensional
/// arrays have length equal to the number of features contained in the query array.
///
/// # Parameters
///
/// - `query` is the array containing the data to be queried
/// - `train` is the array containing the data used as training data
/// - `dist_dim` indicates the dimension to analyze for distance (the dimension indicated here
/// must be of equal length for both query and train arrays)
/// - `n_dist` is the number of smallest distances to return (currently, only values <= 256 are
/// supported)
/// - `dist_type` is the distance computation type. Currently
/// [`MatchType::SAD`](./enum.MatchType.html), [`MatchType::SSD`](./enum.MatchType.html), and
/// [`MatchType::SHD`](./enum.MatchType.html) are supported.
///
/// # Return Values
///
/// A tuple of Arrays.
///
/// The first Array is of MxN size, where M is equal to the number of query features and N is
/// equal to `n_dist`. The value at position IxJ indicates the index in the train data array of
/// the Jth smallest distance to the Ith query value.
///
/// The second Array is of MxN size, where M is equal to the number of query features and N is
/// equal to `n_dist`. The value at position IxJ indicates the distance of the Jth smallest
/// distance to the Ith query value in the train data array based on the `dist_type` chosen.
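///
/// # Examples
///
/// A minimal sketch using random placeholder descriptors and the SSD metric;
/// dimensions and values are illustrative (assumes the usual crate-root
/// re-exports):
///
/// ```rust,no_run
/// use arrayfire::{nearest_neighbour, randu, Dim4, MatchType};
///
/// // 32-element descriptors laid out along dimension 0.
/// let train = randu::<f32>(Dim4::new(&[32, 100, 1, 1]));
/// let query = randu::<f32>(Dim4::new(&[32, 20, 1, 1]));
///
/// // Fetch the 2 nearest train features per query, using sum of squared differences.
/// let (idx, dist) = nearest_neighbour(&query, &train, 0, 2, MatchType::SSD);
/// ```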
pub fn nearest_neighbour<T>(
    query: &Array<T>,
    train: &Array<T>,
    dist_dim: i64,
    n_dist: u32,
    dist_type: MatchType,
) -> (Array<u32>, Array<T::AggregateOutType>)
where
    T: HasAfEnum + ImageFilterType,
    T::AggregateOutType: HasAfEnum,
{
    unsafe {
        let mut idx: af_array = std::ptr::null_mut();
        let mut dist: af_array = std::ptr::null_mut();
        let err_val = af_nearest_neighbour(
            &mut idx as *mut af_array,
            &mut dist as *mut af_array,
            query.get(),
            train.get(),
            dist_dim,
            n_dist,
            dist_type as c_int,
        );
        HANDLE_ERROR(AfError::from(err_val));
        (idx.into(), dist.into())
    }
}

/// Image matching
///
/// Template matching is an image processing technique to find small patches of an image which
/// match a given template image. A more in depth discussion on the topic can be found
/// [here](https://en.wikipedia.org/wiki/Template_matching).
///
/// # Parameters
///
/// - `search_img` is an array with image data
/// - `template_img` is the template we are looking for in the image
/// - `mtype` is the metric that should be used to calculate the disparity between a window in the
/// image and the template image. It can be one of the values defined by the enum
/// [MatchType](./enum.MatchType.html).
///
/// # Return Values
///
/// This function returns an Array with disparity values for the window starting at the
/// corresponding pixel position.
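///
/// # Examples
///
/// A minimal sketch with random placeholder data; a real use case would load
/// an image and a template patch (assumes the usual crate-root re-exports):
///
/// ```rust,no_run
/// use arrayfire::{match_template, randu, Dim4, MatchType};
///
/// let img = randu::<f32>(Dim4::new(&[480, 640, 1, 1]));
/// let tmpl = randu::<f32>(Dim4::new(&[16, 16, 1, 1]));
///
/// // Lower SAD disparity means a better match at that window position.
/// let disparity = match_template(&img, &tmpl, MatchType::SAD);
/// ```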
pub fn match_template<T>(
    search_img: &Array<T>,
    template_img: &Array<T>,
    mtype: MatchType,
) -> Array<T::AbsOutType>
where
    T: HasAfEnum + ImageFilterType,
    T::AbsOutType: HasAfEnum,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_match_template(
            &mut temp as *mut af_array,
            search_img.get(),
            template_img.get(),
            mtype as c_uint,
        );
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// SUSAN corner detector.
///
/// SUSAN is an acronym standing for Smallest Univalue Segment Assimilating Nucleus. This method
/// places a circular disc over the pixel to be tested (a.k.a. the nucleus) to compute the corner
/// measure of that corresponding pixel. The region covered by the circular disc is M, and a pixel
/// in this region is represented by m⃗ ∈ M, where m⃗₀ is the nucleus. Every pixel in the region is
/// compared to the nucleus using the following comparison function:
///
/// c(m⃗) = exp(−((I(m⃗) − I(m⃗₀)) / t)⁶)
///
/// where t is the intensity difference threshold and I is the brightness of the pixel.
///
/// The response of the SUSAN operator is given by the following equation:
///
/// R(M) = g − n(M) if n(M) < g
///
/// R(M) = 0 otherwise,
///
/// where n(M) = ∑ c(m⃗) over m⃗ ∈ M, g is named the geometric threshold, and n is the number of
/// pixels in the mask which are within t of the nucleus.
///
/// The importance of the parameters t and g is explained below:
///
/// - t determines how similar points have to be to the nucleus before they are considered to be a
/// part of the univalue segment
/// - g determines the minimum size of the univalue segment. For a large enough g, the SUSAN
/// operator becomes an edge detector.
///
/// # Parameters
///
/// - `input` is the input grayscale/intensity image
/// - `radius` is the nucleus radius for each pixel neighborhood
/// - `diff_thr` is the intensity difference threshold, a.k.a. **t** from the equations in the
/// description
/// - `geom_thr` is the geometric threshold
/// - `feature_ratio` is the maximum number of features that will be returned by the function
/// - `edge` indicates how many pixels of width at the image edges should be skipped for corner
/// detection
///
/// # Return Values
/// An object of type [Features](./struct.Features.html) composed of arrays for x and y
/// coordinates, score, orientation and size of selected features.
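///
/// # Examples
///
/// A minimal sketch with a random placeholder image and illustrative
/// parameter values (assumes the usual crate-root re-exports):
///
/// ```rust,no_run
/// use arrayfire::{randu, susan, Dim4};
///
/// // A real use case would load a grayscale image instead.
/// let img = randu::<f32>(Dim4::new(&[480, 640, 1, 1]));
///
/// // radius 3, intensity difference threshold 32, geometric threshold 10.
/// let feats = susan(&img, 3, 32.0, 10.0, 0.05, 3);
/// println!("SUSAN found {} corners", feats.num_features());
/// ```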
pub fn susan<T>(
    input: &Array<T>,
    radius: u32,
    diff_thr: f32,
    geom_thr: f32,
    feature_ratio: f32,
    edge: u32,
) -> Features
where
    T: HasAfEnum + ImageFilterType,
{
    unsafe {
        let mut temp: af_features = std::ptr::null_mut();
        let err_val = af_susan(
            &mut temp as *mut af_features,
            input.get(),
            radius,
            diff_thr,
            geom_thr,
            feature_ratio,
            edge,
        );
        HANDLE_ERROR(AfError::from(err_val));
        Features { feat: temp }
    }
}

/// Difference of Gaussians.
///
/// Given an image, this function computes two different versions of the smoothed input image
/// using two different smoothing parameters, subtracts one from the other and returns the result.
///
/// # Parameters
///
/// - `input` is the input image
/// - `radius1` is the radius of the first gaussian kernel
/// - `radius2` is the radius of the second gaussian kernel
///
/// # Return Values
///
/// Difference of the smoothed inputs - an Array.
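///
/// # Examples
///
/// A minimal sketch with a random placeholder image (assumes the usual
/// crate-root re-exports):
///
/// ```rust,no_run
/// use arrayfire::{dog, randu, Dim4};
///
/// // A real use case would load a grayscale image instead.
/// let img = randu::<f32>(Dim4::new(&[480, 640, 1, 1]));
///
/// // Subtract a wider (radius 2) blur from a narrower (radius 1) one.
/// let band_pass = dog(&img, 1, 2);
/// ```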
pub fn dog<T>(input: &Array<T>, radius1: i32, radius2: i32) -> Array<T::AbsOutType>
where
    T: HasAfEnum + ImageFilterType,
    T::AbsOutType: HasAfEnum,
{
    unsafe {
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_dog(&mut temp as *mut af_array, input.get(), radius1, radius2);
        HANDLE_ERROR(AfError::from(err_val));
        temp.into()
    }
}

/// Homography estimation
///
/// Homography estimation finds a perspective transform between two sets of 2D points.
/// Currently, two methods are supported for the estimation: RANSAC (RANdom SAmple Consensus)
/// and LMedS (Least Median of Squares). Both methods work by randomly selecting a subset
/// of 4 points from the set of source points, computing the eigenvectors of that set and
/// finding the perspective transform. The process is repeated several times, up to the number
/// of times given by the iterations argument for RANSAC (for the CPU backend, usually fewer,
/// depending on the quality of the dataset, but for the CUDA and OpenCL backends the
/// transformation will be computed exactly the number of times passed via the iterations
/// parameter). The returned value is the one that matches the best number of inliers, which
/// are all of the points that fall within a maximum L2 distance given by the inlier_thr
/// argument.
///
/// # Parameters
///
/// - `x_src` is the x coordinates of the source points.
/// - `y_src` is the y coordinates of the source points.
/// - `x_dst` is the x coordinates of the destination points.
/// - `y_dst` is the y coordinates of the destination points.
/// - `htype` can be AF_HOMOGRAPHY_RANSAC, for which a RANdom SAmple Consensus will be used to
/// evaluate the homography quality (e.g., number of inliers), or AF_HOMOGRAPHY_LMEDS, which will
/// use the Least Median of Squares method to evaluate homography quality
/// - `inlier_thr` - if htype is AF_HOMOGRAPHY_RANSAC, this parameter gives the maximum L2
/// distance for a point to be considered an inlier.
/// - `iterations` is the maximum number of iterations when htype is AF_HOMOGRAPHY_RANSAC and the
/// backend is CPU; if the backend is CUDA or OpenCL, iterations is the total number of
/// iterations. An iteration is a selection of 4 random points for which the homography is
/// estimated and evaluated for number of inliers.
/// - `otype` is the array type for the homography output.
///
/// # Return Values
///
/// Returns a tuple of an Array and an int.
///
/// - `H` is a 3x3 array containing the estimated homography.
/// - `inliers` is the number of inliers that the homography was estimated to comprise. In the
/// case that htype is AF_HOMOGRAPHY_RANSAC, a higher inlier_thr value will increase the
/// estimated inliers. Note that if the number of inliers is too low, it is likely that a bad
/// homography will be returned.
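///
/// # Examples
///
/// A minimal sketch using random placeholder point sets; real usage would pass
/// matched keypoint coordinates from two images (assumes the usual crate-root
/// re-exports):
///
/// ```rust,no_run
/// use arrayfire::{homography, randu, Dim4, HomographyType};
///
/// // Hypothetical matched point coordinates (one column of N points each).
/// let dims = Dim4::new(&[50, 1, 1, 1]);
/// let x_src = randu::<f32>(dims);
/// let y_src = randu::<f32>(dims);
/// let x_dst = randu::<f32>(dims);
/// let y_dst = randu::<f32>(dims);
///
/// // RANSAC with a 3-pixel inlier threshold and up to 1000 iterations.
/// let (h, inliers) = homography::<f32>(
///     &x_src, &y_src, &x_dst, &y_dst, HomographyType::RANSAC, 3.0, 1000,
/// );
/// println!("estimated homography with {} inliers", inliers);
/// // `h` is the 3x3 homography Array.
/// ```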
pub fn homography<OutType>(
    x_src: &Array<f32>,
    y_src: &Array<f32>,
    x_dst: &Array<f32>,
    y_dst: &Array<f32>,
    htype: HomographyType,
    inlier_thr: f32,
    iterations: u32,
) -> (Array<OutType>, i32)
where
    OutType: HasAfEnum + RealFloating,
{
    let otype = OutType::get_af_dtype();
    unsafe {
        let mut inliers: i32 = 0;
        let mut temp: af_array = std::ptr::null_mut();
        let err_val = af_homography(
            &mut temp as *mut af_array,
            &mut inliers as *mut c_int,
            x_src.get(),
            y_src.get(),
            x_dst.get(),
            y_dst.get(),
            htype as c_uint,
            inlier_thr,
            iterations,
            otype as c_uint,
        );
        HANDLE_ERROR(AfError::from(err_val));
        (temp.into(), inliers)
    }
}