From 63b607ba4fdebca5cdcee0368af4bf3209aef831 Mon Sep 17 00:00:00 2001
From: "Thomas J. Fan"
Date: Wed, 14 Jun 2023 15:23:38 +0100
Subject: [PATCH 1/2] CLN Renames missing_mask_per_feature

---
 sklearn/tree/_classes.py   | 16 +++++++-------
 sklearn/tree/_splitter.pxd |  2 +-
 sklearn/tree/_splitter.pyx | 44 +++++++++++++++++++-------------------
 sklearn/tree/_tree.pxd     |  2 +-
 sklearn/tree/_tree.pyx     | 10 ++++-----
 5 files changed, 37 insertions(+), 37 deletions(-)

diff --git a/sklearn/tree/_classes.py b/sklearn/tree/_classes.py
index e4a3b0a9ee3af..40e2133b1d73e 100644
--- a/sklearn/tree/_classes.py
+++ b/sklearn/tree/_classes.py
@@ -180,7 +180,7 @@ def get_n_leaves(self):
     def _support_missing_values(self, X):
         return not issparse(X) and self._get_tags()["allow_nan"]
 
-    def _compute_feature_has_missing(self, X):
+    def _compute_missing_mask_per_feature(self, X):
         """Return boolean mask denoting if there are missing values for each feature.
 
         This method also ensures that X is finite.
@@ -192,7 +192,7 @@ def _compute_feature_has_missing(self, X):
 
         Returns
         -------
-        feature_has_missing : ndarray of shape (n_features,), or None
+        missing_mask_per_feature : ndarray of shape (n_features,), or None
             Missing value mask. If missing values are not supported or there
             are no missing values, return None.
         """
@@ -213,11 +213,11 @@ def _compute_feature_has_missing(self, X):
         if not np.isnan(overall_sum):
             return None
 
-        feature_has_missing = _any_isnan_axis0(X)
-        return feature_has_missing
+        missing_mask_per_feature = _any_isnan_axis0(X)
+        return missing_mask_per_feature
 
     def _fit(
-        self, X, y, sample_weight=None, check_input=True, feature_has_missing=None
+        self, X, y, sample_weight=None, check_input=True, missing_mask_per_feature=None
     ):
         self._validate_params()
         random_state = check_random_state(self.random_state)
@@ -227,7 +227,7 @@ def _fit(
             # We can't pass multi_output=True because that would allow y to be
             # csr.
 
-            # _compute_feature_has_missing will check for finite values and
+            # _compute_missing_mask_per_feature will check for finite values and
             # compute the missing mask if the tree supports missing values
             check_X_params = dict(
                 dtype=DTYPE, accept_sparse="csc", force_all_finite=False
@@ -237,7 +237,7 @@ def _fit(
                 X, y, validate_separately=(check_X_params, check_y_params)
             )
 
-            feature_has_missing = self._compute_feature_has_missing(X)
+            missing_mask_per_feature = self._compute_missing_mask_per_feature(X)
 
             if issparse(X):
                 X.sort_indices()
@@ -432,7 +432,7 @@ def _fit(
             self.min_impurity_decrease,
         )
 
-        builder.build(self.tree_, X, y, sample_weight, feature_has_missing)
+        builder.build(self.tree_, X, y, sample_weight, missing_mask_per_feature)
 
         if self.n_outputs_ == 1 and is_classifier(self):
             self.n_classes_ = self.n_classes_[0]
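
For orientation while reading the rename above: the helper computes a per-feature
missing-value mask, guarded by a cheap whole-array check. Below is a minimal NumPy
sketch of that logic, an illustrative stand-in rather than scikit-learn's code — the
real implementation uses the Cython helper _any_isnan_axis0, and the function name
here is hypothetical; handling of infinite values is omitted.

    import numpy as np

    def compute_missing_mask_per_feature(X):
        # Fast path: a non-NaN total sum proves there is no NaN anywhere,
        # so the per-column scan can be skipped entirely.
        overall_sum = np.sum(X)
        if not np.isnan(overall_sum):
            return None
        # One boolean per feature: True where that column contains a NaN.
        return np.isnan(X).any(axis=0)

    X = np.array([[1.0, np.nan], [2.0, 3.0]])
    print(compute_missing_mask_per_feature(X))  # [False  True]
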
diff --git a/sklearn/tree/_splitter.pxd b/sklearn/tree/_splitter.pxd
index 9d6b41ae0d4a5..2f6585e62113f 100644
--- a/sklearn/tree/_splitter.pxd
+++ b/sklearn/tree/_splitter.pxd
@@ -81,7 +81,7 @@ cdef class Splitter:
         object X,
         const DOUBLE_t[:, ::1] y,
         const DOUBLE_t[:] sample_weight,
-        const unsigned char[::1] feature_has_missing,
+        const unsigned char[::1] missing_mask_per_feature,
     ) except -1
 
     cdef int node_reset(
diff --git a/sklearn/tree/_splitter.pyx b/sklearn/tree/_splitter.pyx
index 0a96a6562adb4..b0cd934d6db2e 100644
--- a/sklearn/tree/_splitter.pyx
+++ b/sklearn/tree/_splitter.pyx
@@ -107,7 +107,7 @@ cdef class Splitter:
         object X,
         const DOUBLE_t[:, ::1] y,
         const DOUBLE_t[:] sample_weight,
-        const unsigned char[::1] feature_has_missing,
+        const unsigned char[::1] missing_mask_per_feature,
     ) except -1:
         """Initialize the splitter.
 
@@ -172,7 +172,7 @@ cdef class Splitter:
         self.y = y
 
         self.sample_weight = sample_weight
-        if feature_has_missing is not None:
+        if missing_mask_per_feature is not None:
             self.criterion.init_sum_missing()
         return 0
 
@@ -808,19 +808,19 @@ cdef class DensePartitioner:
     cdef SIZE_t start
     cdef SIZE_t end
     cdef SIZE_t n_missing
-    cdef const unsigned char[::1] feature_has_missing
+    cdef const unsigned char[::1] missing_mask_per_feature
 
     def __init__(
         self,
         const DTYPE_t[:, :] X,
         SIZE_t[::1] samples,
         DTYPE_t[::1] feature_values,
-        const unsigned char[::1] feature_has_missing,
+        const unsigned char[::1] missing_mask_per_feature,
     ):
         self.X = X
         self.samples = samples
         self.feature_values = feature_values
-        self.feature_has_missing = feature_has_missing
+        self.missing_mask_per_feature = missing_mask_per_feature
 
     cdef inline void init_node_split(self, SIZE_t start, SIZE_t end) noexcept nogil:
         """Initialize splitter at the beginning of node_split."""
@@ -843,13 +843,13 @@ cdef class DensePartitioner:
             const DTYPE_t[:, :] X = self.X
             SIZE_t[::1] samples = self.samples
             SIZE_t n_missing = 0
-            const unsigned char[::1] feature_has_missing = self.feature_has_missing
+            const unsigned char[::1] missing_mask_per_feature = self.missing_mask_per_feature
 
         # Sort samples along that feature; by
         # copying the values into an array and
         # sorting the array in a manner which utilizes the cache more
         # effectively.
-        if feature_has_missing is not None and feature_has_missing[current_feature]:
+        if missing_mask_per_feature is not None and missing_mask_per_feature[current_feature]:
             i, current_end = self.start, self.end - 1
             # Missing values are placed at the end and do not participate in the sorting.
             while i <= current_end:
@@ -1018,7 +1018,7 @@ cdef class SparsePartitioner:
     cdef SIZE_t start
     cdef SIZE_t end
     cdef SIZE_t n_missing
-    cdef const unsigned char[::1] feature_has_missing
+    cdef const unsigned char[::1] missing_mask_per_feature
 
     cdef const DTYPE_t[::1] X_data
     cdef const INT32_t[::1] X_indices
@@ -1039,7 +1039,7 @@ cdef class SparsePartitioner:
         SIZE_t[::1] samples,
         SIZE_t n_samples,
         DTYPE_t[::1] feature_values,
-        const unsigned char[::1] feature_has_missing,
+        const unsigned char[::1] missing_mask_per_feature,
     ):
         if not isspmatrix_csc(X):
             raise ValueError("X should be in csc format")
@@ -1063,7 +1063,7 @@ cdef class SparsePartitioner:
         for p in range(n_samples):
             self.index_to_samples[samples[p]] = p
 
-        self.feature_has_missing = feature_has_missing
+        self.missing_mask_per_feature = missing_mask_per_feature
 
     cdef inline void init_node_split(self, SIZE_t start, SIZE_t end) noexcept nogil:
         """Initialize splitter at the beginning of node_split."""
@@ -1434,11 +1434,11 @@ cdef class BestSplitter(Splitter):
         object X,
         const DOUBLE_t[:, ::1] y,
         const DOUBLE_t[:] sample_weight,
-        const unsigned char[::1] feature_has_missing,
+        const unsigned char[::1] missing_mask_per_feature,
     ) except -1:
-        Splitter.init(self, X, y, sample_weight, feature_has_missing)
+        Splitter.init(self, X, y, sample_weight, missing_mask_per_feature)
         self.partitioner = DensePartitioner(
-            X, self.samples, self.feature_values, feature_has_missing
+            X, self.samples, self.feature_values, missing_mask_per_feature
         )
 
     cdef int node_split(self, double impurity, SplitRecord* split,
@@ -1460,11 +1460,11 @@ cdef class BestSparseSplitter(Splitter):
         object X,
         const DOUBLE_t[:, ::1] y,
         const DOUBLE_t[:] sample_weight,
-        const unsigned char[::1] feature_has_missing,
+        const unsigned char[::1] missing_mask_per_feature,
     ) except -1:
-        Splitter.init(self, X, y, sample_weight, feature_has_missing)
+        Splitter.init(self, X, y, sample_weight, missing_mask_per_feature)
         self.partitioner = SparsePartitioner(
-            X, self.samples, self.n_samples, self.feature_values, feature_has_missing
+            X, self.samples, self.n_samples, self.feature_values, missing_mask_per_feature
         )
 
     cdef int node_split(self, double impurity, SplitRecord* split,
@@ -1486,11 +1486,11 @@ cdef class RandomSplitter(Splitter):
         object X,
         const DOUBLE_t[:, ::1] y,
         const DOUBLE_t[:] sample_weight,
-        const unsigned char[::1] feature_has_missing,
+        const unsigned char[::1] missing_mask_per_feature,
     ) except -1:
-        Splitter.init(self, X, y, sample_weight, feature_has_missing)
+        Splitter.init(self, X, y, sample_weight, missing_mask_per_feature)
         self.partitioner = DensePartitioner(
-            X, self.samples, self.feature_values, feature_has_missing
+            X, self.samples, self.feature_values, missing_mask_per_feature
         )
 
     cdef int node_split(self, double impurity, SplitRecord* split,
@@ -1512,11 +1512,11 @@ cdef class RandomSparseSplitter(Splitter):
         object X,
         const DOUBLE_t[:, ::1] y,
         const DOUBLE_t[:] sample_weight,
-        const unsigned char[::1] feature_has_missing,
+        const unsigned char[::1] missing_mask_per_feature,
     ) except -1:
-        Splitter.init(self, X, y, sample_weight, feature_has_missing)
+        Splitter.init(self, X, y, sample_weight, missing_mask_per_feature)
         self.partitioner = SparsePartitioner(
-            X, self.samples, self.n_samples, self.feature_values, feature_has_missing
+            X, self.samples, self.n_samples, self.feature_values, missing_mask_per_feature
         )
 
     cdef int node_split(self, double impurity, SplitRecord* split,
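
Context for the partitioner hunks just above: when a feature is flagged in the
mask, the dense partitioner first moves samples with missing values to the end of
the node's active range, then sorts only the non-missing prefix. A pure-Python
sketch of that idea — names are illustrative stand-ins, not the nogil Cython
routine itself:

    import numpy as np

    def move_missing_to_end_then_sort(values, samples, start, end):
        # Walk the active range [start, end); swap any NaN feature value
        # (and its sample index) to the tail, shrinking the tail pointer.
        i, current_end = start, end - 1
        n_missing = 0
        while i <= current_end:
            if np.isnan(values[current_end]):
                # Tail already holds a missing value; just shrink the range.
                n_missing += 1
                current_end -= 1
                continue
            if np.isnan(values[i]):
                # Swap the missing value at i with the non-missing tail.
                values[i], values[current_end] = values[current_end], values[i]
                samples[i], samples[current_end] = samples[current_end], samples[i]
                n_missing += 1
                current_end -= 1
            i += 1
        # Sort only the non-missing prefix; missing samples stay at the end.
        n_valid = end - n_missing
        order = np.argsort(values[start:n_valid])
        values[start:n_valid] = values[start:n_valid][order]
        samples[start:n_valid] = samples[start:n_valid][order]
        return n_missing

    values = np.array([np.nan, 3.0, 1.0, np.nan, 2.0])
    samples = np.arange(5)
    print(move_missing_to_end_then_sort(values, samples, 0, 5))  # 2
    print(values)  # [ 1.  2.  3. nan nan]
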
diff --git a/sklearn/tree/_tree.pxd b/sklearn/tree/_tree.pxd
index e08ec5c94e41a..cc8f827c4932c 100644
--- a/sklearn/tree/_tree.pxd
+++ b/sklearn/tree/_tree.pxd
@@ -107,7 +107,7 @@ cdef class TreeBuilder:
         object X,
         const DOUBLE_t[:, ::1] y,
         const DOUBLE_t[:] sample_weight=*,
-        const unsigned char[::1] feature_has_missing=*,
+        const unsigned char[::1] missing_mask_per_feature=*,
     )
 
     cdef _check_input(
diff --git a/sklearn/tree/_tree.pyx b/sklearn/tree/_tree.pyx
index bea0ea833d30d..f86629d4e9c32 100644
--- a/sklearn/tree/_tree.pyx
+++ b/sklearn/tree/_tree.pyx
@@ -94,7 +94,7 @@ cdef class TreeBuilder:
         object X,
         const DOUBLE_t[:, ::1] y,
         const DOUBLE_t[:] sample_weight=None,
-        const unsigned char[::1] feature_has_missing=None,
+        const unsigned char[::1] missing_mask_per_feature=None,
     ):
         """Build a decision tree from the training set (X, y)."""
         pass
@@ -168,7 +168,7 @@ cdef class DepthFirstTreeBuilder(TreeBuilder):
         object X,
         const DOUBLE_t[:, ::1] y,
         const DOUBLE_t[:] sample_weight=None,
-        const unsigned char[::1] feature_has_missing=None,
+        const unsigned char[::1] missing_mask_per_feature=None,
     ):
         """Build a decision tree from the training set (X, y)."""
 
@@ -194,7 +194,7 @@ cdef class DepthFirstTreeBuilder(TreeBuilder):
         cdef double min_impurity_decrease = self.min_impurity_decrease
 
         # Recursive partition (without actual recursion)
-        splitter.init(X, y, sample_weight, feature_has_missing)
+        splitter.init(X, y, sample_weight, missing_mask_per_feature)
 
         cdef SIZE_t start
         cdef SIZE_t end
@@ -366,7 +366,7 @@ cdef class BestFirstTreeBuilder(TreeBuilder):
         object X,
         const DOUBLE_t[:, ::1] y,
         const DOUBLE_t[:] sample_weight=None,
-        const unsigned char[::1] feature_has_missing=None,
+        const unsigned char[::1] missing_mask_per_feature=None,
     ):
         """Build a decision tree from the training set (X, y)."""
 
@@ -378,7 +378,7 @@ cdef class BestFirstTreeBuilder(TreeBuilder):
         cdef SIZE_t max_leaf_nodes = self.max_leaf_nodes
 
         # Recursive partition (without actual recursion)
-        splitter.init(X, y, sample_weight, feature_has_missing)
+        splitter.init(X, y, sample_weight, missing_mask_per_feature)
 
         cdef vector[FrontierRecord] frontier
         cdef FrontierRecord record

From 8b3f7747c20ba835c54af4f878166d156dd8e669 Mon Sep 17 00:00:00 2001
From: "Thomas J. Fan"
Date: Wed, 14 Jun 2023 17:01:29 +0100
Subject: [PATCH 2/2] CLN Apply suggestion

---
 sklearn/tree/_classes.py   | 23 +++++++++++++-------
 sklearn/tree/_splitter.pxd |  2 +-
 sklearn/tree/_splitter.pyx | 44 +++++++++++++++++++-------------------
 sklearn/tree/_tree.pxd     |  2 +-
 sklearn/tree/_tree.pyx     | 10 ++++-----
 5 files changed, 44 insertions(+), 37 deletions(-)

diff --git a/sklearn/tree/_classes.py b/sklearn/tree/_classes.py
index 40e2133b1d73e..5e2f292f92672 100644
--- a/sklearn/tree/_classes.py
+++ b/sklearn/tree/_classes.py
@@ -180,7 +180,7 @@ def get_n_leaves(self):
     def _support_missing_values(self, X):
         return not issparse(X) and self._get_tags()["allow_nan"]
 
-    def _compute_missing_mask_per_feature(self, X):
+    def _compute_missing_values_in_feature_mask(self, X):
         """Return boolean mask denoting if there are missing values for each feature.
 
         This method also ensures that X is finite.
@@ -192,7 +192,7 @@ def _compute_missing_mask_per_feature(self, X):
 
         Returns
         -------
-        missing_mask_per_feature : ndarray of shape (n_features,), or None
+        missing_values_in_feature_mask : ndarray of shape (n_features,), or None
             Missing value mask. If missing values are not supported or there
             are no missing values, return None.
         """
@@ -213,11 +213,16 @@ def _compute_missing_mask_per_feature(self, X):
         if not np.isnan(overall_sum):
             return None
 
-        missing_mask_per_feature = _any_isnan_axis0(X)
-        return missing_mask_per_feature
+        missing_values_in_feature_mask = _any_isnan_axis0(X)
+        return missing_values_in_feature_mask
 
     def _fit(
-        self, X, y, sample_weight=None, check_input=True, missing_mask_per_feature=None
+        self,
+        X,
+        y,
+        sample_weight=None,
+        check_input=True,
+        missing_values_in_feature_mask=None,
     ):
         self._validate_params()
         random_state = check_random_state(self.random_state)
@@ -227,7 +232,7 @@ def _fit(
             # We can't pass multi_output=True because that would allow y to be
             # csr.
 
-            # _compute_missing_mask_per_feature will check for finite values and
+            # _compute_missing_values_in_feature_mask will check for finite values and
             # compute the missing mask if the tree supports missing values
             check_X_params = dict(
                 dtype=DTYPE, accept_sparse="csc", force_all_finite=False
@@ -237,7 +242,9 @@ def _fit(
                 X, y, validate_separately=(check_X_params, check_y_params)
             )
 
-            missing_mask_per_feature = self._compute_missing_mask_per_feature(X)
+            missing_values_in_feature_mask = (
+                self._compute_missing_values_in_feature_mask(X)
+            )
 
             if issparse(X):
                 X.sort_indices()
@@ -432,7 +439,7 @@ def _fit(
             self.min_impurity_decrease,
         )
 
-        builder.build(self.tree_, X, y, sample_weight, missing_mask_per_feature)
+        builder.build(self.tree_, X, y, sample_weight, missing_values_in_feature_mask)
 
         if self.n_outputs_ == 1 and is_classifier(self):
             self.n_classes_ = self.n_classes_[0]
""" @@ -213,11 +213,16 @@ def _compute_missing_mask_per_feature(self, X): if not np.isnan(overall_sum): return None - missing_mask_per_feature = _any_isnan_axis0(X) - return missing_mask_per_feature + missing_values_in_feature_mask = _any_isnan_axis0(X) + return missing_values_in_feature_mask def _fit( - self, X, y, sample_weight=None, check_input=True, missing_mask_per_feature=None + self, + X, + y, + sample_weight=None, + check_input=True, + missing_values_in_feature_mask=None, ): self._validate_params() random_state = check_random_state(self.random_state) @@ -227,7 +232,7 @@ def _fit( # We can't pass multi_output=True because that would allow y to be # csr. - # _compute_missing_mask_per_feature will check for finite values and + # _compute_missing_values_in_feature_mask will check for finite values and # compute the missing mask if the tree supports missing values check_X_params = dict( dtype=DTYPE, accept_sparse="csc", force_all_finite=False @@ -237,7 +242,9 @@ def _fit( X, y, validate_separately=(check_X_params, check_y_params) ) - missing_mask_per_feature = self._compute_missing_mask_per_feature(X) + missing_values_in_feature_mask = ( + self._compute_missing_values_in_feature_mask(X) + ) if issparse(X): X.sort_indices() @@ -432,7 +439,7 @@ def _fit( self.min_impurity_decrease, ) - builder.build(self.tree_, X, y, sample_weight, missing_mask_per_feature) + builder.build(self.tree_, X, y, sample_weight, missing_values_in_feature_mask) if self.n_outputs_ == 1 and is_classifier(self): self.n_classes_ = self.n_classes_[0] diff --git a/sklearn/tree/_splitter.pxd b/sklearn/tree/_splitter.pxd index 2f6585e62113f..acc67a7315add 100644 --- a/sklearn/tree/_splitter.pxd +++ b/sklearn/tree/_splitter.pxd @@ -81,7 +81,7 @@ cdef class Splitter: object X, const DOUBLE_t[:, ::1] y, const DOUBLE_t[:] sample_weight, - const unsigned char[::1] missing_mask_per_feature, + const unsigned char[::1] missing_values_in_feature_mask, ) except -1 cdef int node_reset( diff --git a/sklearn/tree/_splitter.pyx b/sklearn/tree/_splitter.pyx index b0cd934d6db2e..7e60f0023d2a2 100644 --- a/sklearn/tree/_splitter.pyx +++ b/sklearn/tree/_splitter.pyx @@ -107,7 +107,7 @@ cdef class Splitter: object X, const DOUBLE_t[:, ::1] y, const DOUBLE_t[:] sample_weight, - const unsigned char[::1] missing_mask_per_feature, + const unsigned char[::1] missing_values_in_feature_mask, ) except -1: """Initialize the splitter. 
@@ -172,7 +172,7 @@ cdef class Splitter: self.y = y self.sample_weight = sample_weight - if missing_mask_per_feature is not None: + if missing_values_in_feature_mask is not None: self.criterion.init_sum_missing() return 0 @@ -808,19 +808,19 @@ cdef class DensePartitioner: cdef SIZE_t start cdef SIZE_t end cdef SIZE_t n_missing - cdef const unsigned char[::1] missing_mask_per_feature + cdef const unsigned char[::1] missing_values_in_feature_mask def __init__( self, const DTYPE_t[:, :] X, SIZE_t[::1] samples, DTYPE_t[::1] feature_values, - const unsigned char[::1] missing_mask_per_feature, + const unsigned char[::1] missing_values_in_feature_mask, ): self.X = X self.samples = samples self.feature_values = feature_values - self.missing_mask_per_feature = missing_mask_per_feature + self.missing_values_in_feature_mask = missing_values_in_feature_mask cdef inline void init_node_split(self, SIZE_t start, SIZE_t end) noexcept nogil: """Initialize splitter at the beginning of node_split.""" @@ -843,13 +843,13 @@ cdef class DensePartitioner: const DTYPE_t[:, :] X = self.X SIZE_t[::1] samples = self.samples SIZE_t n_missing = 0 - const unsigned char[::1] missing_mask_per_feature = self.missing_mask_per_feature + const unsigned char[::1] missing_values_in_feature_mask = self.missing_values_in_feature_mask # Sort samples along that feature; by # copying the values into an array and # sorting the array in a manner which utilizes the cache more # effectively. - if missing_mask_per_feature is not None and missing_mask_per_feature[current_feature]: + if missing_values_in_feature_mask is not None and missing_values_in_feature_mask[current_feature]: i, current_end = self.start, self.end - 1 # Missing values are placed at the end and do not participate in the sorting. 
while i <= current_end: @@ -1018,7 +1018,7 @@ cdef class SparsePartitioner: cdef SIZE_t start cdef SIZE_t end cdef SIZE_t n_missing - cdef const unsigned char[::1] missing_mask_per_feature + cdef const unsigned char[::1] missing_values_in_feature_mask cdef const DTYPE_t[::1] X_data cdef const INT32_t[::1] X_indices @@ -1039,7 +1039,7 @@ cdef class SparsePartitioner: SIZE_t[::1] samples, SIZE_t n_samples, DTYPE_t[::1] feature_values, - const unsigned char[::1] missing_mask_per_feature, + const unsigned char[::1] missing_values_in_feature_mask, ): if not isspmatrix_csc(X): raise ValueError("X should be in csc format") @@ -1063,7 +1063,7 @@ cdef class SparsePartitioner: for p in range(n_samples): self.index_to_samples[samples[p]] = p - self.missing_mask_per_feature = missing_mask_per_feature + self.missing_values_in_feature_mask = missing_values_in_feature_mask cdef inline void init_node_split(self, SIZE_t start, SIZE_t end) noexcept nogil: """Initialize splitter at the beginning of node_split.""" @@ -1434,11 +1434,11 @@ cdef class BestSplitter(Splitter): object X, const DOUBLE_t[:, ::1] y, const DOUBLE_t[:] sample_weight, - const unsigned char[::1] missing_mask_per_feature, + const unsigned char[::1] missing_values_in_feature_mask, ) except -1: - Splitter.init(self, X, y, sample_weight, missing_mask_per_feature) + Splitter.init(self, X, y, sample_weight, missing_values_in_feature_mask) self.partitioner = DensePartitioner( - X, self.samples, self.feature_values, missing_mask_per_feature + X, self.samples, self.feature_values, missing_values_in_feature_mask ) cdef int node_split(self, double impurity, SplitRecord* split, @@ -1460,11 +1460,11 @@ cdef class BestSparseSplitter(Splitter): object X, const DOUBLE_t[:, ::1] y, const DOUBLE_t[:] sample_weight, - const unsigned char[::1] missing_mask_per_feature, + const unsigned char[::1] missing_values_in_feature_mask, ) except -1: - Splitter.init(self, X, y, sample_weight, missing_mask_per_feature) + Splitter.init(self, X, y, sample_weight, missing_values_in_feature_mask) self.partitioner = SparsePartitioner( - X, self.samples, self.n_samples, self.feature_values, missing_mask_per_feature + X, self.samples, self.n_samples, self.feature_values, missing_values_in_feature_mask ) cdef int node_split(self, double impurity, SplitRecord* split, @@ -1486,11 +1486,11 @@ cdef class RandomSplitter(Splitter): object X, const DOUBLE_t[:, ::1] y, const DOUBLE_t[:] sample_weight, - const unsigned char[::1] missing_mask_per_feature, + const unsigned char[::1] missing_values_in_feature_mask, ) except -1: - Splitter.init(self, X, y, sample_weight, missing_mask_per_feature) + Splitter.init(self, X, y, sample_weight, missing_values_in_feature_mask) self.partitioner = DensePartitioner( - X, self.samples, self.feature_values, missing_mask_per_feature + X, self.samples, self.feature_values, missing_values_in_feature_mask ) cdef int node_split(self, double impurity, SplitRecord* split, @@ -1512,11 +1512,11 @@ cdef class RandomSparseSplitter(Splitter): object X, const DOUBLE_t[:, ::1] y, const DOUBLE_t[:] sample_weight, - const unsigned char[::1] missing_mask_per_feature, + const unsigned char[::1] missing_values_in_feature_mask, ) except -1: - Splitter.init(self, X, y, sample_weight, missing_mask_per_feature) + Splitter.init(self, X, y, sample_weight, missing_values_in_feature_mask) self.partitioner = SparsePartitioner( - X, self.samples, self.n_samples, self.feature_values, missing_mask_per_feature + X, self.samples, self.n_samples, self.feature_values, 
missing_values_in_feature_mask ) cdef int node_split(self, double impurity, SplitRecord* split, diff --git a/sklearn/tree/_tree.pxd b/sklearn/tree/_tree.pxd index cc8f827c4932c..b99f44c0472a2 100644 --- a/sklearn/tree/_tree.pxd +++ b/sklearn/tree/_tree.pxd @@ -107,7 +107,7 @@ cdef class TreeBuilder: object X, const DOUBLE_t[:, ::1] y, const DOUBLE_t[:] sample_weight=*, - const unsigned char[::1] missing_mask_per_feature=*, + const unsigned char[::1] missing_values_in_feature_mask=*, ) cdef _check_input( diff --git a/sklearn/tree/_tree.pyx b/sklearn/tree/_tree.pyx index f86629d4e9c32..e7a0ab2f2966d 100644 --- a/sklearn/tree/_tree.pyx +++ b/sklearn/tree/_tree.pyx @@ -94,7 +94,7 @@ cdef class TreeBuilder: object X, const DOUBLE_t[:, ::1] y, const DOUBLE_t[:] sample_weight=None, - const unsigned char[::1] missing_mask_per_feature=None, + const unsigned char[::1] missing_values_in_feature_mask=None, ): """Build a decision tree from the training set (X, y).""" pass @@ -168,7 +168,7 @@ cdef class DepthFirstTreeBuilder(TreeBuilder): object X, const DOUBLE_t[:, ::1] y, const DOUBLE_t[:] sample_weight=None, - const unsigned char[::1] missing_mask_per_feature=None, + const unsigned char[::1] missing_values_in_feature_mask=None, ): """Build a decision tree from the training set (X, y).""" @@ -194,7 +194,7 @@ cdef class DepthFirstTreeBuilder(TreeBuilder): cdef double min_impurity_decrease = self.min_impurity_decrease # Recursive partition (without actual recursion) - splitter.init(X, y, sample_weight, missing_mask_per_feature) + splitter.init(X, y, sample_weight, missing_values_in_feature_mask) cdef SIZE_t start cdef SIZE_t end @@ -366,7 +366,7 @@ cdef class BestFirstTreeBuilder(TreeBuilder): object X, const DOUBLE_t[:, ::1] y, const DOUBLE_t[:] sample_weight=None, - const unsigned char[::1] missing_mask_per_feature=None, + const unsigned char[::1] missing_values_in_feature_mask=None, ): """Build a decision tree from the training set (X, y).""" @@ -378,7 +378,7 @@ cdef class BestFirstTreeBuilder(TreeBuilder): cdef SIZE_t max_leaf_nodes = self.max_leaf_nodes # Recursive partition (without actual recursion) - splitter.init(X, y, sample_weight, missing_mask_per_feature) + splitter.init(X, y, sample_weight, missing_values_in_feature_mask) cdef vector[FrontierRecord] frontier cdef FrontierRecord record
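
Taken together, the two patches settle on missing_values_in_feature_mask as the
name threaded from _fit through builder.build into the splitters and
partitioners. A quick end-to-end check of the behavior this plumbing supports —
assuming a scikit-learn build with missing-value support in decision trees
(introduced in 1.3):

    import numpy as np
    from sklearn.tree import DecisionTreeClassifier

    # NaNs are allowed in dense input; during fitting the tree learns which
    # side of each split the samples with missing values should follow.
    X = np.array([[0.0, 1.0], [np.nan, 2.0], [3.0, np.nan], [4.0, 5.0]])
    y = np.array([0, 0, 1, 1])

    clf = DecisionTreeClassifier(random_state=0).fit(X, y)
    print(clf.predict([[np.nan, 4.0]]))
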