desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'Evaluate decision function output for X relative to y_true. Parameters clf : object Trained classifier to use for scoring. Must have either a decision_function method or a predict_proba method; the output of that is used to compute the score. X : array-like or sparse matrix Test data that will be fed to clf.decision_f...
def __call__(self, clf, X, y, sample_weight=None):
super(_ThresholdScorer, self).__call__(clf, X, y, sample_weight=sample_weight) y_type = type_of_target(y) if (y_type not in ('binary', 'multilabel-indicator')): raise ValueError('{0} format is not supported'.format(y_type)) if is_regressor(clf): y_pred = clf.predict(X) el...
'Fit model to data. Parameters X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of predictors. Y : array-like, shape = [n_samples, n_targets] Target vectors, where n_samples is the number of samples and n_targets is the number of res...
def fit(self, X, Y):
check_consistent_length(X, Y) X = check_array(X, dtype=np.float64, copy=self.copy) Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False) if (Y.ndim == 1): Y = Y.reshape((-1), 1) n = X.shape[0] p = X.shape[1] q = Y.shape[1] if ((self.n_components < 1) or (self.n_co...
'Apply the dimension reduction learned on the train data. Parameters X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of predictors. Y : array-like, shape = [n_samples, n_targets] Target vectors, where n_samples is the number of samp...
def transform(self, X, Y=None, copy=True):
check_is_fitted(self, 'x_mean_') X = check_array(X, copy=copy, dtype=FLOAT_DTYPES) X -= self.x_mean_ X /= self.x_std_ x_scores = np.dot(X, self.x_rotations_) if (Y is not None): Y = check_array(Y, ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES) if (Y.ndim == 1): Y = Y...
def predict(self, X, copy=True):
    """Predict targets for X with the fitted PLS coefficients.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Samples to predict.
    copy : boolean, default True
        Whether to copy X, or perform in-place normalization.

    Notes
    -----
    X is centered and scaled with the training statistics before the
    linear prediction is computed.
    """
    check_is_fitted(self, 'x_mean_')
    X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
    # Normalize with the statistics learned during fit.
    X -= self.x_mean_
    X /= self.x_std_
    prediction = np.dot(X, self.coef_)
    return prediction + self.y_mean_
def fit_transform(self, X, y=None):
    """Learn the dimension reduction on the train data, then apply it.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vectors.
    y : array-like, shape = [n_samples, n_targets], optional
        Target vectors.
    """
    fitted = self.fit(X, y)
    return fitted.transform(X, y)
'Fit model to data. Parameters X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of predictors. Y : array-like, shape = [n_samples, n_targets] Target vectors, where n_samples is the number of samples and n_targets is the number of res...
def fit(self, X, Y):
check_consistent_length(X, Y) X = check_array(X, dtype=np.float64, copy=self.copy) Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False) if (Y.ndim == 1): Y = Y.reshape((-1), 1) if (self.n_components > max(Y.shape[1], X.shape[1])): raise ValueError(('Invalid number...
'Apply the dimension reduction learned on the train data. Parameters X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of predictors. Y : array-like, shape = [n_samples, n_targets] Target vectors, where n_samples is the number of samp...
def transform(self, X, Y=None):
check_is_fitted(self, 'x_mean_') X = check_array(X, dtype=np.float64) Xr = ((X - self.x_mean_) / self.x_std_) x_scores = np.dot(Xr, self.x_weights_) if (Y is not None): if (Y.ndim == 1): Y = Y.reshape((-1), 1) Yr = ((Y - self.y_mean_) / self.y_std_) y_scores = np....
def fit_transform(self, X, y=None):
    """Fit the model to (X, y) and return the transformed train data.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vectors.
    y : array-like, shape = [n_samples, n_targets], optional
        Target vectors.
    """
    model = self.fit(X, y)
    return model.transform(X, y)
'Fit the imputer on X. Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data, where ``n_samples`` is the number of samples and ``n_features`` is the number of features. Returns self : object Returns self.'
def fit(self, X, y=None):
allowed_strategies = ['mean', 'median', 'most_frequent'] if (self.strategy not in allowed_strategies): raise ValueError('Can only use these strategies: {0} got strategy={1}'.format(allowed_strategies, self.strategy)) if (self.axis not in [0, 1]): raise ValueError('Ca...
'Fit the transformer on sparse data.'
def _sparse_fit(self, X, strategy, missing_values, axis):
if (axis == 1): X = X.tocsr() else: X = X.tocsc() if (missing_values == 0): n_zeros_axis = np.zeros(X.shape[(not axis)], dtype=int) else: n_zeros_axis = (X.shape[axis] - np.diff(X.indptr)) if (strategy == 'mean'): if (missing_values != 0): n_non_mi...
'Fit the transformer on dense data.'
def _dense_fit(self, X, strategy, missing_values, axis):
X = check_array(X, force_all_finite=False) mask = _get_mask(X, missing_values) masked_X = ma.masked_array(X, mask=mask) if (strategy == 'mean'): mean_masked = np.ma.mean(masked_X, axis=axis) mean = np.ma.getdata(mean_masked) mean[np.ma.getmask(mean_masked)] = np.nan retur...
'Impute all missing values in X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] The input data to complete.'
def transform(self, X):
if (self.axis == 0): check_is_fitted(self, 'statistics_') X = check_array(X, accept_sparse='csc', dtype=FLOAT_DTYPES, force_all_finite=False, copy=self.copy) statistics = self.statistics_ if (X.shape[1] != statistics.shape[0]): raise ValueError(('X has %d feature...
def fit(self, X, y=None):
    """Fit transformer by checking X.

    No state is learned; when ``validate`` is True, X is only validated.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input array.

    Returns
    -------
    self
    """
    if self.validate:
        check_array(X, self.accept_sparse)
    return self
def transform(self, X, y='deprecated'):
    """Transform X using the forward function.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input array.
    y : (ignored)
        .. deprecated:: 0.19

    Returns
    -------
    X_out : array-like, shape (n_samples, n_features)
        Transformed input.
    """
    y_is_default = isinstance(y, string_types) and (y == 'deprecated')
    if not y_is_default:
        # Caller passed an explicit y: emit the deprecation warning.
        warnings.warn('The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21', DeprecationWarning)
    return self._transform(X, y=y, func=self.func, kw_args=self.kw_args)
'Transform X using the inverse function. Parameters X : array-like, shape (n_samples, n_features) Input array. y : (ignored) .. deprecated::0.19 Returns X_out : array-like, shape (n_samples, n_features) Transformed input.'
def inverse_transform(self, X, y='deprecated'):
if ((not isinstance(y, string_types)) or (y != 'deprecated')): warnings.warn('The parameter y on inverse_transform() is deprecated since 0.19 and will be removed in 0.21', DeprecationWarning) return self._transform(X, y=y, func=self.inverse_func, kw_args=self.in...
def _reset(self):
    """Reset internal data-dependent state of the scaler, if necessary.

    __init__ parameters are not touched.
    """
    # scale_ is set only after a (partial_)fit, so its presence implies
    # all the other fitted attributes exist as well.
    if hasattr(self, 'scale_'):
        for attr in ('scale_', 'min_', 'n_samples_seen_',
                     'data_min_', 'data_max_', 'data_range_'):
            delattr(self, attr)
def fit(self, X, y=None):
    """Compute the per-feature minimum and maximum used for later scaling.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_features]
        The data used to compute the per-feature minimum and maximum.
    """
    # Discard any previously fitted state, then fit in one batch.
    self._reset()
    return self.partial_fit(X, y)
'Online computation of min and max on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters X : array-like, shape [n_samples, n_features] The data used to compu...
def partial_fit(self, X, y=None):
feature_range = self.feature_range if (feature_range[0] >= feature_range[1]): raise ValueError(('Minimum of desired feature range must be smaller than maximum. Got %s.' % str(feature_range))) if sparse.issparse(X): raise TypeError('MinMaxScaler does no ...
def transform(self, X):
    """Scale features of X according to feature_range.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_features]
        Input data that will be transformed.
    """
    check_is_fitted(self, 'scale_')
    X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
    # In-place affine transform: X * scale_ + min_.
    X *= self.scale_
    X += self.min_
    return X
def inverse_transform(self, X):
    """Undo the scaling of X according to feature_range.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_features]
        Input data that will be transformed. It cannot be sparse.
    """
    check_is_fitted(self, 'scale_')
    X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
    # Invert the forward affine transform: (X - min_) / scale_.
    X -= self.min_
    X /= self.scale_
    return X
def _reset(self):
    """Reset internal data-dependent state of the scaler, if necessary.

    __init__ parameters are not touched.
    """
    # scale_ is only present after fitting, alongside the other attrs.
    if hasattr(self, 'scale_'):
        for attr in ('scale_', 'n_samples_seen_', 'mean_', 'var_'):
            delattr(self, attr)
def fit(self, X, y=None):
    """Compute the mean and std to be used for later scaling.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data used to compute the mean and standard deviation.
    y : Passthrough for ``Pipeline`` compatibility.
    """
    # Clear any previous fit, then process X as a single batch.
    self._reset()
    return self.partial_fit(X, y)
'Online computation of mean and std on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. The algorithm for incremental mean and std is given in Equation 1.5a,b in Chan...
def partial_fit(self, X, y=None):
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_mean: raise ValueError('Cannot center sparse matrices: pass `with_mean=False` instead. See docstring for ...
'Perform standardization by centering and scaling Parameters X : array-like, shape [n_samples, n_features] The data used to scale along the features axis. y : (ignored) .. deprecated:: 0.19 This parameter will be removed in 0.21. copy : bool, optional (default: None) Copy the input X or not.'
def transform(self, X, y='deprecated', copy=None):
if ((not isinstance(y, string_types)) or (y != 'deprecated')): warnings.warn('The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21', DeprecationWarning) check_is_fitted(self, 'scale_') copy = (copy if (copy is not None) else se...
'Scale back the data to the original representation Parameters X : array-like, shape [n_samples, n_features] The data used to scale along the features axis. copy : bool, optional (default: None) Copy the input X or not. Returns X_tr : array-like, shape [n_samples, n_features] Transformed array.'
def inverse_transform(self, X, copy=None):
check_is_fitted(self, 'scale_') copy = (copy if (copy is not None) else self.copy) if sparse.issparse(X): if self.with_mean: raise ValueError('Cannot uncenter sparse matrices: pass `with_mean=False` instead See docstring for motivation and alternatives...
def _reset(self):
    """Reset internal data-dependent state of the scaler, if necessary.

    __init__ parameters are not touched.
    """
    if hasattr(self, 'scale_'):
        for attr in ('scale_', 'n_samples_seen_', 'max_abs_'):
            delattr(self, attr)
def fit(self, X, y=None):
    """Compute the maximum absolute value to be used for later scaling.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        The data used to compute the per-feature maximum absolute value.
    """
    # Forget any previous fit and treat X as one batch.
    self._reset()
    return self.partial_fit(X, y)
'Online computation of max absolute value of X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters X : {array-like, sparse matrix}, shape [n_samples, n_features...
def partial_fit(self, X, y=None):
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): (mins, maxs) = min_max_axis(X, axis=0) max_abs = np.maximum(np.abs(mins), np.abs(maxs)) else: max_abs = np.abs(X).max(axis=0) if (not hasattr(self, 'n_sampl...
def transform(self, X):
    """Scale the data.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        The data that should be scaled.
    """
    check_is_fitted(self, 'scale_')
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                    estimator=self, dtype=FLOAT_DTYPES)
    if sparse.issparse(X):
        # Sparse path: scale columns without densifying.
        inplace_column_scale(X, 1.0 / self.scale_)
    else:
        X /= self.scale_
    return X
def inverse_transform(self, X):
    """Scale back the data to the original representation.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        The data that should be transformed back.
    """
    check_is_fitted(self, 'scale_')
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                    estimator=self, dtype=FLOAT_DTYPES)
    if sparse.issparse(X):
        # Undo the column scaling in place on the sparse data.
        inplace_column_scale(X, self.scale_)
    else:
        X *= self.scale_
    return X
'Makes sure centering is not enabled for sparse matrices.'
def _check_array(self, X, copy):
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_centering: raise ValueError('Cannot center sparse matrices: use `with_centering=False` instead. See docstring for motivation ...
'Compute the median and quantiles to be used for scaling. Parameters X : array-like, shape [n_samples, n_features] The data used to compute the median and quantiles used for later scaling along the features axis.'
def fit(self, X, y=None):
if sparse.issparse(X): raise TypeError('RobustScaler cannot be fitted on sparse inputs') X = self._check_array(X, self.copy) if self.with_centering: self.center_ = np.median(X, axis=0) if self.with_scaling: (q_min, q_max) = self.quantile_range if (not (0...
'Center and scale the data. Can be called on sparse input, provided that ``RobustScaler`` has been fitted to dense input and ``with_centering=False``. Parameters X : {array-like, sparse matrix} The data used to scale along the specified axis.'
def transform(self, X):
if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, (1.0 / self.scale_)) else: if self.with_...
'Scale back the data to the original representation Parameters X : array-like The data used to scale along the specified axis.'
def inverse_transform(self, X):
if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, self.scale_) else: if self.with_scaling:...
'Return feature names for output features Parameters input_features : list of string, length n_features, optional String names for input features if available. By default, "x0", "x1", ... "xn_features" is used. Returns output_feature_names : list of string, length n_output_features'
def get_feature_names(self, input_features=None):
powers = self.powers_ if (input_features is None): input_features = [('x%d' % i) for i in range(powers.shape[1])] feature_names = [] for row in powers: inds = np.where(row)[0] if len(inds): name = ' '.join(((('%s^%d' % (input_features[ind], exp)) if (exp != 1) else...
def fit(self, X, y=None):
    """Compute number of output features.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data.

    Returns
    -------
    self : instance
    """
    n_samples, n_features = check_array(X).shape
    combos = self._combinations(n_features, self.degree,
                                self.interaction_only, self.include_bias)
    self.n_input_features_ = n_features
    # Count the combinations lazily; the generator is not materialized.
    self.n_output_features_ = sum(1 for _ in combos)
    return self
'Transform data to polynomial features Parameters X : array-like, shape [n_samples, n_features] The data to transform, row by row. Returns XP : np.ndarray shape [n_samples, NP] The matrix of features, where NP is the number of polynomial features generated from the combination of inputs.'
def transform(self, X):
check_is_fitted(self, ['n_input_features_', 'n_output_features_']) X = check_array(X, dtype=FLOAT_DTYPES) (n_samples, n_features) = X.shape if (n_features != self.n_input_features_): raise ValueError('X shape does not match training shape') XP = np.empty((n_samples, self.n_...
def fit(self, X, y=None):
    """Do nothing and return the estimator unchanged.

    This method is just there to implement the usual API and hence
    work in pipelines; X is only validated.

    Parameters
    ----------
    X : array-like
    """
    check_array(X, accept_sparse='csr')
    return self
'Scale each non zero row of X to unit norm Parameters X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. y : (ignored) .. deprecated:: 0.19 This parameter will be removed in 0.21. copy : bool, opt...
def transform(self, X, y='deprecated', copy=None):
if ((not isinstance(y, string_types)) or (y != 'deprecated')): warnings.warn('The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21', DeprecationWarning) copy = (copy if (copy is not None) else self.copy) X = check_array(X, acce...
def fit(self, X, y=None):
    """Do nothing and return the estimator unchanged.

    This method exists only to implement the usual API and hence
    work in pipelines; X is only validated.

    Parameters
    ----------
    X : array-like
    """
    check_array(X, accept_sparse='csr')
    return self
'Binarize each element of X Parameters X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. y : (ignored) .. deprecated:: 0.19 This parameter will be removed in 0.21. copy : bool Copy the inp...
def transform(self, X, y='deprecated', copy=None):
if ((not isinstance(y, string_types)) or (y != 'deprecated')): warnings.warn('The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21', DeprecationWarning) copy = (copy if (copy is not None) else self.copy) return binarize(X, thre...
def fit(self, K, y=None):
    """Fit KernelCenterer.

    Parameters
    ----------
    K : numpy array of shape [n_samples, n_samples]
        Kernel matrix.

    Returns
    -------
    self : returns an instance of self.
    """
    K = check_array(K, dtype=FLOAT_DTYPES)
    n = K.shape[0]
    # Row means and grand mean of the training kernel, used later
    # to center test kernels.
    self.K_fit_rows_ = K.sum(axis=0) / n
    self.K_fit_all_ = self.K_fit_rows_.sum() / n
    return self
'Center kernel matrix. Parameters K : numpy array of shape [n_samples1, n_samples2] Kernel matrix. y : (ignored) .. deprecated:: 0.19 This parameter will be removed in 0.21. copy : boolean, optional, default True Set to False to perform inplace computation. Returns K_new : numpy array of shape [n_samples1, n_samples2]'...
def transform(self, K, y='deprecated', copy=True):
if ((not isinstance(y, string_types)) or (y != 'deprecated')): warnings.warn('The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21', DeprecationWarning) check_is_fitted(self, 'K_fit_all_') K = check_array(K, copy=copy, dtype=FL...
def fit(self, X, y=None):
    """Fit OneHotEncoder to X.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_feature]
        Input array of type int.

    Returns
    -------
    self
    """
    # Fitting is delegated to fit_transform; its output is discarded.
    self.fit_transform(X)
    return self
'Assumes X contains only categorical features.'
def _fit_transform(self, X):
X = check_array(X, dtype=np.int) if np.any((X < 0)): raise ValueError('X needs to contain only non-negative integers.') (n_samples, n_features) = X.shape if (isinstance(self.n_values, six.string_types) and (self.n_values == 'auto')): n_values = (np.max(X, axis=0) + 1) ...
def fit_transform(self, X, y=None):
    """Fit OneHotEncoder to X, then transform X.

    Equivalent to self.fit(X).transform(X), but more convenient and
    more efficient. See fit for the parameters, transform for the
    return value.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_feature]
        Input array of type int.
    """
    return _transform_selected(X, self._fit_transform,
                               self.categorical_features, copy=True)
'Assumes X contains only categorical features.'
def _transform(self, X):
X = check_array(X, dtype=np.int) if np.any((X < 0)): raise ValueError('X needs to contain only non-negative integers.') (n_samples, n_features) = X.shape indices = self.feature_indices_ if (n_features != (indices.shape[0] - 1)): raise ValueError(('X has differ...
def transform(self, X):
    """Transform X using one-hot encoding.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_features]
        Input array of type int.

    Returns
    -------
    X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
        Transformed input.
    """
    return _transform_selected(X, self._transform,
                               self.categorical_features, copy=True)
'Compute percentiles for dense matrices. Parameters X : ndarray, shape (n_samples, n_features) The data used to scale along the features axis.'
def _dense_fit(self, X, random_state):
if self.ignore_implicit_zeros: warnings.warn("'ignore_implicit_zeros' takes effect only with sparse matrix. This parameter has no effect.") (n_samples, n_features) = X.shape references = (self.references_ * 100).tolist() self.quantiles_ = [] for col in X.T: ...
'Compute percentiles for sparse matrices. Parameters X : sparse matrix CSC, shape (n_samples, n_features) The data used to scale along the features axis. The sparse matrix needs to be nonnegative.'
def _sparse_fit(self, X, random_state):
(n_samples, n_features) = X.shape references = list(map((lambda x: (x * 100)), self.references_)) self.quantiles_ = [] for feature_idx in range(n_features): column_nnz_data = X.data[X.indptr[feature_idx]:X.indptr[(feature_idx + 1)]] if (len(column_nnz_data) > self.subsample): ...
'Compute the quantiles used for transforming. Parameters X : ndarray or sparse matrix, shape (n_samples, n_features) The data used to scale along the features axis. If a sparse matrix is provided, it will be converted into a sparse ``csc_matrix``. Additionally, the sparse matrix needs to be nonnegative if `ignore_impli...
def fit(self, X, y=None):
if (self.n_quantiles <= 0): raise ValueError(("Invalid value for 'n_quantiles': %d. The number of quantiles must be at least one." % self.n_quantiles)) if (self.subsample <= 0): raise ValueError(("Invalid value for 'subsample': %d. The num...
'Private function to transform a single feature'
def _transform_col(self, X_col, quantiles, inverse):
if (self.output_distribution == 'normal'): output_distribution = 'norm' else: output_distribution = self.output_distribution output_distribution = getattr(stats, output_distribution) if (not inverse): lower_bound_x = quantiles[0] upper_bound_x = quantiles[(-1)] lo...
'Check inputs before fit and transform'
def _check_inputs(self, X, accept_sparse_negative=False):
X = check_array(X, accept_sparse='csc', copy=self.copy, dtype=[np.float64, np.float32]) if ((not accept_sparse_negative) and (not self.ignore_implicit_zeros) and (sparse.issparse(X) and np.any((X.data < 0)))): raise ValueError('QuantileTransformer only accepts non-negative sparse matrices...
def _check_is_fitted(self, X):
    """Check the inputs before transforming.

    Raises if the transformer is unfitted or X has a different number
    of features than the data seen at fit time.
    """
    check_is_fitted(self, 'quantiles_')
    n_features_X = X.shape[1]
    n_features_fit = self.quantiles_.shape[1]
    if n_features_X != n_features_fit:
        raise ValueError('X does not have the same number of features as the previously fitted data. Got {} instead of {}.'.format(n_features_X, n_features_fit))
'Forward and inverse transform. Parameters X : ndarray, shape (n_samples, n_features) The data used to scale along the features axis. inverse : bool, optional (default=False) If False, apply forward transform. If True, apply inverse transform. Returns X : ndarray, shape (n_samples, n_features) Projected data'
def _transform(self, X, inverse=False):
if sparse.issparse(X): for feature_idx in range(X.shape[1]): column_slice = slice(X.indptr[feature_idx], X.indptr[(feature_idx + 1)]) X.data[column_slice] = self._transform_col(X.data[column_slice], self.quantiles_[:, feature_idx], inverse) else: for feature_idx in range(...
def transform(self, X):
    """Feature-wise transformation of the data.

    Parameters
    ----------
    X : ndarray or sparse matrix, shape (n_samples, n_features)
        The data used to scale along the features axis.
    """
    validated = self._check_inputs(X)
    self._check_is_fitted(validated)
    return self._transform(validated, inverse=False)
def inverse_transform(self, X):
    """Back-projection to the original space.

    Parameters
    ----------
    X : ndarray or sparse matrix, shape (n_samples, n_features)
        The data used to scale along the features axis.
    """
    # Negative sparse entries are legal here because we map back to
    # the original space.
    validated = self._check_inputs(X, accept_sparse_negative=True)
    self._check_is_fitted(validated)
    return self._transform(validated, inverse=True)
def fit(self, y):
    """Fit label encoder.

    Parameters
    ----------
    y : array-like of shape (n_samples,)
        Target values.

    Returns
    -------
    self : returns an instance of self.
    """
    y = column_or_1d(y, warn=True)
    # classes_ holds the sorted unique labels.
    self.classes_ = np.unique(y)
    return self
def fit_transform(self, y):
    """Fit label encoder and return encoded labels.

    Parameters
    ----------
    y : array-like of shape [n_samples]
        Target values.

    Returns
    -------
    y : array-like of shape [n_samples]
    """
    y = column_or_1d(y, warn=True)
    # np.unique gives both the sorted classes and the inverse mapping
    # (the encoded labels) in one pass.
    self.classes_, encoded = np.unique(y, return_inverse=True)
    return encoded
'Transform labels to normalized encoding. Parameters y : array-like of shape [n_samples] Target values. Returns y : array-like of shape [n_samples]'
def transform(self, y):
check_is_fitted(self, 'classes_') y = column_or_1d(y, warn=True) classes = np.unique(y) if (len(np.intersect1d(classes, self.classes_)) < len(classes)): diff = np.setdiff1d(classes, self.classes_) raise ValueError(('y contains new labels: %s' % str(diff))) return np.searc...
def inverse_transform(self, y):
    """Transform labels back to original encoding.

    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values (integer indices into ``classes_``).

    Returns
    -------
    y : numpy array of shape [n_samples]
        Original labels.
    """
    check_is_fitted(self, 'classes_')
    diff = np.setdiff1d(y, np.arange(len(self.classes_)))
    # BUG FIX: the original used `if diff:`, which raises numpy's
    # "truth value of an array ... is ambiguous" ValueError whenever
    # more than one unseen label is present, hiding the intended
    # message. Testing the length reports the offending labels in
    # every case.
    if len(diff):
        raise ValueError('y contains new labels: %s' % str(diff))
    y = np.asarray(y)
    return self.classes_[y]
'Fit label binarizer Parameters y : array of shape [n_samples,] or [n_samples, n_classes] Target values. The 2-d matrix should only contain 0 and 1, represents multilabel classification. Returns self : returns an instance of self.'
def fit(self, y):
self.y_type_ = type_of_target(y) if ('multioutput' in self.y_type_): raise ValueError('Multioutput target data is not supported with label binarization') if (_num_samples(y) == 0): raise ValueError(('y has 0 samples: %r' % y)) self.sparse_input_ = sp.i...
def fit_transform(self, y):
    """Fit label binarizer and transform multi-class labels to binary labels.

    The output of transform is sometimes referred to as the 1-of-K
    coding scheme.

    Parameters
    ----------
    y : array or sparse matrix of shape [n_samples,] or [n_samples, n_classes]
        Target values.
    """
    fitted = self.fit(y)
    return fitted.transform(y)
'Transform multi-class labels to binary labels The output of transform is sometimes referred to by some authors as the 1-of-K coding scheme. Parameters y : array or sparse matrix of shape [n_samples,] or [n_samples, n_classes] Target values. The 2-d matrix should only contain 0 and 1, represents multilabel ...
def transform(self, y):
check_is_fitted(self, 'classes_') y_is_multilabel = type_of_target(y).startswith('multilabel') if (y_is_multilabel and (not self.y_type_.startswith('multilabel'))): raise ValueError('The object was not fitted with multilabel input.') return label_binarize(y, self.classes_, p...
'Transform binary labels back to multi-class labels Parameters Y : numpy array or sparse matrix with shape [n_samples, n_classes] Target values. All sparse matrices are converted to CSR before inverse transformation. threshold : float or None Threshold used in the binary and multi-label cases. Use 0 when ``Y`` contains...
def inverse_transform(self, Y, threshold=None):
check_is_fitted(self, 'classes_') if (threshold is None): threshold = ((self.pos_label + self.neg_label) / 2.0) if (self.y_type_ == 'multiclass'): y_inv = _inverse_binarize_multiclass(Y, self.classes_) else: y_inv = _inverse_binarize_thresholding(Y, self.y_type_, self.classes_, t...
def fit(self, y):
    """Fit the label sets binarizer, storing ``classes_``.

    Parameters
    ----------
    y : iterable of iterables
        A set of labels (any orderable and hashable object) for each
        sample. If the `classes` parameter is set, `y` will not be
        iterated.

    Returns
    -------
    self : returns this MultiLabelBinarizer instance
    """
    if self.classes is None:
        # Collect every distinct label across all samples, sorted.
        classes = sorted(set(itertools.chain.from_iterable(y)))
    else:
        classes = self.classes
    # BUG FIX: `np.int` was a deprecated alias of the builtin `int`
    # and was removed in NumPy 1.24; use `int` directly.
    dtype = int if all(isinstance(c, int) for c in classes) else object
    self.classes_ = np.empty(len(classes), dtype=dtype)
    self.classes_[:] = classes
    return self
'Fit the label sets binarizer and transform the given label sets Parameters y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the `classes` parameter is set, `y` will not be iterated. Returns y_indicator : array or CSR matrix, shape (n_samples, n_classes) A matrix such th...
def fit_transform(self, y):
if (self.classes is not None): return self.fit(y).transform(y) class_mapping = defaultdict(int) class_mapping.default_factory = class_mapping.__len__ yt = self._transform(y, class_mapping) tmp = sorted(class_mapping, key=class_mapping.get) dtype = (np.int if all((isinstance(c, int) for c...
def transform(self, y):
    """Transform the given label sets.

    Parameters
    ----------
    y : iterable of iterables
        A set of labels for each sample.

    Returns
    -------
    y_indicator : array or CSR matrix, shape (n_samples, n_classes)
        A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]`
        is in `y[i]`.
    """
    check_is_fitted(self, 'classes_')
    # Map each known class to its column index.
    index_of = {cls: idx for idx, cls in enumerate(self.classes_)}
    yt = self._transform(y, index_of)
    if not self.sparse_output:
        yt = yt.toarray()
    return yt
'Transforms the label sets with a given mapping Parameters y : iterable of iterables class_mapping : Mapping Maps from label to column index in label indicator matrix Returns y_indicator : sparse CSR matrix, shape (n_samples, n_classes) Label indicator matrix'
def _transform(self, y, class_mapping):
indices = array.array('i') indptr = array.array('i', [0]) for labels in y: indices.extend(set((class_mapping[label] for label in labels))) indptr.append(len(indices)) data = np.ones(len(indices), dtype=int) return sp.csr_matrix((data, indices, indptr), shape=((len(indptr) - 1), len(c...
'Transform the given indicator matrix into label sets Parameters yt : array or sparse matrix of shape (n_samples, n_classes) A matrix containing only 1s ands 0s. Returns y : list of tuples The set of labels for each sample such that `y[i]` consists of `classes_[j]` for each `yt[i, j] == 1`.'
def inverse_transform(self, yt):
check_is_fitted(self, 'classes_') if (yt.shape[1] != len(self.classes_)): raise ValueError('Expected indicator for {0} classes, but got {1}'.format(len(self.classes_), yt.shape[1])) if sp.issparse(yt): yt = yt.tocsr() if ((len(yt.data) != 0) and (len(np.setdiff1d...
'Check the estimator and the n_estimator attribute, set the `base_estimator_` attribute.'
def _validate_estimator(self, default=None):
if (not isinstance(self.n_estimators, (numbers.Integral, np.integer))): raise ValueError('n_estimators must be an integer, got {0}.'.format(type(self.n_estimators))) if (self.n_estimators <= 0): raise ValueError('n_estimators must be greater than zero, got ...
def _make_estimator(self, append=True, random_state=None):
    """Make and configure a copy of the ``base_estimator_`` attribute.

    Warning: This method should be used to properly instantiate new
    sub-estimators.
    """
    estimator = clone(self.base_estimator_)
    # Propagate the ensemble-level parameters onto the fresh sub-estimator.
    shared_params = {name: getattr(self, name) for name in self.estimator_params}
    estimator.set_params(**shared_params)
    if random_state is not None:
        _set_random_states(estimator, random_state)
    if append:
        self.estimators_.append(estimator)
    return estimator
def __len__(self):
    """Return the number of estimators in the ensemble."""
    return len(self.estimators_)
def __getitem__(self, index):
    """Return the ``index``'th estimator in the ensemble."""
    return self.estimators_[index]
def __iter__(self):
    """Return an iterator over the estimators in the ensemble."""
    return iter(self.estimators_)
def init_estimator(self):
    """Default ``init`` estimator for loss function.

    Concrete loss classes must override this; the base implementation
    always raises.

    Raises
    ------
    NotImplementedError
        Always, since the base loss has no default ``init`` estimator.
    """
    raise NotImplementedError()
'Update the terminal regions (=leaves) of the given tree and updates the current predictions of the model. Traverses tree and invokes template method `_update_terminal_region`. Parameters tree : tree.Tree The tree object. X : ndarray, shape=(n, m) The data array. y : ndarray, shape=(n,) The target labels. residual : nd...
def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_weight, sample_mask, learning_rate=1.0, k=0):
terminal_regions = tree.apply(X) masked_terminal_regions = terminal_regions.copy() masked_terminal_regions[(~ sample_mask)] = (-1) for leaf in np.where((tree.children_left == TREE_LEAF))[0]: self._update_terminal_region(tree, masked_terminal_regions, leaf, X, y, residual, y_pred[:, k], sample_we...
def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_weight, sample_mask, learning_rate=1.0, k=0):
    """Least squares does not need to update terminal regions.

    But it has to update the predictions: the (shrunken) predictions of
    this stage's tree are added in place to column ``k`` of ``y_pred``.
    """
    stage_prediction = tree.predict(X).ravel()
    y_pred[:, k] += learning_rate * stage_prediction
def negative_gradient(self, y, pred, **kargs):
    """Return 1.0 where ``y - pred > 0.0``, else -1.0 (sign of residual)."""
    residual_positive = (y - pred.ravel()) > 0.0
    # Equivalent to 2.0 * bool - 1.0: maps True -> 1.0, False -> -1.0.
    return np.where(residual_positive, 1.0, -1.0)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight):
    """LAD updates terminal regions to median estimates.

    The leaf value becomes the weighted median (50th weighted
    percentile) of ``y - pred`` over the samples falling into ``leaf``.
    """
    in_leaf = np.nonzero(terminal_regions == leaf)[0]
    leaf_weights = sample_weight.take(in_leaf, axis=0)
    leaf_diff = y.take(in_leaf, axis=0) - pred.take(in_leaf, axis=0)
    tree.value[leaf, 0, 0] = _weighted_percentile(leaf_diff, leaf_weights, percentile=50)
def _score_to_proba(self, score):
    """Template method to convert scores to probabilities.

    Losses that support probabilities override this; the base
    implementation always raises.

    Raises
    ------
    TypeError
        Always — this loss does not support ``predict_proba``.
        (Note: the previous docstring incorrectly documented
        ``AttributeError``; the code raises ``TypeError``.)
    """
    raise TypeError('%s does not support predict_proba' % type(self).__name__)
def __call__(self, y, pred, sample_weight=None):
    """Compute the deviance (= 2 * negative log-likelihood).

    ``np.logaddexp(0.0, pred)`` evaluates ``log(1 + exp(pred))`` in a
    numerically stable way.
    """
    pred = pred.ravel()
    # Per-sample binomial log-likelihood term.
    loglik = y * pred - np.logaddexp(0.0, pred)
    if sample_weight is None:
        return -2.0 * np.mean(loglik)
    return (-2.0 / sample_weight.sum()) * np.sum(sample_weight * loglik)
def negative_gradient(self, y, pred, **kargs):
    """Compute the residual (= negative gradient)."""
    # expit(pred) is the sigmoid, i.e. the predicted positive-class
    # probability; the residual is label minus probability.
    proba = expit(pred.ravel())
    return y - proba
'Make a single Newton-Raphson step. our node estimate is given by: sum(w * (y - prob)) / sum(w * prob * (1 - prob)) we take advantage that: y - prob = residual'
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight):
terminal_region = np.where((terminal_regions == leaf))[0] residual = residual.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) numerator = np.sum((sample_weight * residual)) denominator = np.sum(((sample_weight * (y - r...
def negative_gradient(self, y, pred, k=0, **kwargs):
    """Compute negative gradient for the ``k``-th class.

    The gradient is ``y - softmax(pred)[:, k]``; the softmax is
    evaluated through ``logsumexp`` for numerical stability, and
    ``nan_to_num`` guards against non-finite values from overflow.
    """
    log_proba_k = pred[:, k] - logsumexp(pred, axis=1)
    return y - np.nan_to_num(np.exp(log_proba_k))
'Make a single Newton-Raphson step.'
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight):
terminal_region = np.where((terminal_regions == leaf))[0] residual = residual.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) numerator = np.sum((sample_weight * residual)) numerator *= ((self.K - 1) / self.K) deno...
'Update reporter with new iteration.'
def update(self, j, est):
do_oob = (est.subsample < 1) i = (j - self.begin_at_stage) if (((i + 1) % self.verbose_mod) == 0): oob_impr = (est.oob_improvement_[j] if do_oob else 0) remaining_time = (((est.n_estimators - (j + 1)) * (time() - self.start_time)) / float((i + 1))) if (remaining_time > 60): ...
'Fit another stage of ``n_classes_`` trees to the boosting model.'
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask, random_state, X_idx_sorted, X_csc=None, X_csr=None):
assert (sample_mask.dtype == np.bool) loss = self.loss_ original_y = y for k in range(loss.K): if loss.is_multi_class: y = np.array((original_y == k), dtype=np.float64) residual = loss.negative_gradient(y, y_pred, k=k, sample_weight=sample_weight) tree = DecisionTreeR...
'Check validity of parameters and raise ValueError if not valid.'
def _check_params(self):
if (self.n_estimators <= 0): raise ValueError(('n_estimators must be greater than 0 but was %r' % self.n_estimators)) if (self.learning_rate <= 0.0): raise ValueError(('learning_rate must be greater than 0 but was %r' % self.learning_rate)) if ...
'Initialize model state and allocate model state data structures.'
def _init_state(self):
if (self.init is None): self.init_ = self.loss_.init_estimator() elif isinstance(self.init, six.string_types): self.init_ = INIT_ESTIMATORS[self.init]() else: self.init_ = self.init self.estimators_ = np.empty((self.n_estimators, self.loss_.K), dtype=np.object) self.train_sco...
'Clear the state of the gradient boosting model.'
def _clear_state(self):
if hasattr(self, 'estimators_'): self.estimators_ = np.empty((0, 0), dtype=np.object) if hasattr(self, 'train_score_'): del self.train_score_ if hasattr(self, 'oob_improvement_'): del self.oob_improvement_ if hasattr(self, 'init_'): del self.init_ if hasattr(self, '_r...
'Add additional ``n_estimators`` entries to all attributes.'
def _resize_state(self):
total_n_estimators = self.n_estimators if (total_n_estimators < self.estimators_.shape[0]): raise ValueError(('resize with smaller n_estimators %d < %d' % (total_n_estimators, self.estimators_[0]))) self.estimators_.resize((total_n_estimators, self.loss_.K)) self.train_score_.r...
def _check_initialized(self):
    """Check that the estimator is initialized, raising an error if not."""
    # Delegates to sklearn's check_is_fitted on ``estimators_``;
    # presumably raises NotFittedError when ``fit`` has not been called —
    # confirm against sklearn.utils.validation.check_is_fitted.
    check_is_fitted(self, 'estimators_')
'Fit the gradient boosting model. Parameters X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (integers in classification, real numbers in regression) For classification,...
def fit(self, X, y, sample_weight=None, monitor=None):
if (not self.warm_start): self._clear_state() (X, y) = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE) (n_samples, self.n_features_) = X.shape if (sample_weight is None): sample_weight = np.ones(n_samples, dtype=np.float32) else: sample_weight = column_or_1d...
'Iteratively fits the stages. For each stage it computes the progress (OOB, train score) and delegates to ``_fit_stage``. Returns the number of stages fit; might differ from ``n_estimators`` due to early stopping.'
def _fit_stages(self, X, y, y_pred, sample_weight, random_state, X_val, y_val, sample_weight_val, begin_at_stage=0, monitor=None, X_idx_sorted=None):
n_samples = X.shape[0] do_oob = (self.subsample < 1.0) sample_mask = np.ones((n_samples,), dtype=np.bool) n_inbag = max(1, int((self.subsample * n_samples))) loss_ = self.loss_ if ((self.min_weight_fraction_leaf != 0.0) and (sample_weight is not None)): min_weight_leaf = (self.min_weight...
'Check input and compute prediction of ``init``.'
def _init_decision_function(self, X):
self._check_initialized() X = self.estimators_[(0, 0)]._validate_X_predict(X, check_input=True) if (X.shape[1] != self.n_features_): raise ValueError('X.shape[1] should be {0:d}, not {1:d}.'.format(self.n_features_, X.shape[1])) score = self.init_.predict(X).astype(np.float64) ...
'Compute decision function of ``X`` for each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is ...
def _staged_decision_function(self, X):
    """Compute the decision function of ``X`` for each iteration.

    Yields the cumulative score after each boosting stage, which allows
    monitoring (e.g. error on a test set) as stages are added.
    """
    X = check_array(X, dtype=DTYPE, order='C', accept_sparse='csr')
    score = self._init_decision_function(X)
    n_stages = self.estimators_.shape[0]
    for stage_idx in range(n_stages):
        # predict_stage accumulates this stage's contribution into
        # ``score`` in place; yield a copy so callers can keep snapshots.
        predict_stage(self.estimators_, stage_idx, X, self.learning_rate, score)
        yield score.copy()
'Return the feature importances (the higher, the more important the feature). Returns feature_importances_ : array, shape = [n_features]'
@property def feature_importances_(self):
self._check_initialized() total_sum = np.zeros((self.n_features_,), dtype=np.float64) for stage in self.estimators_: stage_sum = (sum((tree.feature_importances_ for tree in stage)) / len(stage)) total_sum += stage_sum importances = (total_sum / len(self.estimators_)) return importanc...
'Apply trees in the ensemble to X, return leaf indices. .. versionadded:: 0.17 Parameters X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, its dtype will be converted to ``dtype=np.float32``. If a sparse matrix is provided, it will be converted to a sparse ``csr_matrix``. ...
def apply(self, X):
self._check_initialized() X = self.estimators_[(0, 0)]._validate_X_predict(X, check_input=True) (n_estimators, n_classes) = self.estimators_.shape leaves = np.zeros((X.shape[0], n_estimators, n_classes)) for i in range(n_estimators): for j in range(n_classes): estimator = self.es...
'Compute the decision function of ``X``. Parameters X : array-like or sparse matrix, shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns score : array, shape = [n_samples, n_classes] or [n_samp...
def decision_function(self, X):
    """Compute the decision function of ``X``.

    Parameters
    ----------
    X : array-like or sparse matrix, shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    score : array, shape = [n_samples, n_classes] or [n_samples]
        The decision function of the input samples; a single-column
        result is ravelled to a 1-D array.
    """
    X = check_array(X, dtype=DTYPE, order='C', accept_sparse='csr')
    score = self._decision_function(X)
    return score.ravel() if score.shape[1] == 1 else score