desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'Predict the class labels for the provided data
Parameters
X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == \'precomputed\'
Test samples.
Returns
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.'
| def predict(self, X):
| X = check_array(X, accept_sparse='csr')
(neigh_dist, neigh_ind) = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if (not self.outputs_2d_):
_y = self._y.reshape(((-1), 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get... |
'Return probability estimates for the test data X.
Parameters
X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == \'precomputed\'
Test samples.
Returns
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabiliti... | def predict_proba(self, X):
| X = check_array(X, accept_sparse='csr')
(neigh_dist, neigh_ind) = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if (not self.outputs_2d_):
_y = self._y.reshape(((-1), 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weig... |
'Predict the class labels for the provided data
Parameters
X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == \'precomputed\'
Test samples.
Returns
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.'
| def predict(self, X):
| X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
(neigh_dist, neigh_ind) = self.radius_neighbors(X)
inliers = [i for (i, nind) in enumerate(neigh_ind) if (len(nind) != 0)]
outliers = [i for (i, nind) in enumerate(neigh_ind) if (len(nind) == 0)]
classes_ = self.classes_
_y = sel... |
'"Fits the model to the training set X and returns the labels
(1 inlier, -1 outlier) on the training set according to the LOF score
and the contamination parameter.
Parameters
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the traini... | def fit_predict(self, X, y=None):
| return self.fit(X)._predict()
|
'Fit the model using X as training data.
Parameters
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric=\'precomputed\'.
Returns
self : object
Returns self.'
| def fit(self, X, y=None):
| if (not (0.0 < self.contamination <= 0.5)):
raise ValueError('contamination must be in (0, 0.5]')
super(LocalOutlierFactor, self).fit(X)
n_samples = self._fit_X.shape[0]
if (self.n_neighbors > n_samples):
warn(('n_neighbors (%s) is greater than the total ... |
'Predict the labels (1 inlier, -1 outlier) of X according to LOF.
If X is None, returns the same as fit_predict(X_train).
This method allows to generalize prediction to new observations (not
in the training set). As LOF originally does not deal with new data,
this method is kept private.
Parameters
X : array-like, shap... | def _predict(self, X=None):
| check_is_fitted(self, ['threshold_', 'negative_outlier_factor_', 'n_neighbors_', '_distances_fit_X_'])
if (X is not None):
X = check_array(X, accept_sparse='csr')
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[(self._decision_function(X) <= self.threshold_)] = (-1)
else:
... |
'Opposite of the Local Outlier Factor of X (as bigger is better,
i.e. large values correspond to inliers).
The argument X is supposed to contain *new data*: if X contains a
point from training, it consider the later in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
The... | def _decision_function(self, X):
| check_is_fitted(self, ['threshold_', 'negative_outlier_factor_', '_distances_fit_X_'])
X = check_array(X, accept_sparse='csr')
(distances_X, neighbors_indices_X) = self.kneighbors(X, n_neighbors=self.n_neighbors_)
X_lrd = self._local_reachability_density(distances_X, neighbors_indices_X)
lrd_ratios_... |
'The local reachability density (LRD)
The LRD of a sample is the inverse of the average reachability
distance of its k-nearest neighbors.
Parameters
distances_X : array, shape (n_query, self.n_neighbors)
Distances to the neighbors (in the training samples `self._fit_X`)
of each query point to compute the LRD.
neighbors... | def _local_reachability_density(self, distances_X, neighbors_indices):
| dist_k = self._distances_fit_X_[(neighbors_indices, (self.n_neighbors_ - 1))]
reach_dist_array = np.maximum(distances_X, dist_k)
return (1.0 / (np.mean(reach_dist_array, axis=1) + 1e-10))
|
'Fit the NearestCentroid model according to the given training data.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array... | def fit(self, X, y):
| if (self.metric == 'precomputed'):
raise ValueError('Precomputed is not supported.')
if (self.metric == 'manhattan'):
(X, y) = check_X_y(X, y, ['csc'])
else:
(X, y) = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if (is_X_sparse and self.shrink_thresho... |
'Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
X : array-like, shape = [n_samples, n_features]
Returns
C : array, shape = [n_samples]
Notes
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the ... | def predict(self, X):
| check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(X, self.centroids_, metric=self.metric).argmin(axis=1)]
|
'Predict the target for the provided data
Parameters
X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == \'precomputed\'
Test samples.
Returns
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values'
| def predict(self, X):
| X = check_array(X, accept_sparse='csr')
(neigh_dist, neigh_ind) = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if (_y.ndim == 1):
_y = _y.reshape(((-1), 1))
if (weights is None):
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred ... |
'Predict the target for the provided data
Parameters
X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == \'precomputed\'
Test samples.
Returns
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values'
| def predict(self, X):
| X = check_array(X, accept_sparse='csr')
(neigh_dist, neigh_ind) = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if (_y.ndim == 1):
_y = _y.reshape(((-1), 1))
if (weights is None):
y_pred = np.array([np.mean(_y[ind, :], axis=0) for ind in n... |
'Perform classification on an array of test vectors X.
Parameters
X : array-like, shape = [n_samples, n_features]
Returns
C : array, shape = [n_samples]
Predicted target values for X'
| def predict(self, X):
| jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
|
'Return log-probability estimates for the test vector X.
Parameters
X : array-like, shape = [n_samples, n_features]
Returns
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the ... | def predict_log_proba(self, X):
| jll = self._joint_log_likelihood(X)
log_prob_x = logsumexp(jll, axis=1)
return (jll - np.atleast_2d(log_prob_x).T)
|
'Return probability estimates for the test vector X.
Parameters
X : array-like, shape = [n_samples, n_features]
Returns
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribut... | def predict_proba(self, X):
| return np.exp(self.predict_log_proba(X))
|
'Fit Gaussian Naive Bayes according to X, y
Parameters
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional (default=No... | def fit(self, X, y, sample_weight=None):
| (X, y) = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True, sample_weight=sample_weight)
|
'Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and v... | @staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
| if (X.shape[0] == 0):
return (mu, var)
if (sample_weight is not None):
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=(sample_weight / n_new))
new_var = np.average(((X - new_mu) ** 2), axis=0, weights=(sample_weight / n_new))
else:
n_new = X... |
'Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerica... | def partial_fit(self, X, y, classes=None, sample_weight=None):
| return self._partial_fit(X, y, classes, _refit=False, sample_weight=sample_weight)
|
'Actual implementation of Gaussian NB fitting.
Parameters
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)... | def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None):
| (X, y) = check_X_y(X, y)
if (sample_weight is not None):
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
epsilon = (1e-09 * np.var(X, axis=0).max())
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, cl... |
'Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hen... | def partial_fit(self, X, y, classes=None, sample_weight=None):
| X = check_array(X, accept_sparse='csr', dtype=np.float64)
(_, n_features) = X.shape
if _check_partial_fit_first_call(self, classes):
n_effective_classes = (len(classes) if (len(classes) > 1) else 2)
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_coun... |
'Fit Naive Bayes classifier according to X, y
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples... | def fit(self, X, y, sample_weight=None):
| (X, y) = check_X_y(X, y, 'csr')
(_, n_features) = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if (Y.shape[1] == 1):
Y = np.concatenate(((1 - Y), Y), axis=1)
Y = Y.astype(np.float64)
if (sample_weight is not None):
sa... |
'Count and smooth feature occurrences.'
| def _count(self, X, Y):
| if np.any(((X.data if issparse(X) else X) < 0)):
raise ValueError('Input X must be non-negative')
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
|
'Apply smoothing to raw counts and recompute log probabilities'
| def _update_feature_log_prob(self, alpha):
| smoothed_fc = (self.feature_count_ + alpha)
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape((-1), 1)))
|
'Calculate the posterior log probability of the samples X'
| def _joint_log_likelihood(self, X):
| check_is_fitted(self, 'classes_')
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T) + self.class_log_prior_)
|
'Count and smooth feature occurrences.'
| def _count(self, X, Y):
| if (self.binarize is not None):
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
|
'Apply smoothing to raw counts and recompute log probabilities'
| def _update_feature_log_prob(self, alpha):
| smoothed_fc = (self.feature_count_ + alpha)
smoothed_cc = (self.class_count_ + (alpha * 2))
self.feature_log_prob_ = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape((-1), 1)))
|
'Calculate the posterior log probability of the samples X'
| def _joint_log_likelihood(self, X):
| check_is_fitted(self, 'classes_')
X = check_array(X, accept_sparse='csr')
if (self.binarize is not None):
X = binarize(X, threshold=self.binarize)
(n_classes, n_features) = self.feature_log_prob_.shape
(n_samples, n_features_X) = X.shape
if (n_features_X != n_features):
raise Val... |
'Validate X whenever one tries to predict, apply, predict_proba'
| def _validate_X_predict(self, X, check_input):
| if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
if (issparse(X) and ((X.indices.dtype != np.intc) or (X.indptr.dtype != np.intc))):
raise ValueError('No support for np.int64 index based sparse matrices')
n_features = X.shape[1]
if (self.n... |
'Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converte... | def predict(self, X, check_input=True):
| check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
if is_classifier(self):
if (self.n_outputs_ == 1):
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = ... |
'Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boo... | def apply(self, X, check_input=True):
| check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
|
'Return the decision path in the tree
.. versionadded:: 0.18
Parameters
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow ... | def decision_path(self, X, check_input=True):
| X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
|
'Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
feature_importances_ : array, shape = [n_features]'
| @property
def feature_importances_(self):
| check_is_fitted(self, 'tree_')
return self.tree_.compute_feature_importances()
|
'Build a decision tree classifier from the training set (X, y).
Parameters
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_sampl... | def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None):
| super(DecisionTreeClassifier, self).fit(X, y, sample_weight=sample_weight, check_input=check_input, X_idx_sorted=X_idx_sorted)
return self
|
'Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don\'t use this parameter unless you know what you do.
Parameters
X : array-like or sparse matrix of ... | def predict_proba(self, X, check_input=True):
| check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if (self.n_outputs_ == 1):
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[(normalizer == 0.0)] = 1.0
proba /= normalizer
... |
'Predict class log-probabilities of the input samples X.
Parameters
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
p : array of shape = [n_samples, n_cla... | def predict_log_proba(self, X):
| proba = self.predict_proba(X)
if (self.n_outputs_ == 1):
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
|
'Build a decision tree regressor from the training set (X, y).
Parameters
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_sample... | def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None):
| super(DecisionTreeRegressor, self).fit(X, y, sample_weight=sample_weight, check_input=check_input, X_idx_sorted=X_idx_sorted)
return self
|
'Fit the random classifier.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
sample_weight : array-like of shape = ... | def fit(self, X, y, sample_weight=None):
| X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], force_all_finite=False)
if (self.strategy not in ('most_frequent', 'stratified', 'uniform', 'constant', 'prior')):
raise ValueError('Unknown strategy type.')
if ((self.strategy == 'uniform') and sp.issparse(y)):
y = y.toarray()
... |
'Perform classification on test vectors X.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.'
| def predict(self, X):
| check_is_fitted(self, 'classes_')
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], force_all_finite=False)
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.... |
'Return probability estimates for the test vectors X.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
P : array-like or list of array-lke of shape = [n_samples, n_classes]
Returns the pr... | def predict_proba(self, X):
| check_is_fitted(self, 'classes_')
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], force_all_finite=False)
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.... |
'Return log probability estimates for the test vectors X.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns t... | def predict_log_proba(self, X):
| proba = self.predict_proba(X)
if (self.n_outputs_ == 1):
return np.log(proba)
else:
return [np.log(p) for p in proba]
|
'Fit the random regressor.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
sample_weight : array-like of shape = [... | def fit(self, X, y, sample_weight=None):
| X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], force_all_finite=False)
if (self.strategy not in ('mean', 'median', 'quantile', 'constant')):
raise ValueError(("Unknown strategy type: %s, expected 'mean', 'median', 'quantile' or 'constant'" % self.strategy))
y = ch... |
'Perform classification on test vectors X.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.'
| def predict(self, X):
| check_is_fitted(self, 'constant_')
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], force_all_finite=False)
n_samples = X.shape[0]
y = (np.ones((n_samples, 1)) * self.constant_)
if ((self.n_outputs_ == 1) and (not self.output_2d_)):
y = np.ravel(y)
return y
|
'The Gaussian Process model fitting method.
Parameters
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
gp... | def fit(self, X, y):
| self._check_params()
self.random_state = check_random_state(self.random_state)
(X, y) = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if (y.ndim == 1):
y = y[:, np.newaxis]
(n_samples, n_features) = X.shape
(_, n_targets) = y.shape
self._check_params(n_... |
'This function evaluates the Gaussian Process model at x.
Parameters
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE... | def predict(self, X, eval_MSE=False, batch_size=None):
| check_is_fitted(self, 'X')
X = check_array(X)
(n_eval, _) = X.shape
(n_samples, n_features) = self.X.shape
(n_samples_y, n_targets) = self.y.shape
self._check_params(n_samples)
if (X.shape[1] != n_features):
raise ValueError(('The number of features in X (X.shape[1]... |
'This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evalua... | def reduced_likelihood_function(self, theta=None):
| check_is_fitted(self, 'X')
if (theta is None):
theta = self.theta_
reduced_likelihood_function_value = (- np.inf)
par = {}
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if (D is None):
(D, ij) = l1_cross_distances(self.X)
if ((np.min(np.sum(D,... |
'This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
self : All parameters are stored in the Gaussian Process model object.
Returns
optimal_theta : array_like
The b... | def _arg_max_reduced_likelihood_function(self):
| best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print(('The chosen optimizer is: ' + str(self.optimizer)))
if (self.random_start > 1):
print((str(self.random_start) + ' random starts are required.'))
perce... |
'Fit Gaussian process classification model
Parameters
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
self : returns an instance of self.'
| def fit(self, X, y):
| if (self.kernel is None):
self.kernel_ = (C(1.0, constant_value_bounds='fixed') * RBF(1.0, length_scale_bounds='fixed'))
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = (np.copy(X) if self.copy_X_train else X)
label_encoder = L... |
'Perform classification on an array of test vectors X.
Parameters
X : array-like, shape = (n_samples, n_features)
Returns
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``'
| def predict(self, X):
| check_is_fitted(self, ['X_train_', 'y_train_', 'pi_', 'W_sr_', 'L_'])
K_star = self.kernel_(self.X_train_, X)
f_star = K_star.T.dot((self.y_train_ - self.pi_))
return np.where((f_star > 0), self.classes_[1], self.classes_[0])
|
'Return probability estimates for the test vector X.
Parameters
X : array-like, shape = (n_samples, n_features)
Returns
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribut... | def predict_proba(self, X):
| check_is_fitted(self, ['X_train_', 'y_train_', 'pi_', 'W_sr_', 'L_'])
K_star = self.kernel_(self.X_train_, X)
f_star = K_star.T.dot((self.y_train_ - self.pi_))
v = solve(self.L_, (self.W_sr_[:, np.newaxis] * K_star))
var_f_star = (self.kernel_.diag(X) - np.einsum('ij,ij->j', v, v))
alpha = (1 / ... |
'Returns log-marginal likelihood of theta for training data.
Parameters
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default:... | def log_marginal_likelihood(self, theta=None, eval_gradient=False):
| if (theta is None):
if eval_gradient:
raise ValueError('Gradient can only be evaluated for theta!=None')
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
(K, K_gradient) = kernel(self.X_train_, ev... |
'Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton\'s iteration to find the mode of this approximation.'
| def _posterior_mode(self, K, return_temporaries=False):
| if (self.warm_start and hasattr(self, 'f_cached') and (self.f_cached.shape == self.y_train_.shape)):
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
log_marginal_likelihood = (- np.inf)
for _ in range(self.max_iter_predict):
pi = expit(f)
W = (p... |
'Fit Gaussian process classification model
Parameters
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
self : returns an instance of self.'
| def fit(self, X, y):
| (X, y) = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(self.kernel, self.optimizer, self.n_restarts_optimizer, self.max_iter_predict, self.warm_start, self.copy_X_train, self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.s... |
'Perform classification on an array of test vectors X.
Parameters
X : array-like, shape = (n_samples, n_features)
Returns
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``'
| def predict(self, X):
| check_is_fitted(self, ['classes_', 'n_classes_'])
X = check_array(X)
return self.base_estimator_.predict(X)
|
'Return probability estimates for the test vector X.
Parameters
X : array-like, shape = (n_samples, n_features)
Returns
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribut... | def predict_proba(self, X):
| check_is_fitted(self, ['classes_', 'n_classes_'])
if ((self.n_classes_ > 2) and (self.multi_class == 'one_vs_one')):
raise ValueError('one_vs_one multi-class mode does not support predicting probability estimates. Use one_vs_rest mode instead.')
X = check_array(X)... |
'Returns log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers are returned.
Parameters
theta : array-like, shape = (n_kernel_params,) or none
Kernel hyperparameters for which the log-marginal likelihood is
eval... | def log_marginal_likelihood(self, theta=None, eval_gradient=False):
| check_is_fitted(self, ['classes_', 'n_classes_'])
if (theta is None):
if eval_gradient:
raise ValueError('Gradient can only be evaluated for theta!=None')
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if (self.n_classes_ == 2):
... |
'Fit Gaussian process regression model.
Parameters
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
self : returns an instance of self.'
| def fit(self, X, y):
| if (self.kernel is None):
self.kernel_ = (C(1.0, constant_value_bounds='fixed') * RBF(1.0, length_scale_bounds='fixed'))
else:
self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
(X, y) = check_X_y(X, y, multi_output=True, y_numeric=True)
if self.norma... |
'Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, also its
standard deviation (return_std=True) or covariance (return_cov=True).
Note that at most one of the two can be requested.
Parameters
... | def predict(self, X, return_std=False, return_cov=False):
| if (return_std and return_cov):
raise RuntimeError('Not returning standard deviation of predictions when returning full covariance.')
X = check_array(X)
if (not hasattr(self, 'X_train_')):
if (self.kernel is None):
kernel = (C(1.0, constant_value_bounds... |
'Draw samples from Gaussian process and evaluate at X.
Parameters
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state : int, RandomState instance or None, optional (default=0)
If ... | def sample_y(self, X, n_samples=1, random_state=0):
| rng = check_random_state(random_state)
(y_mean, y_cov) = self.predict(X, return_cov=True)
if (y_mean.ndim == 1):
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = [rng.multivariate_normal(y_mean[:, i], y_cov, n_samples).T[:, np.newaxis] for i in range(y_me... |
'Returns log-marginal likelihood of theta for training data.
Parameters
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default:... | def log_marginal_likelihood(self, theta=None, eval_gradient=False):
| if (theta is None):
if eval_gradient:
raise ValueError('Gradient can only be evaluated for theta!=None')
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
(K, K_gradient) = kernel(self.X_train_, ev... |
'Get parameters of this kernel.
Parameters
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
params : mapping of string to any
Parameter names mapped to their values.'
| def get_params(self, deep=True):
| params = dict()
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
(args, varargs) = ([], [])
for parameter in init_sign.parameters.values():
if ((parameter.kind != parameter.VAR_KEYWORD) and (parameter.name != 'self')):
... |
'Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it\'s possible to update each component of a nested object.
Returns
self'
| def set_params(self, **params):
| if (not params):
return self
valid_params = self.get_params(deep=True)
for (key, value) in six.iteritems(params):
split = key.split('__', 1)
if (len(split) > 1):
(name, sub_name) = split
if (name not in valid_params):
raise ValueError(('Invalid... |
'Returns a clone of self with given hyperparameters theta.'
| def clone_with_theta(self, theta):
| cloned = clone(self)
cloned.theta = theta
return cloned
|
@property
def n_dims(self):
    """Return the number of non-fixed hyperparameters of the kernel.

    This is simply the length of the flattened ``theta`` vector.
    """
    return len(self.theta)
|
@property
def hyperparameters(self):
    """Return a list of all hyperparameter specifications.

    Any attribute whose name starts with ``hyperparameter_`` is treated
    as a hyperparameter specification; ``dir()`` keeps the result in a
    deterministic (alphabetical) order.
    """
    return [getattr(self, name)
            for name in dir(self)
            if name.startswith('hyperparameter_')]
|
@property
def theta(self):
    """Return the flattened, log-transformed non-fixed hyperparameters.

    Note that theta are typically the log-transformed values of the
    kernel's hyperparameters, as this representation of the search space
    is more amenable for hyperparameter search (length-scales naturally
    live on a log-scale).

    Returns
    -------
    theta : array, shape (n_dims,)
        The non-fixed, log-transformed hyperparameters of the kernel.
    """
    params = self.get_params()
    values = [params[hp.name]
              for hp in self.hyperparameters
              if not hp.fixed]
    if not values:
        return np.array([])
    return np.log(np.hstack(values))
|
'Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel'
| @theta.setter
def theta(self, theta):
| params = self.get_params()
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if (hyperparameter.n_elements > 1):
params[hyperparameter.name] = np.exp(theta[i:(i + hyperparameter.n_elements)])
i += hyperparameter.n_elements... |
@property
def bounds(self):
    """Return the log-transformed bounds on theta.

    Returns
    -------
    bounds : array, shape (n_dims, 2)
        The log-transformed bounds on the kernel's non-fixed
        hyperparameters; an empty array when all are fixed.
    """
    free_bounds = [hp.bounds
                   for hp in self.hyperparameters
                   if not hp.fixed]
    if not free_bounds:
        return np.array([])
    return np.log(np.vstack(free_bounds))
|
def diag(self, X):
    """Return the diagonal of the kernel k(X, X), which is all ones here.

    Identical to ``np.diag(self(X))`` but cheaper, since only the
    diagonal is evaluated.

    Parameters
    ----------
    X : array, shape (n_samples_X, n_features)
        Left argument of the returned kernel k(X, Y).

    Returns
    -------
    K_diag : array, shape (n_samples_X,)
        Diagonal of the kernel k(X, X).
    """
    n_samples = X.shape[0]
    return np.ones(n_samples)
|
def is_stationary(self):
    """Return whether the kernel is stationary; always ``True`` here."""
    return True
|
def get_params(self, deep=True):
    """Get parameters of this compound kernel.

    Parameters
    ----------
    deep : boolean, optional
        Present for API compatibility; the only parameter is the list
        of sub-kernels, so ``deep`` has no effect here.

    Returns
    -------
    params : mapping of string to any
        Parameter names mapped to their values.
    """
    return {'kernels': self.kernels}
|
@property
def theta(self):
    """Return the flattened, log-transformed non-fixed hyperparameters.

    The thetas of all sub-kernels are concatenated in order.

    Returns
    -------
    theta : array, shape (n_dims,)
        The non-fixed, log-transformed hyperparameters of the kernel.
    """
    sub_thetas = [kernel.theta for kernel in self.kernels]
    return np.concatenate(sub_thetas)
|
@theta.setter
def theta(self, theta):
    """Set the flattened, log-transformed non-fixed hyperparameters.

    The flat vector is split into equal-sized chunks, one chunk per
    sub-kernel (all sub-kernels are assumed to have the same n_dims).

    Parameters
    ----------
    theta : array, shape (n_dims,)
        The non-fixed, log-transformed hyperparameters of the kernel.
    """
    # Bug fix: this class stores its sub-kernels in ``self.kernels``
    # (see get_params / the theta getter / bounds / diag); it has no
    # ``k1`` attribute, so the original ``self.k1.n_dims`` raised
    # AttributeError. Use the first sub-kernel instead.
    k_dims = self.kernels[0].n_dims
    for i, kernel in enumerate(self.kernels):
        kernel.theta = theta[(i * k_dims):((i + 1) * k_dims)]
|
@property
def bounds(self):
    """Return the log-transformed bounds on theta.

    The bounds of all sub-kernels are stacked vertically, in order.

    Returns
    -------
    bounds : array, shape (n_dims, 2)
        The log-transformed bounds on the kernel's hyperparameters theta.
    """
    all_bounds = [kernel.bounds for kernel in self.kernels]
    return np.vstack(all_bounds)
|
'Return the kernel k(X, Y) and optionally its gradient.
Note that this compound kernel returns the results of all simple kernel
stacked along an additional axis.
Parameters
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, defa... | def __call__(self, X, Y=None, eval_gradient=False):
| if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
(K_single, K_grad_single) = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return (np.dstack(K), np.concatenate(K_grad, 3))
else:
... |
def is_stationary(self):
    """Return whether all sub-kernels of the compound kernel are stationary."""
    flags = [kernel.is_stationary() for kernel in self.kernels]
    return np.all(flags)
|
def diag(self, X):
    """Return the diagonal of the kernel k(X, X), one column per sub-kernel.

    Identical to ``np.diag(self(X))`` but cheaper, since only the
    diagonal is evaluated.

    Parameters
    ----------
    X : array, shape (n_samples_X, n_features)
        Left argument of the returned kernel k(X, Y).

    Returns
    -------
    K_diag : array, shape (n_samples_X, n_kernels)
        Diagonal of the kernel k(X, X), stacked per sub-kernel.
    """
    diags = [kernel.diag(X) for kernel in self.kernels]
    return np.transpose(np.vstack(diags))
|
def get_params(self, deep=True):
    """Get parameters of this binary kernel operator.

    Parameters
    ----------
    deep : boolean, optional
        If True, also include the parameters of the two sub-kernels,
        prefixed with ``k1__`` / ``k2__``.

    Returns
    -------
    params : mapping of string to any
        Parameter names mapped to their values.
    """
    params = {'k1': self.k1, 'k2': self.k2}
    if deep:
        for prefix, kernel in (('k1__', self.k1), ('k2__', self.k2)):
            for key, value in kernel.get_params().items():
                params[prefix + key] = value
    return params
|
'Returns a list of all hyperparameter.'
| @property
def hyperparameters(self):
| r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter(('k1__' + hyperparameter.name), hyperparameter.value_type, hyperparameter.bounds, hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter(('k2__' + hyperparameter.name), h... |
@property
def theta(self):
    """Return the flattened, log-transformed non-fixed hyperparameters.

    The thetas of the two sub-kernels are concatenated: k1 first, k2 second.

    Returns
    -------
    theta : array, shape (n_dims,)
        The non-fixed, log-transformed hyperparameters of the kernel.
    """
    return np.concatenate([self.k1.theta, self.k2.theta])
|
@theta.setter
def theta(self, theta):
    """Set the flattened, log-transformed non-fixed hyperparameters.

    The leading ``k1.n_dims`` entries go to k1, the remainder to k2.

    Parameters
    ----------
    theta : array, shape (n_dims,)
        The non-fixed, log-transformed hyperparameters of the kernel.
    """
    n_left = self.k1.n_dims
    self.k1.theta = theta[:n_left]
    self.k2.theta = theta[n_left:]
|
@property
def bounds(self):
    """Return the log-transformed bounds on theta.

    Bounds of the two sub-kernels are stacked; when one sub-kernel has no
    free hyperparameters, the other's bounds are returned unchanged.

    Returns
    -------
    bounds : array, shape (n_dims, 2)
        The log-transformed bounds on the kernel's hyperparameters theta.
    """
    b1 = self.k1.bounds
    b2 = self.k2.bounds
    if b1.size == 0:
        return b2
    if b2.size == 0:
        return b1
    return np.vstack((b1, b2))
|
def is_stationary(self):
    """Return whether the kernel is stationary, i.e. both sub-kernels are.

    Short-circuits on k1, exactly like the ``and`` it replaces.
    """
    left = self.k1.is_stationary()
    if not left:
        return left
    return self.k2.is_stationary()
|
def __call__(self, X, Y=None, eval_gradient=False):
    """Return the sum kernel k1(X, Y) + k2(X, Y) and optionally its gradient.

    Parameters
    ----------
    X : array, shape (n_samples_X, n_features)
        Left argument of the returned kernel k(X, Y).
    Y : array, shape (n_samples_Y, n_features), optional (default=None)
        Right argument of the returned kernel k(X, Y). If None, k(X, X)
        is evaluated instead.
    eval_gradient : bool (optional, default=False)
        Whether the gradient with respect to the kernel hyperparameters
        is computed.

    Returns
    -------
    K : array, shape (n_samples_X, n_samples_Y)
        Kernel k(X, Y).
    K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
        Gradient of the kernel with respect to the hyperparameters;
        the two sub-gradients are stacked along the last axis.
    """
    if not eval_gradient:
        return self.k1(X, Y) + self.k2(X, Y)
    K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
    K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
    return K1 + K2, np.dstack((K1_gradient, K2_gradient))
|
def diag(self, X):
    """Return the diagonal of the sum kernel k(X, X).

    Identical to ``np.diag(self(X))`` but cheaper, since only the
    diagonal of each sub-kernel is evaluated.

    Parameters
    ----------
    X : array, shape (n_samples_X, n_features)
        Left argument of the returned kernel k(X, Y).

    Returns
    -------
    K_diag : array, shape (n_samples_X,)
        Diagonal of the kernel k(X, X).
    """
    d1 = self.k1.diag(X)
    d2 = self.k2.diag(X)
    return d1 + d2
|
def __call__(self, X, Y=None, eval_gradient=False):
    """Return the product kernel k1(X, Y) * k2(X, Y) and optionally its gradient.

    Parameters
    ----------
    X : array, shape (n_samples_X, n_features)
        Left argument of the returned kernel k(X, Y).
    Y : array, shape (n_samples_Y, n_features), optional (default=None)
        Right argument of the returned kernel k(X, Y). If None, k(X, X)
        is evaluated instead.
    eval_gradient : bool (optional, default=False)
        Whether the gradient with respect to the kernel hyperparameters
        is computed.

    Returns
    -------
    K : array, shape (n_samples_X, n_samples_Y)
        Kernel k(X, Y).
    K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
        Gradient of the kernel with respect to the hyperparameters.
    """
    if not eval_gradient:
        return self.k1(X, Y) * self.k2(X, Y)
    K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
    K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
    # Product rule: d(K1*K2) = dK1 * K2 + K1 * dK2, sub-gradients stacked
    # along the trailing hyperparameter axis.
    grad = np.dstack((K1_gradient * K2[:, :, np.newaxis],
                      K2_gradient * K1[:, :, np.newaxis]))
    return K1 * K2, grad
|
def diag(self, X):
    """Return the diagonal of the product kernel k(X, X).

    Identical to ``np.diag(self(X))`` but cheaper, since only the
    diagonal of each sub-kernel is evaluated.

    Parameters
    ----------
    X : array, shape (n_samples_X, n_features)
        Left argument of the returned kernel k(X, Y).

    Returns
    -------
    K_diag : array, shape (n_samples_X,)
        Diagonal of the kernel k(X, X).
    """
    d1 = self.k1.diag(X)
    d2 = self.k2.diag(X)
    return d1 * d2
|
def get_params(self, deep=True):
    """Get parameters of this exponentiation kernel.

    Parameters
    ----------
    deep : boolean, optional
        If True, also include the wrapped kernel's parameters, prefixed
        with ``kernel__``.

    Returns
    -------
    params : mapping of string to any
        Parameter names mapped to their values.
    """
    params = {'kernel': self.kernel, 'exponent': self.exponent}
    if deep:
        for key, value in self.kernel.get_params().items():
            params['kernel__' + key] = value
    return params
|
@property
def hyperparameters(self):
    """Return the wrapped kernel's hyperparameters, renamed with a
    ``kernel__`` prefix so nested set_params addressing works.
    """
    return [Hyperparameter('kernel__' + hp.name, hp.value_type,
                           hp.bounds, hp.n_elements)
            for hp in self.kernel.hyperparameters]
|
@property
def theta(self):
    """Return the flattened, log-transformed non-fixed hyperparameters.

    The exponent itself is not a tunable hyperparameter; theta is simply
    delegated to the wrapped kernel.

    Returns
    -------
    theta : array, shape (n_dims,)
        The non-fixed, log-transformed hyperparameters of the kernel.
    """
    inner = self.kernel
    return inner.theta
|
@theta.setter
def theta(self, theta):
    """Set the flattened, log-transformed non-fixed hyperparameters.

    Delegates directly to the wrapped kernel.

    Parameters
    ----------
    theta : array, shape (n_dims,)
        The non-fixed, log-transformed hyperparameters of the kernel.
    """
    self.kernel.theta = theta
|
@property
def bounds(self):
    """Return the log-transformed bounds on theta (delegated to the
    wrapped kernel; the exponent contributes no bounds).

    Returns
    -------
    bounds : array, shape (n_dims, 2)
        The log-transformed bounds on the kernel's hyperparameters theta.
    """
    inner = self.kernel
    return inner.bounds
|
def __call__(self, X, Y=None, eval_gradient=False):
    """Return the kernel k(X, Y) = base_kernel(X, Y) ** exponent and
    optionally its gradient.

    Parameters
    ----------
    X : array, shape (n_samples_X, n_features)
        Left argument of the returned kernel k(X, Y).
    Y : array, shape (n_samples_Y, n_features), optional (default=None)
        Right argument of the returned kernel k(X, Y). If None, k(X, X)
        is evaluated instead.
    eval_gradient : bool (optional, default=False)
        Whether the gradient with respect to the kernel hyperparameters
        is computed.

    Returns
    -------
    K : array, shape (n_samples_X, n_samples_Y)
        Kernel k(X, Y).
    K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
        Gradient of the kernel with respect to the hyperparameters.
    """
    if not eval_gradient:
        K = self.kernel(X, Y, eval_gradient=False)
        return K ** self.exponent
    K, K_gradient = self.kernel(X, Y, eval_gradient=True)
    # Chain rule: d(K**p)/dtheta = p * K**(p-1) * dK/dtheta
    # (in-place scaling of the base gradient, as in the original).
    K_gradient *= self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
    return K ** self.exponent, K_gradient
|
def diag(self, X):
    """Return the diagonal of the kernel k(X, X), i.e. the wrapped
    kernel's diagonal raised to ``exponent``.

    Parameters
    ----------
    X : array, shape (n_samples_X, n_features)
        Left argument of the returned kernel k(X, Y).

    Returns
    -------
    K_diag : array, shape (n_samples_X,)
        Diagonal of the kernel k(X, X).
    """
    base_diag = self.kernel.diag(X)
    return base_diag ** self.exponent
|
def is_stationary(self):
    """Return whether the kernel is stationary; exponentiation preserves
    the wrapped kernel's stationarity, so simply delegate.
    """
    return self.kernel.is_stationary()
|
'Return the kernel k(X, Y) and optionally its gradient.
Parameters
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradi... | def __call__(self, X, Y=None, eval_gradient=False):
| X = np.atleast_2d(X)
if (Y is None):
Y = X
elif eval_gradient:
raise ValueError('Gradient can only be evaluated when Y is None.')
K = (self.constant_value * np.ones((X.shape[0], Y.shape[0])))
if eval_gradient:
if (not self.hyperparameter_constant_value... |
def diag(self, X):
    """Return the diagonal of the constant kernel k(X, X): a vector of
    ``constant_value`` of length n_samples_X.

    Parameters
    ----------
    X : array, shape (n_samples_X, n_features)
        Left argument of the returned kernel k(X, Y).

    Returns
    -------
    K_diag : array, shape (n_samples_X,)
        Diagonal of the kernel k(X, X).
    """
    n_samples = X.shape[0]
    return np.ones(n_samples) * self.constant_value
|
'Return the kernel k(X, Y) and optionally its gradient.
Parameters
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradi... | def __call__(self, X, Y=None, eval_gradient=False):
| X = np.atleast_2d(X)
if ((Y is not None) and eval_gradient):
raise ValueError('Gradient can only be evaluated when Y is None.')
if (Y is None):
K = (self.noise_level * np.eye(X.shape[0]))
if eval_gradient:
if (not self.hyperparameter_noise_level.fi... |
def diag(self, X):
    """Return the diagonal of the white kernel k(X, X): a vector of
    ``noise_level`` of length n_samples_X.

    Parameters
    ----------
    X : array, shape (n_samples_X, n_features)
        Left argument of the returned kernel k(X, Y).

    Returns
    -------
    K_diag : array, shape (n_samples_X,)
        Diagonal of the kernel k(X, X).
    """
    n_samples = X.shape[0]
    return np.ones(n_samples) * self.noise_level
|
'Return the kernel k(X, Y) and optionally its gradient.
Parameters
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradi... | def __call__(self, X, Y=None, eval_gradient=False):
| X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if (Y is None):
dists = pdist((X / length_scale), metric='sqeuclidean')
K = np.exp(((-0.5) * dists))
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise Va... |
'Return the kernel k(X, Y) and optionally its gradient.
Parameters
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradi... | def __call__(self, X, Y=None, eval_gradient=False):
| X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if (Y is None):
dists = pdist((X / length_scale), metric='euclidean')
else:
if eval_gradient:
raise ValueError('Gradient can only be evaluated when Y is None.')
dists... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.