desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'Outlyingness of observations in X according to the fitted model.
Parameters
X : array-like, shape = (n_samples, n_features)
Returns
is_outliers : array, shape = (n_samples, ), dtype = bool
For each observation, tells whether or not it should be considered
as an outlier according to the fitted model.
threshold : float,... | def predict(self, X):
| check_is_fitted(self, 'threshold_')
X = check_array(X)
is_inlier = (- np.ones(X.shape[0], dtype=int))
if (self.contamination is not None):
values = self.decision_function(X, raw_values=True)
is_inlier[(values <= self.threshold_)] = 1
else:
raise NotImplementedError('You mu... |
'Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = ... | def score(self, X, y, sample_weight=None):
| return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
|
'Fits the GraphLasso model to X.
Parameters
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
y : (ignored)'
| def fit(self, X, y=None):
| X = check_array(X, ensure_min_features=2, ensure_min_samples=2, estimator=self)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
(self.covariance_, self.precision_, ... |
'Fits the GraphLasso covariance model to X.
Parameters
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
y : (ignored)'
| def fit(self, X, y=None):
| X = check_array(X, ensure_min_features=2, estimator=self)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, y, classifier=False)
path = lis... |
'Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.'
| def _set_covariance(self, covariance):
| covariance = check_array(covariance)
self.covariance_ = covariance
if self.store_precision:
self.precision_ = linalg.pinvh(covariance)
else:
self.precision_ = None
|
'Getter for the precision matrix.
Returns
precision_ : array-like,
The precision matrix associated to the current covariance object.'
| def get_precision(self):
| if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_)
return precision
|
'Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : not used, present for API consistence purpose.
Ret... | def fit(self, X, y=None):
| X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
|
'Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
X_test is... | def score(self, X_test, y=None):
| test_cov = empirical_covariance((X_test - self.location_), assume_centered=True)
res = log_likelihood(test_cov, self.get_precision())
return res
|
'Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- \'frobenius\' (default): sqrt(tr(A^... | def error_norm(self, comp_cov, norm='frobenius', scaling=True, squared=True):
| error = (comp_cov - self.covariance_)
if (norm == 'frobenius'):
squared_norm = np.sum((error ** 2))
elif (norm == 'spectral'):
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError('Only spectral and frobenius norms are i... |
'Computes the squared Mahalanobis distances of given observations.
Parameters
observations : array-like, shape = [n_observations, n_features]
The observations, the Mahalanobis distances of the which we
compute. Observations are assumed to be drawn from the same
distribution than the data used in fit.
Returns
mahalanobi... | def mahalanobis(self, observations):
| precision = self.get_precision()
centered_obs = (observations - self.location_)
mahalanobis_dist = np.sum((np.dot(centered_obs, precision) * centered_obs), 1)
return mahalanobis_dist
|
'Fits the shrunk covariance model
according to the given training data and parameters.
Parameters
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
self : object
Ret... | def fit(self, X, y=None):
| X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(X, assume_centered=self.assume_centered)
covariance = shrunk_covariance(covariance, self.shrinkage)
self._set_covariance(covariance... |
'Fits the Ledoit-Wolf shrunk covariance model
according to the given training data and parameters.
Parameters
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
self ... | def fit(self, X, y=None):
| X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
(covariance, shrinkage) = ledoit_wolf((X - self.location_), assume_centered=True, block_size=self.block_size)
self.shrinkage_ = shrinkage
self._set_covariance(covari... |
'Fits the Oracle Approximating Shrinkage covariance model
according to the given training data and parameters.
Parameters
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
R... | def fit(self, X, y=None):
| X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
(covariance, shrinkage) = oas((X - self.location_), assume_centered=True)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
|
'Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
cov : array, shape=(n_features, n_features)
Estimated covariance of data.'
| def get_covariance(self):
| components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = (components_ * np.sqrt(exp_var[:, np.newaxis]))
exp_var_diff = np.maximum((exp_var - self.noise_variance_), 0.0)
cov = np.dot((components_.T * exp_var_diff), components_)
cov.flat[::(len(cov) ... |
'Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
precision : array, shape=(n_features, n_features)
Estimated precision of data.'
| def get_precision(self):
| n_features = self.components_.shape[1]
if (self.n_components_ == 0):
return (np.eye(n_features) / self.noise_variance_)
if (self.n_components_ == n_features):
return linalg.inv(self.get_covariance())
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten... |
'Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
X_new : array-like, shape (n_samp... | def transform(self, X):
| check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if (self.mean_ is not None):
X = (X - self.mean_)
X_transformed = np.dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
|
'Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
X_original array-like, shape (n_samples, ... | def inverse_transform(self, X):
| if self.whiten:
return (np.dot(X, (np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_)) + self.mean_)
else:
return (np.dot(X, self.components_) + self.mean_)
|
'Fit the model
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
compute_sources : bool
If False, sources are not computes but only the rotation matrix.
This can save memory when working with big data. Defaults to F... | def _fit(self, X, compute_sources=False):
| fun_args = ({} if (self.fun_args is None) else self.fun_args)
(whitening, unmixing, sources, X_mean, self.n_iter_) = fastica(X=X, n_components=self.n_components, algorithm=self.algorithm, whiten=self.whiten, fun=self.fun, fun_args=fun_args, max_iter=self.max_iter, tol=self.tol, w_init=self.w_init, random_state=... |
'Fit the model and recover the sources from X.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
X_new : array-like, shape (n_samples, n_components)'
| def fit_transform(self, X, y=None):
| return self._fit(X, compute_sources=True)
|
'Fit the model to X.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
self'
| def fit(self, X, y=None):
| self._fit(X, compute_sources=False)
return self
|
'Recover the sources from X (apply the unmixing matrix).
Parameters
X : array-like, shape (n_samples, n_features)
Data to transform, where n_samples is the number of samples
and n_features is the number of features.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool (optional)
If Fal... | def transform(self, X, y='deprecated', copy=True):
| if ((not isinstance(y, string_types)) or (y != 'deprecated')):
warnings.warn('The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21', DeprecationWarning)
check_is_fitted(self, 'mixing_')
X = check_array(X, copy=copy, dtype=FLOAT... |
'Transform the sources back to the mixed data (apply mixing matrix).
Parameters
X : array-like, shape (n_samples, n_components)
Sources, where n_samples is the number of samples
and n_components is the number of components.
copy : bool (optional)
If False, data passed to fit are overwritten. Defaults to True.
Returns
X... | def inverse_transform(self, X, copy=True):
| check_is_fitted(self, 'mixing_')
X = check_array(X, copy=(copy and self.whiten), dtype=FLOAT_DTYPES)
X = np.dot(X, self.mixing_.T)
if self.whiten:
X += self.mean_
return X
|
'Fit LSI model on training data X.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
self : object
Returns the transformer object.'
| def fit(self, X, y=None):
| self.fit_transform(X)
return self
|
'Fit LSI model to X and perform dimensionality reduction on X.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.'
| def fit_transform(self, X, y=None):
| X = check_array(X, accept_sparse=['csr', 'csc'])
random_state = check_random_state(self.random_state)
if (self.algorithm == 'arpack'):
(U, Sigma, VT) = svds(X, k=self.n_components, tol=self.tol)
Sigma = Sigma[::(-1)]
(U, VT) = svd_flip(U[:, ::(-1)], VT[::(-1)])
elif (self.algorit... |
'Perform dimensionality reduction on X.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.'
| def transform(self, X):
| X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
|
'Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
X : array-like, shape (n_samples, n_components)
New data.
Returns
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.'
| def inverse_transform(self, X):
| X = check_array(X)
return np.dot(X, self.components_)
|
'Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
X_new : array... | def transform(self, X):
| check_is_fitted(self, 'components_')
X = check_array(X)
(n_samples, n_features) = X.shape
code = sparse_encode(X, self.components_, algorithm=self.transform_algorithm, n_nonzero_coefs=self.transform_n_nonzero_coefs, alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
(n_sampl... |
'Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
self : object
Returns the ... | def fit(self, X, y=None):
| return self
|
'Fit the model from data in X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
self : object
Returns the object itself'
| def fit(self, X, y=None):
| random_state = check_random_state(self.random_state)
X = check_array(X)
if (self.n_components is None):
n_components = X.shape[1]
else:
n_components = self.n_components
(V, U, E, self.n_iter_) = dict_learning(X, n_components, self.alpha, tol=self.tol, max_iter=self.max_iter, method=s... |
'Fit the model from data in X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
self : object
Returns the instance itself.'
| def fit(self, X, y=None):
| random_state = check_random_state(self.random_state)
X = check_array(X)
(U, (A, B), self.n_iter_) = dict_learning_online(X, self.n_components, self.alpha, n_iter=self.n_iter, return_code=False, method=self.fit_algorithm, n_jobs=self.n_jobs, dict_init=self.dict_init, batch_size=self.batch_size, shuffle=self.... |
'Updates the model using the data in X as a mini-batch.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples
and n_features is the number of features.
iter_offset : integer, optional
The number of iteration on data batches that has been
performed before this... | def partial_fit(self, X, y=None, iter_offset=None):
| if (not hasattr(self, 'random_state_')):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if (iter... |
'Fit the FactorAnalysis model to X using EM
Parameters
X : array-like, shape (n_samples, n_features)
Training data.
Returns
self'
| def fit(self, X, y=None):
| X = check_array(X, copy=self.copy, dtype=np.float64)
(n_samples, n_features) = X.shape
n_components = self.n_components
if (n_components is None):
n_components = n_features
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
nsqrt = sqrt(n_samples)
llconst = ((n_features * log((2.0 *... |
'Apply dimensionality reduction to X using the model.
Compute the expected mean of the latent variables.
See Barber, 21.2.33 (or Bishop, 12.66).
Parameters
X : array-like, shape (n_samples, n_features)
Training data.
Returns
X_new : array-like, shape (n_samples, n_components)
The latent variables of X.'
| def transform(self, X):
| check_is_fitted(self, 'components_')
X = check_array(X)
Ih = np.eye(len(self.components_))
X_transformed = (X - self.mean_)
Wpsi = (self.components_ / self.noise_variance_)
cov_z = linalg.inv((Ih + np.dot(Wpsi, self.components_.T)))
tmp = np.dot(X_transformed, Wpsi.T)
X_transformed = np.... |
'Compute data covariance with the FactorAnalysis model.
``cov = components_.T * components_ + diag(noise_variance)``
Returns
cov : array, shape (n_features, n_features)
Estimated covariance of data.'
| def get_covariance(self):
| check_is_fitted(self, 'components_')
cov = np.dot(self.components_.T, self.components_)
cov.flat[::(len(cov) + 1)] += self.noise_variance_
return cov
|
'Compute data precision matrix with the FactorAnalysis model.
Returns
precision : array, shape (n_features, n_features)
Estimated precision of data.'
| def get_precision(self):
| check_is_fitted(self, 'components_')
n_features = self.components_.shape[1]
if (self.n_components == 0):
return np.diag((1.0 / self.noise_variance_))
if (self.n_components == n_features):
return linalg.inv(self.get_covariance())
components_ = self.components_
precision = np.dot((... |
'Compute the log-likelihood of each sample
Parameters
X : array, shape (n_samples, n_features)
The data
Returns
ll : array, shape (n_samples,)
Log-likelihood of each sample under the current model'
| def score_samples(self, X):
| check_is_fitted(self, 'components_')
Xr = (X - self.mean_)
precision = self.get_precision()
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
log_like = ((-0.5) * (Xr * np.dot(Xr, precision)).sum(axis=1))
log_like -= (0.5 * ((n_features * log((2.0 * np.pi))) - fast_logdet(precision)))
... |
'Compute the average log-likelihood of the samples
Parameters
X : array, shape (n_samples, n_features)
The data
Returns
ll : float
Average log-likelihood of the samples under the current model'
| def score(self, X, y=None):
| return np.mean(self.score_samples(X))
|
'Fit\'s using kernel K'
| def _fit_transform(self, K):
| K = self._centerer.fit_transform(K)
if (self.n_components is None):
n_components = K.shape[0]
else:
n_components = min(K.shape[0], self.n_components)
if (self.eigen_solver == 'auto'):
if ((K.shape[0] > 200) and (n_components < 10)):
eigen_solver = 'arpack'
els... |
'Fit the model from data in X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
self : object
Returns the instance itself.'
| def fit(self, X, y=None):
| X = check_array(X, accept_sparse='csr', copy=self.copy_X)
K = self._get_kernel(X)
self._fit_transform(K)
if self.fit_inverse_transform:
sqrt_lambdas = np.diag(np.sqrt(self.lambdas_))
X_transformed = np.dot(self.alphas_, sqrt_lambdas)
self._fit_inverse_transform(X_transformed, X)
... |
'Fit the model from data in X and transform X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
X_new : array-like, shape (n_samples, n_components)'
| def fit_transform(self, X, y=None, **params):
| self.fit(X, **params)
X_transformed = (self.alphas_ * np.sqrt(self.lambdas_))
if self.fit_inverse_transform:
self._fit_inverse_transform(X_transformed, X)
return X_transformed
|
'Transform X.
Parameters
X : array-like, shape (n_samples, n_features)
Returns
X_new : array-like, shape (n_samples, n_components)'
| def transform(self, X):
| check_is_fitted(self, 'X_fit_')
K = self._centerer.transform(self._get_kernel(X, self.X_fit_))
return np.dot(K, (self.alphas_ / np.sqrt(self.lambdas_)))
|
'Transform X back to original space.
Parameters
X : array-like, shape (n_samples, n_components)
Returns
X_new : array-like, shape (n_samples, n_features)
References
"Learning to Find Pre-Images", G BakIr et al, 2004.'
| def inverse_transform(self, X):
| if (not self.fit_inverse_transform):
raise NotFittedError('The fit_inverse_transform parameter was not set to True when instantiating and hence the inverse transform is not available.')
K = self._get_kernel(X, self.X_transformed_fit_)
return np.dot(... |
'Fit the model from data in X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
self : object
Returns the instance itself.'
| def fit(self, X, y=None):
| random_state = check_random_state(self.random_state)
X = check_array(X)
if (self.n_components is None):
n_components = X.shape[1]
else:
n_components = self.n_components
code_init = (self.V_init.T if (self.V_init is not None) else None)
dict_init = (self.U_init.T if (self.U_init i... |
'Least Squares projection of the data onto the sparse components.
To avoid instability issues in case the system is under-determined,
regularization can be applied (Ridge regression) via the
`ridge_alpha` parameter.
Note that Sparse PCA components orthogonality is not enforced as in PCA
hence one cannot use a simple li... | def transform(self, X, ridge_alpha='deprecated'):
| check_is_fitted(self, 'components_')
X = check_array(X)
if (ridge_alpha != 'deprecated'):
warnings.warn('The ridge_alpha parameter on transform() is deprecated since 0.19 and will be removed in 0.21. Specify ridge_alpha in the SparsePCA con... |
'Fit the model from data in X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
self : object
Returns the instance itself.'
| def fit(self, X, y=None):
| random_state = check_random_state(self.random_state)
X = check_array(X)
if (self.n_components is None):
n_components = X.shape[1]
else:
n_components = self.n_components
(Vt, _, self.n_iter_) = dict_learning_online(X.T, n_components, alpha=self.alpha, n_iter=self.n_iter, return_code=T... |
'Fit the model with X, using minibatches of size batch_size.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : Passthrough for ``Pipeline`` compatibility.
Returns
self : object
Returns the instance itself.'
| def fit(self, X, y=None):
| self.components_ = None
self.n_samples_seen_ = 0
self.mean_ = 0.0
self.var_ = 0.0
self.singular_values_ = None
self.explained_variance_ = None
self.explained_variance_ratio_ = None
self.singular_values_ = None
self.noise_variance_ = None
X = check_array(X, copy=self.copy, dtype=[... |
'Incremental fit with X. All of X is processed as a single batch.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
check_input : bool
Run check_array on X.
Returns
self : object
Returns the instance itself.'
| def partial_fit(self, X, y=None, check_input=True):
| if check_input:
X = check_array(X, copy=self.copy, dtype=[np.float64, np.float32])
(n_samples, n_features) = X.shape
if (not hasattr(self, 'components_')):
self.components_ = None
if (self.n_components is None):
self.n_components_ = n_features
elif (not (1 <= self.n_component... |
'Fit the model with X.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
self : object
Returns the instance itself.'
| def fit(self, X, y=None):
| self._fit(X)
return self
|
'Fit the model with X and apply the dimensionality reduction on X.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
X_new : array-like, shape (n_samples, n_components)'
| def fit_transform(self, X, y=None):
| (U, S, V) = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
U *= sqrt((X.shape[0] - 1))
else:
U *= S[:self.n_components_]
return U
|
'Dispatch to the right submethod depending on the chosen solver.'
| def _fit(self, X):
| if issparse(X):
raise TypeError('PCA does not support sparse input. See TruncatedSVD for a possible alternative.')
X = check_array(X, dtype=[np.float64, np.float32], ensure_2d=True, copy=self.copy)
if (self.n_components is None):
n_components = X.shape[1]
... |
'Fit the model by computing full SVD on X'
| def _fit_full(self, X, n_components):
| (n_samples, n_features) = X.shape
if (n_components == 'mle'):
if (n_samples < n_features):
raise ValueError("n_components='mle' is only supported if n_samples >= n_features")
elif (not (0 <= n_components <= n_features)):
raise ValueError(("n_components=%r ... |
'Fit the model by computing truncated SVD (by ARPACK or randomized)
on X'
| def _fit_truncated(self, X, n_components, svd_solver):
| (n_samples, n_features) = X.shape
if isinstance(n_components, six.string_types):
raise ValueError(("n_components=%r cannot be a string with svd_solver='%s'" % (n_components, svd_solver)))
elif (not (1 <= n_components <= n_features)):
raise ValueError(("n_components=%r mu... |
'Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
X : array, shape(n_samples, n_features)
The data.
Returns
ll : array, shape (n_samples,)
Log-likelihood of each sample under the current mo... | def score_samples(self, X):
| check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = (X - self.mean_)
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = ((-0.5) * (Xr * np.dot(Xr, precision)).sum(axis=1))
log_like -= (0.5 * ((n_features * log((2.0 * np.pi))) - fast_log... |
'Return the average log-likelihood of all samples.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
X : array, shape(n_samples, n_features)
The data.
Returns
ll : float
Average log-likelihood of the samples under the current model'... | def score(self, X, y=None):
| return np.mean(self.score_samples(X))
|
'Fit the model with X by extracting the first principal components.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
self : object
Returns the instance itself.'
| def fit(self, X, y=None):
| self._fit(check_array(X))
return self
|
'Fit the model to the data X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Returns
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.'
| def _fit(self, X):
| random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if (self.n_components is None):
n_components = X.shape[1]
else:
n_components = self.n_components
... |
def transform(self, X):
    """Apply dimensionality reduction on X.

    X is projected on the first principal components previously
    extracted from a training set.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        New data, where n_samples is the number of samples
        and n_features is the number of features.

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    check_is_fitted(self, 'mean_')
    X = check_array(X)
    # Center with the training mean before projecting onto the components.
    if self.mean_ is not None:
        X = X - self.mean_
    return np.dot(X, self.components_.T)
def fit_transform(self, X, y=None):
    """Fit the model with X and apply the dimensionality reduction on X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        New data, where n_samples is the number of samples
        and n_features is the number of features.

    y : ignored

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    # _fit returns the preprocessed copy of X, which then only needs
    # projecting onto the fitted components.
    preprocessed = self._fit(check_array(X))
    return np.dot(preprocessed, self.components_.T)
def inverse_transform(self, X):
    """Transform data back to its original space.

    Returns an array X_original whose transform would be X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_components)
        New data, where n_samples is the number of samples
        and n_components is the number of components.

    Returns
    -------
    X_original : array-like, shape (n_samples, n_features)
        Note that this is always a dense array.
    """
    check_is_fitted(self, 'mean_')
    # Map back through the components, then undo the centering.
    reconstructed = np.dot(X, self.components_)
    if self.mean_ is not None:
        reconstructed = reconstructed + self.mean_
    return reconstructed
'Check model parameters.'
| def _check_params(self):
| if (self.n_topics is not None):
self._n_components = self.n_topics
warnings.warn('n_topics has been renamed to n_components in version 0.19 and will be removed in 0.21', DeprecationWarning)
else:
self._n_components = self.n_components
if (sel... |
'Initialize latent variables.'
| def _init_latent_vars(self, n_features):
| self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if (self.doc_topic_prior is None):
self.doc_topic_prior_ = (1.0 / self._n_components)
else:
self.doc_topic_prior_ = self.doc_topic_prior
if (self.topic_word_prior is None):
... |
'E-step in EM update.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
cal_sstats : boolean
Parameter that indicate whether to calculate sufficient statistics
or not. Set ``cal_sstats`` to True when we need to run M-step.
random_init : boolean
Parameter that indicate wheth... | def _e_step(self, X, cal_sstats, random_init, parallel=None):
| random_state = (self.random_state_ if random_init else None)
n_jobs = _get_n_jobs(self.n_jobs)
if (parallel is None):
parallel = Parallel(n_jobs=n_jobs, verbose=max(0, (self.verbose - 1)))
results = parallel((delayed(_update_doc_distribution)(X[idx_slice, :], self.exp_dirichlet_component_, self.... |
'EM update for 1 iteration.
update `_component` by batch VB or online VB.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
total_samples : integer
Total number of documents. It is only used when
batch_update is `False`.
batch_update : boolean
Parameter that controls updati... | def _em_step(self, X, total_samples, batch_update, parallel=None):
| (_, suff_stats) = self._e_step(X, cal_sstats=True, random_init=True, parallel=parallel)
if batch_update:
self.components_ = (self.topic_word_prior_ + suff_stats)
else:
weight = np.power((self.learning_offset + self.n_batch_iter_), (- self.learning_decay))
doc_ratio = (float(total_sam... |
def _check_non_neg_array(self, X, whom):
    """Validate X's format and make sure it has no negative values.

    Parameters
    ----------
    X : array-like or sparse matrix
        Input to validate; converted to CSR when sparse.

    whom : string
        Caller name, used in the error raised on negative entries.
    """
    validated = check_array(X, accept_sparse='csr')
    check_non_negative(validated, whom)
    return validated
'Online VB with Mini-Batch update.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
self'
| def partial_fit(self, X, y=None):
| self._check_params()
X = self._check_non_neg_array(X, 'LatentDirichletAllocation.partial_fit')
(n_samples, n_features) = X.shape
batch_size = self.batch_size
if (not hasattr(self, 'components_')):
self._init_latent_vars(n_features)
if (n_features != self.components_.shape[1]):
ra... |
'Learn model for the data X with variational Bayes method.
When `learning_method` is \'online\', use mini-batch update.
Otherwise, use batch update.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
self'
| def fit(self, X, y=None):
| self._check_params()
X = self._check_non_neg_array(X, 'LatentDirichletAllocation.fit')
(n_samples, n_features) = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
if (learning_method is None):
warnings.warn("The default ... |
'Transform data X according to fitted model.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
doc_topic_distr : shape=(n_samples, n_components)
Document topic distribution for X.'
| def _unnormalized_transform(self, X):
| if (not hasattr(self, 'components_')):
raise NotFittedError("no 'components_' attribute in model. Please fit model first.")
X = self._check_non_neg_array(X, 'LatentDirichletAllocation.transform')
(n_samples, n_features) = X.shape
if (n_features != self.components_.shape[1... |
def transform(self, X):
    """Transform data X according to the fitted model.

    .. versionchanged:: 0.18
        *doc_topic_distr* is now normalized

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Document word matrix.

    Returns
    -------
    doc_topic_distr : shape=(n_samples, n_components)
        Document topic distribution for X.
    """
    # Normalize each document's topic weights so every row sums to 1.
    unnormalized = self._unnormalized_transform(X)
    row_totals = unnormalized.sum(axis=1)
    return unnormalized / row_totals[:, np.newaxis]
'Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
d... | def _approx_bound(self, X, doc_topic_distr, sub_sampling):
| def _loglikelihood(prior, distr, dirichlet_distr, size):
score = np.sum(((prior - distr) * dirichlet_distr))
score += np.sum((gammaln(distr) - gammaln(prior)))
score += np.sum((gammaln((prior * size)) - gammaln(np.sum(distr, 1))))
return score
is_sparse_x = sp.issparse(X)
(n_... |
def score(self, X, y=None):
    """Calculate approximate log-likelihood as score.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Document word matrix.

    y : ignored

    Returns
    -------
    score : float
        Use approximate bound as score.
    """
    X = self._check_non_neg_array(X, 'LatentDirichletAllocation.score')
    # The variational bound over the unnormalized topic distribution is a
    # tractable stand-in for the true log-likelihood.
    doc_topic = self._unnormalized_transform(X)
    return self._approx_bound(X, doc_topic, sub_sampling=False)
'Calculate approximate perplexity for data X with ability to accept
precomputed doc_topic_distr
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_components)
Document ... | def _perplexity_precomp_distr(self, X, doc_topic_distr=None, sub_sampling=False):
| if (not hasattr(self, 'components_')):
raise NotFittedError("no 'components_' attribute in model. Please fit model first.")
X = self._check_non_neg_array(X, 'LatentDirichletAllocation.perplexity')
if (doc_topic_distr is None):
doc_topic_distr = self._unnormalized_tran... |
'Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
.. versionchanged:: 0.19
*doc_topic_distr* argument has been deprecated and is ignored
because user no longer has access to unnormalized distribution
Parameters
X : array-like or sparse matrix, [n_samples, n_featur... | def perplexity(self, X, doc_topic_distr='deprecated', sub_sampling=False):
| if (doc_topic_distr != 'deprecated'):
warnings.warn("Argument 'doc_topic_distr' is deprecated and is being ignored as of 0.19. Support for this argument will be removed in 0.21.", DeprecationWarning)
return self._perplexity_precomp_distr(X, sub_sa... |
'Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
W : array-like, shape (n_samples, n_components)
If init=\'custom\', it is used as init... | def fit_transform(self, X, y=None, W=None, H=None):
| X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float)
(W, H, n_iter_) = non_negative_factorization(X=X, W=W, H=H, n_components=self.n_components, init=self.init, update_H=True, solver=self.solver, beta_loss=self.beta_loss, tol=self.tol, max_iter=self.max_iter, alpha=self.alpha, l1_ratio=self.l1_ratio, r... |
def fit(self, X, y=None, **params):
    """Learn a NMF model for the data X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Data matrix to be decomposed.

    y : ignored

    Returns
    -------
    self
    """
    # fit_transform does all the work; the W it returns is discarded.
    self.fit_transform(X, **params)
    return self
'Transform the data X according to the fitted NMF model
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Returns
W : array, shape (n_samples, n_components)
Transformed data'
| def transform(self, X):
| check_is_fitted(self, 'n_components_')
(W, _, n_iter_) = non_negative_factorization(X=X, W=None, H=self.components_, n_components=self.n_components_, init=self.init, update_H=False, solver=self.solver, beta_loss=self.beta_loss, tol=self.tol, max_iter=self.max_iter, alpha=self.alpha, l1_ratio=self.l1_ratio, regu... |
def inverse_transform(self, W):
    """Transform data back to its original space.

    Parameters
    ----------
    W : {array-like, sparse matrix}, shape (n_samples, n_components)
        Transformed data matrix.

    Returns
    -------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Data matrix of original shape.

    .. versionadded:: 0.18
    """
    check_is_fitted(self, 'n_components_')
    # Reconstruct X ~= W @ H from the fitted factorization.
    return np.dot(W, self.components_)
'Fit Kernel Ridge regression model
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Individual weights for each sample, ignored if None is passed.... | def fit(self, X, y=None, sample_weight=None):
| (X, y) = check_X_y(X, y, accept_sparse=('csr', 'csc'), multi_output=True, y_numeric=True)
if ((sample_weight is not None) and (not isinstance(sample_weight, float))):
sample_weight = check_array(sample_weight, ensure_2d=False)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel =... |
def predict(self, X):
    """Predict using the kernel ridge model.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Samples.

    Returns
    -------
    C : array, shape = [n_samples] or [n_samples, n_targets]
        Returns predicted values.
    """
    check_is_fitted(self, ['X_fit_', 'dual_coef_'])
    # Kernel between the new samples and the training samples, combined
    # with the dual coefficients learned at fit time.
    gram = self._get_kernel(X, self.X_fit_)
    return np.dot(gram, self.dual_coef_)
'Perform DBSCAN clustering from features or distance matrix.
Parameters
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric=\'precomputed\'``.
sample_weight : array, shape (n_sample... | def fit(self, X, y=None, sample_weight=None):
| X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
(self.core_sample_indices_, self.labels_) = clust
if len(self.core_sample_indices_):
self.components_ = X[self.core_sample_indices_].copy()
else:
self.components_ = np.empty((0... |
def fit_predict(self, X, y=None, sample_weight=None):
    """Performs clustering on X and returns cluster labels.

    Parameters
    ----------
    X : array or sparse (CSR) matrix of shape (n_samples, n_features), or array of shape (n_samples, n_samples)
        A feature array, or array of distances between samples if
        ``metric='precomputed'``.

    sample_weight : array, shape (n_samples,), optional
        Weight of each sample.

    Returns
    -------
    labels : array, shape (n_samples,)
        Cluster labels (noise points get -1 per the fitted model).
    """
    # Convenience wrapper: fit on X, then report the labels found.
    self.fit(X, sample_weight=sample_weight)
    return self.labels_
def fit(self, X, y=None):
    """Perform clustering.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Samples to cluster.

    y : ignored
    """
    validated = check_array(X)
    # Delegate to the functional mean-shift API with this estimator's
    # settings, then store the fitted centers and labels.
    centers, labels = mean_shift(
        validated, bandwidth=self.bandwidth, seeds=self.seeds,
        min_bin_freq=self.min_bin_freq, bin_seeding=self.bin_seeding,
        cluster_all=self.cluster_all, n_jobs=self.n_jobs)
    self.cluster_centers_ = centers
    self.labels_ = labels
    return self
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape=[n_samples, n_features]
        New data to predict.

    Returns
    -------
    labels : array, shape [n_samples,]
        Index of the cluster each sample belongs to.
    """
    check_is_fitted(self, 'cluster_centers_')
    # Assign every sample to its nearest fitted cluster center.
    return pairwise_distances_argmin(X, self.cluster_centers_)
def _check_fit_data(self, X):
    """Verify that the number of samples given is larger than k."""
    X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
    n_samples = X.shape[0]
    # Cannot form more clusters than there are samples.
    if n_samples < self.n_clusters:
        raise ValueError('n_samples=%d should be >= n_clusters=%d' % (
            n_samples, self.n_clusters))
    return X
'Compute k-means clustering.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.'
| def fit(self, X, y=None):
| random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
(self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_) = k_means(X, n_clusters=self.n_clusters, init=self.init, n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose, precompute_distances=self.precompute_... |
def fit_predict(self, X, y=None):
    """Compute cluster centers and predict cluster index for each sample.

    Convenience method; equivalent to calling fit(X) followed by
    predict(X).

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to transform.

    y : ignored

    Returns
    -------
    labels : array, shape [n_samples,]
        Index of the cluster each sample belongs to.
    """
    # Labels computed during fitting are exactly the predictions for X.
    fitted = self.fit(X)
    return fitted.labels_
def fit_transform(self, X, y=None):
    """Compute clustering and transform X to cluster-distance space.

    Equivalent to fit(X).transform(X), but more efficiently implemented.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to transform.

    y : ignored

    Returns
    -------
    X_new : array, shape [n_samples, k]
        X transformed in the new space.
    """
    # Validate once and reuse the validated array for both fitting and
    # the distance transform, skipping transform-time re-validation.
    data = self._check_fit_data(X)
    return self.fit(data)._transform(data)
def transform(self, X):
    """Transform X to a cluster-distance space.

    In the new space, each dimension is the distance to the cluster
    centers. Note that even if X is sparse, the array returned by
    `transform` will typically be dense.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to transform.

    Returns
    -------
    X_new : array, shape [n_samples, k]
        X transformed in the new space.
    """
    check_is_fitted(self, 'cluster_centers_')
    # Validate first, then hand off to the unchecked worker.
    validated = self._check_test_data(X)
    return self._transform(validated)
def _transform(self, X):
    """Guts of transform method; no input validation."""
    # Distance from every sample to every fitted cluster center.
    return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to predict.

    Returns
    -------
    labels : array, shape [n_samples,]
        Index of the cluster each sample belongs to.
    """
    check_is_fitted(self, 'cluster_centers_')
    data = self._check_test_data(X)
    # Precomputed squared row norms speed up the label assignment.
    norms = row_norms(data, squared=True)
    return _labels_inertia(data, norms, self.cluster_centers_)[0]
def score(self, X, y=None):
    """Opposite of the value of X on the K-means objective.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data.

    y : ignored

    Returns
    -------
    score : float
        Opposite of the value of X on the K-means objective.
    """
    check_is_fitted(self, 'cluster_centers_')
    data = self._check_test_data(X)
    norms = row_norms(data, squared=True)
    # Negated inertia: larger (less negative) scores mean tighter clusters.
    inertia = _labels_inertia(data, norms, self.cluster_centers_)[1]
    return -inertia
'Compute the centroids on X by chunking it into mini-batches.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.'
| def fit(self, X, y=None):
| random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse='csr', order='C', dtype=[np.float64, np.float32])
(n_samples, n_features) = X.shape
if (n_samples < self.n_clusters):
raise ValueError('Number of samples smaller than number of clusters.')
... |
'Compute labels and inertia using mini batches.
This is slightly slower than doing everything at once but preventes
memory errors / segfaults.
Parameters
X : array-like, shape (n_samples, n_features)
Input data.
Returns
labels : array, shap (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared dist... | def _labels_inertia_minibatch(self, X):
| if self.verbose:
print 'Computing label assignment and total inertia'
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s], self.cluster_centers_) for s in slices]
(labels, inertia) =... |
'Update k means estimate on a single mini-batch X.
Parameters
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.'
| def partial_fit(self, X, y=None):
| X = check_array(X, accept_sparse='csr')
(n_samples, n_features) = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if (n_samples == 0):
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self... |
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to predict.

    Returns
    -------
    labels : array, shape [n_samples,]
        Index of the cluster each sample belongs to.
    """
    check_is_fitted(self, 'cluster_centers_')
    data = self._check_test_data(X)
    # Labels are computed over mini-batches to bound peak memory use.
    return self._labels_inertia_minibatch(data)[0]
'Fit the hierarchical clustering on the data
Parameters
X : array-like, shape = [n_samples, n_features]
The samples a.k.a. observations.
Returns
self'
| def fit(self, X, y=None):
| X = check_array(X, ensure_min_samples=2, estimator=self)
memory = self.memory
if (memory is None):
memory = Memory(cachedir=None, verbose=0)
elif isinstance(memory, six.string_types):
memory = Memory(cachedir=memory, verbose=0)
elif (not isinstance(memory, Memory)):
raise Val... |
def fit(self, X, y=None, **params):
    """Fit the hierarchical clustering on the data.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        The data.

    y : ignored

    Returns
    -------
    self
    """
    validated = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                            ensure_min_features=2, estimator=self)
    # Cluster features rather than samples: agglomerate on X transposed.
    return AgglomerativeClustering.fit(self, validated.T, **params)
'Transform a new matrix using the built clustering
Parameters
X : array-like, shape = [n_samples, n_features] or [n_features]
A M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
Y : array, shape = [n_samples, n_clusters] or [n_clusters]
The pooled values for e... | def transform(self, X):
| check_is_fitted(self, 'labels_')
pooling_func = self.pooling_func
X = check_array(X)
nX = []
if (len(self.labels_) != X.shape[1]):
raise ValueError('X has a different number of features than during fitting.')
for l in np.unique(self.labels_):
nX.append(... |
def inverse_transform(self, Xred):
    """Inverse the transformation.

    Return a vector of size nb_features with the values of Xred assigned
    to each group of features.

    Parameters
    ----------
    Xred : array-like, shape=[n_samples, n_clusters] or [n_clusters,]
        The values to be assigned to each cluster of samples.

    Returns
    -------
    X : array, shape=[n_samples, n_features] or [n_features]
        A vector with the Xred value of its cluster assigned to each
        original feature.
    """
    check_is_fitted(self, 'labels_')
    # `inverse` maps each original feature to its cluster's column in
    # Xred, so fancy-indexing broadcasts cluster values back out.
    _, inverse = np.unique(self.labels_, return_inverse=True)
    return Xred[..., inverse]
def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2):
    """Remove a subcluster from a node and update it with the
    split subclusters.

    The first half overwrites the old entry in place (subcluster list,
    centroid and squared-norm caches); the second half is appended as a
    brand-new subcluster.
    """
    pos = self.subclusters_.index(subcluster)
    self.subclusters_[pos] = new_subcluster1
    self.init_centroids_[pos] = new_subcluster1.centroid_
    self.init_sq_norm_[pos] = new_subcluster1.sq_norm_
    self.append_subcluster(new_subcluster2)
'Insert a new subcluster into the node.'
| def insert_cf_subcluster(self, subcluster):
| if (not self.subclusters_):
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= (-2.0)
dist_matrix += self.squared_norm_
closest_index = ... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.