desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'Return the kernel k(X, Y) and optionally its gradient. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradi...
def __call__(self, X, Y=None, eval_gradient=False):
X = np.atleast_2d(X) if (Y is None): dists = squareform(pdist(X, metric='sqeuclidean')) tmp = (dists / ((2 * self.alpha) * (self.length_scale ** 2))) base = (1 + tmp) K = (base ** (- self.alpha)) np.fill_diagonal(K, 1) else: if eval_gradient: raise...
'Return the kernel k(X, Y) and optionally its gradient. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradi...
def __call__(self, X, Y=None, eval_gradient=False):
X = np.atleast_2d(X) if (Y is None): dists = squareform(pdist(X, metric='euclidean')) arg = ((np.pi * dists) / self.periodicity) sin_of_arg = np.sin(arg) K = np.exp(((-2) * ((sin_of_arg / self.length_scale) ** 2))) else: if eval_gradient: raise ValueError(...
'Return the kernel k(X, Y) and optionally its gradient. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradi...
def __call__(self, X, Y=None, eval_gradient=False):
X = np.atleast_2d(X) if (Y is None): K = (np.inner(X, X) + (self.sigma_0 ** 2)) else: if eval_gradient: raise ValueError('Gradient can only be evaluated when Y is None.') K = (np.inner(X, Y) + (self.sigma_0 ** 2)) if eval_gradient: if (...
def diag(self, X):
    """Return the diagonal of the kernel k(X, X).

    Identical to ``np.diag(self(X))`` but cheaper, since only the
    diagonal entries are evaluated.

    Parameters
    ----------
    X : array, shape (n_samples_X, n_features)
        Left argument of the returned kernel k(X, Y).

    Returns
    -------
    K_diag : array, shape (n_samples_X,)
        Diagonal of kernel k(X, X).
    """
    # Row-wise squared norms plus the constant offset sigma_0 ** 2.
    row_sq_norms = np.einsum('ij,ij->i', X, X)
    return row_sq_norms + self.sigma_0 ** 2
def is_stationary(self):
    """Return whether the kernel is stationary."""
    # This kernel depends on the absolute positions of the inputs,
    # not only on their difference, so it is never stationary.
    return False
'Return the kernel k(X, Y) and optionally its gradient. Parameters X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradi...
def __call__(self, X, Y=None, eval_gradient=False):
pairwise_kernels_kwargs = self.pairwise_kernels_kwargs if (self.pairwise_kernels_kwargs is None): pairwise_kernels_kwargs = {} X = np.atleast_2d(X) K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma, filter_params=True, **pairwise_kernels_kwargs) if eval_gradient: if sel...
def diag(self, X):
    """Return the diagonal of the kernel k(X, X).

    Identical to ``np.diag(self(X))`` but cheaper, since only the
    diagonal entries are evaluated.

    Parameters
    ----------
    X : array, shape (n_samples_X, n_features)
        Left argument of the returned kernel k(X, Y).

    Returns
    -------
    K_diag : array, shape (n_samples_X,)
        Diagonal of kernel k(X, X).
    """
    # Evaluate the kernel object on each row paired with itself.
    per_row = np.apply_along_axis(self, 1, X)
    return per_row.ravel()
def is_stationary(self):
    """Return whether the kernel is stationary."""
    # Among the supported pairwise metrics only 'rbf' is
    # translation invariant.
    return self.metric == 'rbf'
'Check values of the basic parameters. Parameters X : array-like, shape (n_samples, n_features)'
def _check_initial_parameters(self, X):
if (self.n_components < 1): raise ValueError(("Invalid value for 'n_components': %d Estimation requires at least one component" % self.n_components)) if (self.tol < 0.0): raise ValueError(("Invalid value for 'tol': %.5f Tolerance used by the ...
'Check initial parameters of the derived class. Parameters X : array-like, shape (n_samples, n_features)'
@abstractmethod def _check_parameters(self, X):
pass
'Initialize the model parameters. Parameters X : array-like, shape (n_samples, n_features) random_state : RandomState A random number generator instance.'
def _initialize_parameters(self, X, random_state):
(n_samples, _) = X.shape if (self.init_params == 'kmeans'): resp = np.zeros((n_samples, self.n_components)) label = cluster.KMeans(n_clusters=self.n_components, n_init=1, random_state=random_state).fit(X).labels_ resp[(np.arange(n_samples), label)] = 1 elif (self.init_params == 'rand...
'Initialize the model parameters of the derived class. Parameters X : array-like, shape (n_samples, n_features) resp : array-like, shape (n_samples, n_components)'
@abstractmethod def _initialize(self, X, resp):
pass
'Estimate model parameters with the EM algorithm. The method fit the model `n_init` times and set the parameters with which the model has the largest likelihood or lower bound. Within each trial, the method iterates between E-step and M-step for `max_iter` times until the change of likelihood or lower bound is less tha...
def fit(self, X, y=None):
X = _check_X(X, self.n_components) self._check_initial_parameters(X) do_init = (not (self.warm_start and hasattr(self, 'converged_'))) n_init = (self.n_init if do_init else 1) max_lower_bound = (- np.infty) self.converged_ = False random_state = check_random_state(self.random_state) (n_s...
'E step. Parameters X : array-like, shape (n_samples, n_features) Returns log_prob_norm : float Mean of the logarithms of the probabilities of each sample in X log_responsibility : array, shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X.'
def _e_step(self, X):
(log_prob_norm, log_resp) = self._estimate_log_prob_resp(X) return (np.mean(log_prob_norm), log_resp)
'M step. Parameters X : array-like, shape (n_samples, n_features) log_resp : array-like, shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X.'
@abstractmethod def _m_step(self, X, log_resp):
pass
def score_samples(self, X):
    """Compute the weighted log probabilities for each sample.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        List of n_features-dimensional data points.

    Returns
    -------
    log_prob : array, shape (n_samples,)
        Log probabilities of each data point in X.
    """
    self._check_is_fitted()
    # Validate X against the number of features seen during fit.
    X = _check_X(X, None, self.means_.shape[1])
    # log p(x) = logsumexp_k [ log w_k + log p(x | k) ]
    return logsumexp(self._estimate_weighted_log_prob(X), axis=1)
def score(self, X, y=None):
    """Compute the per-sample average log-likelihood of the given data X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_dimensions)
        List of n_features-dimensional data points.

    Returns
    -------
    log_likelihood : float
        Log likelihood of the model given X.
    """
    per_sample = self.score_samples(X)
    return per_sample.mean()
def predict(self, X):
    """Predict the labels for the data samples in X using the trained model.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        List of n_features-dimensional data points.

    Returns
    -------
    labels : array, shape (n_samples,)
        Component labels.
    """
    self._check_is_fitted()
    X = _check_X(X, None, self.means_.shape[1])
    # The most probable component under the weighted log-probability.
    return self._estimate_weighted_log_prob(X).argmax(axis=1)
def predict_proba(self, X):
    """Predict posterior probability of each component given the data.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        List of n_features-dimensional data points.

    Returns
    -------
    resp : array, shape (n_samples, n_components)
        Probability of each component for each sample.
    """
    self._check_is_fitted()
    X = _check_X(X, None, self.means_.shape[1])
    _, log_resp = self._estimate_log_prob_resp(X)
    return np.exp(log_resp)
'Generate random samples from the fitted Gaussian distribution. Parameters n_samples : int, optional Number of samples to generate. Defaults to 1. Returns X : array, shape (n_samples, n_features) Randomly generated sample y : array, shape (nsamples,) Component labels'
def sample(self, n_samples=1):
self._check_is_fitted() if (n_samples < 1): raise ValueError(("Invalid value for 'n_samples': %d . The sampling requires at least one sample." % self.n_components)) (_, n_features) = self.means_.shape rng = check_random_state(self.random_state) n_samples_c...
'Estimate the weighted log-probabilities, log P(X | Z) + log weights. Parameters X : array-like, shape (n_samples, n_features) Returns weighted_log_prob : array, shape (n_samples, n_component)'
def _estimate_weighted_log_prob(self, X):
return (self._estimate_log_prob(X) + self._estimate_log_weights())
'Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm. Returns log_weight : array, shape (n_components, )'
@abstractmethod def _estimate_log_weights(self):
pass
'Estimate the log-probabilities log P(X | Z). Compute the log-probabilities per each component for each sample. Parameters X : array-like, shape (n_samples, n_features) Returns log_prob : array, shape (n_samples, n_component)'
@abstractmethod def _estimate_log_prob(self, X):
pass
'Estimate log probabilities and responsibilities for each sample. Compute the log probabilities, weighted log probabilities per component and responsibilities for each sample in X with respect to the current state of the model. Parameters X : array-like, shape (n_samples, n_features) Returns log_prob_norm : array, shap...
def _estimate_log_prob_resp(self, X):
weighted_log_prob = self._estimate_weighted_log_prob(X) log_prob_norm = logsumexp(weighted_log_prob, axis=1) with np.errstate(under='ignore'): log_resp = (weighted_log_prob - log_prob_norm[:, np.newaxis]) return (log_prob_norm, log_resp)
'Print verbose message on initialization.'
def _print_verbose_msg_init_beg(self, n_init):
if (self.verbose == 1): print(('Initialization %d' % n_init)) elif (self.verbose >= 2): print(('Initialization %d' % n_init)) self._init_prev_time = time() self._iter_prev_time = self._init_prev_time
'Print verbose message on initialization.'
def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
if ((n_iter % self.verbose_interval) == 0): if (self.verbose == 1): print((' Iteration %d' % n_iter)) elif (self.verbose >= 2): cur_time = time() print((' Iteration %d DCTB time lapse %.5fs DCTB ll change %.5f' % (n_iter, (c...
'Print verbose message on the end of iteration.'
# NOTE(review): prints the convergence status once an initialization run
# finishes; at verbose >= 2 it also reports elapsed time and the final
# log-likelihood. The " DCTB " token in the format string below looks like
# a mangled tab escape ('\t') introduced by extraction -- confirm against
# the upstream source before relying on the exact message text.
def _print_verbose_msg_init_end(self, ll):
if (self.verbose == 1): print(('Initialization converged: %s' % self.converged_)) elif (self.verbose >= 2): print(('Initialization converged: %s DCTB time lapse %.5fs DCTB ll %.5f' % (self.converged_, (time() - self._init_prev_time), ll)))
'Covariance parameters for each mixture component. The shape depends on ``cvtype``:: (n_states, n_features) if \'spherical\', (n_features, n_features) if \'tied\', (n_states, n_features) if \'diag\', (n_states, n_features, n_features) if \'full\''
def _get_covars(self):
if (self.covariance_type == 'full'): return self.covars_ elif (self.covariance_type == 'diag'): return [np.diag(cov) for cov in self.covars_] elif (self.covariance_type == 'tied'): return ([self.covars_] * self.n_components) elif (self.covariance_type == 'spherical'): ret...
def _set_covars(self, covars):
    """Provide values for covariance."""
    covars = np.asarray(covars)
    # Reject shapes inconsistent with covariance_type / n_components.
    _validate_covars(covars, self.covariance_type, self.n_components)
    self.covars_ = covars
'Return the per-sample likelihood of the data under the model. Compute the log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. Parameters X : array_like, shape (n_samples, n_features) List of n_features-dimensional data points. E...
def score_samples(self, X):
check_is_fitted(self, 'means_') X = check_array(X) if (X.ndim == 1): X = X[:, np.newaxis] if (X.size == 0): return (np.array([]), np.empty((0, self.n_components))) if (X.shape[1] != self.means_.shape[1]): raise ValueError('The shape of X is not compatibl...
def score(self, X, y=None):
    """Compute the log probability under the model.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points.

    Returns
    -------
    logprob : array_like, shape (n_samples,)
        Log probabilities of each data point in X.
    """
    logprob, _ = self.score_samples(X)
    return logprob
def predict(self, X):
    """Predict label for data.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    C : array, shape = (n_samples,)
        Component memberships.
    """
    _, responsibilities = self.score_samples(X)
    # Each sample is assigned to its highest-responsibility component.
    return responsibilities.argmax(axis=1)
def predict_proba(self, X):
    """Predict posterior probability of data under each Gaussian in the model.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    responsibilities : array-like, shape = (n_samples, n_components)
        Probability of the sample for each Gaussian (state) in the model.
    """
    _, responsibilities = self.score_samples(X)
    return responsibilities
'Generate random samples from the model. Parameters n_samples : int, optional Number of samples to generate. Defaults to 1. Returns X : array_like, shape (n_samples, n_features) List of samples'
def sample(self, n_samples=1, random_state=None):
check_is_fitted(self, 'means_') if (random_state is None): random_state = self.random_state random_state = check_random_state(random_state) weight_cdf = np.cumsum(self.weights_) X = np.empty((n_samples, self.means_.shape[1])) rand = random_state.rand(n_samples) comps = weight_cdf.sea...
def fit_predict(self, X, y=None):
    """Fit and then predict labels for data.

    Warning: due to the final maximization step in the EM algorithm,
    with few iterations the prediction may not be fully converged.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    C : array, shape = (n_samples,)
        Component memberships.
    """
    responsibilities = self._fit(X, y)
    return responsibilities.argmax(axis=1)
'Estimate model parameters with the EM algorithm. A initialization step is performed before entering the expectation-maximization (EM) algorithm. If you want to avoid this step, set the keyword argument init_params to the empty string \'\' when creating the GMM object. Likewise, if you would like just to do an initiali...
def _fit(self, X, y=None, do_prediction=False):
X = check_array(X, dtype=np.float64, ensure_min_samples=2, estimator=self) if (X.shape[0] < self.n_components): raise ValueError(('GMM estimation with %s components, but got only %s samples' % (self.n_components, X.shape[0]))) max_log_prob = (- np.infty) if (self.verbo...
def fit(self, X, y=None):
    """Estimate model parameters with the EM algorithm.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    self : object
        The fitted estimator.
    """
    self._fit(X, y)
    return self
'Perform the Mstep of the EM algorithm and return the cluster weights.'
def _do_mstep(self, X, responsibilities, params, min_covar=0):
weights = responsibilities.sum(axis=0) weighted_X_sum = np.dot(responsibilities.T, X) inverse_weights = (1.0 / (weights[:, np.newaxis] + (10 * EPS))) if ('w' in params): self.weights_ = ((weights / (weights.sum() + (10 * EPS))) + EPS) if ('m' in params): self.means_ = (weighted_X_sum...
'Return the number of free parameters in the model.'
def _n_parameters(self):
ndim = self.means_.shape[1] if (self.covariance_type == 'full'): cov_params = (((self.n_components * ndim) * (ndim + 1)) / 2.0) elif (self.covariance_type == 'diag'): cov_params = (self.n_components * ndim) elif (self.covariance_type == 'tied'): cov_params = ((ndim * (ndim + 1)) ...
def bic(self, X):
    """Bayesian information criterion for the current model fit
    and the proposed data.

    Parameters
    ----------
    X : array of shape (n_samples, n_dimensions)

    Returns
    -------
    bic : float
        The lower the better.
    """
    n_samples = X.shape[0]
    # BIC = -2 * log-likelihood + n_parameters * log(n_samples)
    return -2 * self.score(X).sum() + self._n_parameters() * np.log(n_samples)
def aic(self, X):
    """Akaike information criterion for the current model fit
    and the proposed data.

    Parameters
    ----------
    X : array of shape (n_samples, n_dimensions)

    Returns
    -------
    aic : float
        The lower the better.
    """
    # AIC = -2 * log-likelihood + 2 * n_parameters
    return -2 * self.score(X).sum() + 2 * self._n_parameters()
'Check that the parameters are well defined. Parameters X : array-like, shape (n_samples, n_features)'
def _check_parameters(self, X):
if (self.covariance_type not in ['spherical', 'tied', 'diag', 'full']): raise ValueError(("Invalid value for 'covariance_type': %s 'covariance_type' should be in ['spherical', 'tied', 'diag', 'full']" % self.covariance_type)) if (self.weight_concentration_prior_type n...
'Check the parameter of the Dirichlet distribution.'
def _check_weights_parameters(self):
if (self.weight_concentration_prior is None): self.weight_concentration_prior_ = (1.0 / self.n_components) elif (self.weight_concentration_prior > 0.0): self.weight_concentration_prior_ = self.weight_concentration_prior else: raise ValueError(("The parameter 'weight_concentrati...
'Check the parameters of the Gaussian distribution. Parameters X : array-like, shape (n_samples, n_features)'
def _check_means_parameters(self, X):
(_, n_features) = X.shape if (self.mean_precision_prior is None): self.mean_precision_prior_ = 1.0 elif (self.mean_precision_prior > 0.0): self.mean_precision_prior_ = self.mean_precision_prior else: raise ValueError(("The parameter 'mean_precision_prior' should be ...
'Check the prior parameters of the precision distribution. Parameters X : array-like, shape (n_samples, n_features)'
def _check_precision_parameters(self, X):
(_, n_features) = X.shape if (self.degrees_of_freedom_prior is None): self.degrees_of_freedom_prior_ = n_features elif (self.degrees_of_freedom_prior > (n_features - 1.0)): self.degrees_of_freedom_prior_ = self.degrees_of_freedom_prior else: raise ValueError(("The parameter ...
'Check the `covariance_prior_`. Parameters X : array-like, shape (n_samples, n_features)'
def _checkcovariance_prior_parameter(self, X):
(_, n_features) = X.shape if (self.covariance_prior is None): self.covariance_prior_ = {'full': np.atleast_2d(np.cov(X.T)), 'tied': np.atleast_2d(np.cov(X.T)), 'diag': np.var(X, axis=0, ddof=1), 'spherical': np.var(X, axis=0, ddof=1).mean()}[self.covariance_type] elif (self.covariance_type in ['full...
def _initialize(self, X, resp):
    """Initialization of the mixture parameters.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    resp : array-like, shape (n_samples, n_components)
    """
    nk, xk, sk = _estimate_gaussian_parameters(
        X, resp, self.reg_covar, self.covariance_type)
    self._estimate_weights(nk)
    self._estimate_means(nk, xk)
    self._estimate_precisions(nk, xk, sk)
'Estimate the parameters of the Dirichlet distribution. Parameters nk : array-like, shape (n_components,)'
def _estimate_weights(self, nk):
if (self.weight_concentration_prior_type == 'dirichlet_process'): self.weight_concentration_ = ((1.0 + nk), (self.weight_concentration_prior_ + np.hstack((np.cumsum(nk[::(-1)])[(-2)::(-1)], 0)))) else: self.weight_concentration_ = (self.weight_concentration_prior_ + nk)
'Estimate the parameters of the Gaussian distribution. Parameters nk : array-like, shape (n_components,) xk : array-like, shape (n_components, n_features)'
def _estimate_means(self, nk, xk):
self.mean_precision_ = (self.mean_precision_prior_ + nk) self.means_ = (((self.mean_precision_prior_ * self.mean_prior_) + (nk[:, np.newaxis] * xk)) / self.mean_precision_[:, np.newaxis])
def _estimate_precisions(self, nk, xk, sk):
    """Estimate the precision parameters of the precision distribution.

    Parameters
    ----------
    nk : array-like, shape (n_components,)

    xk : array-like, shape (n_components, n_features)

    sk : array-like
        The shape depends on `covariance_type`.
    """
    # Dispatch to the Wishart update matching the covariance type.
    estimators = {
        'full': self._estimate_wishart_full,
        'tied': self._estimate_wishart_tied,
        'diag': self._estimate_wishart_diag,
        'spherical': self._estimate_wishart_spherical,
    }
    estimators[self.covariance_type](nk, xk, sk)
    self.precisions_cholesky_ = _compute_precision_cholesky(
        self.covariances_, self.covariance_type)
'Estimate the full Wishart distribution parameters. Parameters X : array-like, shape (n_samples, n_features) nk : array-like, shape (n_components,) xk : array-like, shape (n_components, n_features) sk : array-like, shape (n_components, n_features, n_features)'
def _estimate_wishart_full(self, nk, xk, sk):
(_, n_features) = xk.shape self.degrees_of_freedom_ = (self.degrees_of_freedom_prior_ + nk) self.covariances_ = np.empty((self.n_components, n_features, n_features)) for k in range(self.n_components): diff = (xk[k] - self.mean_prior_) self.covariances_[k] = ((self.covariance_prior_ + (nk...
'Estimate the tied Wishart distribution parameters. Parameters X : array-like, shape (n_samples, n_features) nk : array-like, shape (n_components,) xk : array-like, shape (n_components, n_features) sk : array-like, shape (n_features, n_features)'
def _estimate_wishart_tied(self, nk, xk, sk):
(_, n_features) = xk.shape self.degrees_of_freedom_ = (self.degrees_of_freedom_prior_ + (nk.sum() / self.n_components)) diff = (xk - self.mean_prior_) self.covariances_ = ((self.covariance_prior_ + ((sk * nk.sum()) / self.n_components)) + ((self.mean_precision_prior_ / self.n_components) * np.dot(((nk /...
'Estimate the diag Wishart distribution parameters. Parameters X : array-like, shape (n_samples, n_features) nk : array-like, shape (n_components,) xk : array-like, shape (n_components, n_features) sk : array-like, shape (n_components, n_features)'
def _estimate_wishart_diag(self, nk, xk, sk):
(_, n_features) = xk.shape self.degrees_of_freedom_ = (self.degrees_of_freedom_prior_ + nk) diff = (xk - self.mean_prior_) self.covariances_ = (self.covariance_prior_ + (nk[:, np.newaxis] * (sk + ((self.mean_precision_prior_ / self.mean_precision_)[:, np.newaxis] * np.square(diff))))) self.covarianc...
'Estimate the spherical Wishart distribution parameters. Parameters X : array-like, shape (n_samples, n_features) nk : array-like, shape (n_components,) xk : array-like, shape (n_components, n_features) sk : array-like, shape (n_components,)'
def _estimate_wishart_spherical(self, nk, xk, sk):
(_, n_features) = xk.shape self.degrees_of_freedom_ = (self.degrees_of_freedom_prior_ + nk) diff = (xk - self.mean_prior_) self.covariances_ = (self.covariance_prior_ + (nk * (sk + ((self.mean_precision_prior_ / self.mean_precision_) * np.mean(np.square(diff), 1))))) self.covariances_ /= self.degree...
def _m_step(self, X, log_resp):
    """M step.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    log_resp : array-like, shape (n_samples, n_components)
        Logarithm of the posterior probabilities (or responsibilities)
        of each sample in X.
    """
    n_samples, _ = X.shape
    nk, xk, sk = _estimate_gaussian_parameters(
        X, np.exp(log_resp), self.reg_covar, self.covariance_type)
    self._estimate_weights(nk)
    self._estimate_means(nk, xk)
    self._estimate_precisions(nk, xk, sk)
'Estimate the lower bound of the model. The lower bound on the likelihood (of the training data with respect to the model) is used to detect the convergence and has to decrease at each iteration. Parameters X : array-like, shape (n_samples, n_features) log_resp : array, shape (n_samples, n_components) Logarithm of the ...
def _compute_lower_bound(self, log_resp, log_prob_norm):
(n_features,) = self.mean_prior_.shape log_det_precisions_chol = (_compute_log_det_cholesky(self.precisions_cholesky_, self.covariance_type, n_features) - ((0.5 * n_features) * np.log(self.degrees_of_freedom_))) if (self.covariance_type == 'tied'): log_wishart = (self.n_components * np.float64(_log_...
'Check the Gaussian mixture parameters are well defined.'
def _check_parameters(self, X):
(_, n_features) = X.shape if (self.covariance_type not in ['spherical', 'tied', 'diag', 'full']): raise ValueError(("Invalid value for 'covariance_type': %s 'covariance_type' should be in ['spherical', 'tied', 'diag', 'full']" % self.covariance_type)) if (self.wei...
'Initialization of the Gaussian mixture parameters. Parameters X : array-like, shape (n_samples, n_features) resp : array-like, shape (n_samples, n_components)'
def _initialize(self, X, resp):
(n_samples, _) = X.shape (weights, means, covariances) = _estimate_gaussian_parameters(X, resp, self.reg_covar, self.covariance_type) weights /= n_samples self.weights_ = (weights if (self.weights_init is None) else self.weights_init) self.means_ = (means if (self.means_init is None) else self.means...
def _m_step(self, X, log_resp):
    """M step.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    log_resp : array-like, shape (n_samples, n_components)
        Logarithm of the posterior probabilities (or responsibilities)
        of each sample in X.
    """
    n_samples, _ = X.shape
    self.weights_, self.means_, self.covariances_ = (
        _estimate_gaussian_parameters(X, np.exp(log_resp),
                                      self.reg_covar, self.covariance_type))
    # Component counts -> mixing proportions.
    self.weights_ /= n_samples
    self.precisions_cholesky_ = _compute_precision_cholesky(
        self.covariances_, self.covariance_type)
'Return the number of free parameters in the model.'
def _n_parameters(self):
(_, n_features) = self.means_.shape if (self.covariance_type == 'full'): cov_params = (((self.n_components * n_features) * (n_features + 1)) / 2.0) elif (self.covariance_type == 'diag'): cov_params = (self.n_components * n_features) elif (self.covariance_type == 'tied'): cov_para...
def bic(self, X):
    """Bayesian information criterion for the current model on the input X.

    Parameters
    ----------
    X : array of shape (n_samples, n_dimensions)

    Returns
    -------
    bic : float
        The lower the better.
    """
    n_samples = X.shape[0]
    # score() is the mean per-sample log-likelihood, hence the * n_samples.
    return (-2 * self.score(X) * n_samples
            + self._n_parameters() * np.log(n_samples))
def aic(self, X):
    """Akaike information criterion for the current model on the input X.

    Parameters
    ----------
    X : array of shape (n_samples, n_dimensions)

    Returns
    -------
    aic : float
        The lower the better.
    """
    # score() is the mean per-sample log-likelihood, hence the * n_samples.
    return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters()
'Return precisions as a full matrix.'
def _get_precisions(self):
if (self.covariance_type == 'full'): return self.precs_ elif (self.covariance_type in ['diag', 'spherical']): return [np.diag(cov) for cov in self.precs_] elif (self.covariance_type == 'tied'): return ([self.precs_] * self.n_components)
'Return the likelihood of the data under the model. Compute the bound on log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. This is done by computing the parameters for the mean-field of z for each observation. Parameters X : ar...
def score_samples(self, X):
check_is_fitted(self, 'gamma_') X = check_array(X) if (X.ndim == 1): X = X[:, np.newaxis] z = np.zeros((X.shape[0], self.n_components)) sd = digamma((self.gamma_.T[1] + self.gamma_.T[2])) dgamma1 = (digamma(self.gamma_.T[1]) - sd) dgamma2 = np.zeros(self.n_components) dgamma2[0] ...
'Update the concentration parameters for each cluster'
def _update_concentration(self, z):
sz = np.sum(z, axis=0) self.gamma_.T[1] = (1.0 + sz) self.gamma_.T[2].fill(0) for i in range((self.n_components - 2), (-1), (-1)): self.gamma_[(i, 2)] = (self.gamma_[((i + 1), 2)] + sz[i]) self.gamma_.T[2] += self.alpha
'Update the variational distributions for the means'
def _update_means(self, X, z):
n_features = X.shape[1] for k in range(self.n_components): if (self.covariance_type in ['spherical', 'diag']): num = np.sum((z.T[k].reshape(((-1), 1)) * X), axis=0) num *= self.precs_[k] den = (1.0 + (self.precs_[k] * np.sum(z.T[k]))) self.means_[k] = (num...
'Update the variational distributions for the precisions'
def _update_precisions(self, X, z):
n_features = X.shape[1] if (self.covariance_type == 'spherical'): self.dof_ = ((0.5 * n_features) * np.sum(z, axis=0)) for k in range(self.n_components): sq_diff = np.sum(((X - self.means_[k]) ** 2), axis=1) self.scale_[k] = 1.0 self.scale_[k] += (0.5 * np.sum...
'Monitor the lower bound during iteration Debug method to help see exactly when it is failing to converge as expected. Note: this is very expensive and should not be used by default.'
def _monitor(self, X, z, n, end=False):
if (self.verbose > 0): print(('Bound after updating %8s: %f' % (n, self.lower_bound(X, z)))) if end: print('Cluster proportions:', self.gamma_.T[1]) print('covariance_type:', self.covariance_type)
'Maximize the variational lower bound Update each of the parameters to maximize the lower bound.'
def _do_mstep(self, X, z, params):
self._monitor(X, z, 'z') self._update_concentration(z) self._monitor(X, z, 'gamma') if ('m' in params): self._update_means(X, z) self._monitor(X, z, 'mu') if ('c' in params): self._update_precisions(X, z) self._monitor(X, z, 'a and b', end=True)
'Initializes the concentration parameters'
def _initialize_gamma(self):
self.gamma_ = (self.alpha * np.ones((self.n_components, 3)))
'The variational lower bound for the concentration parameter.'
def _bound_concentration(self):
logprior = (gammaln(self.alpha) * self.n_components) logprior += np.sum(((self.alpha - 1) * (digamma(self.gamma_.T[2]) - digamma((self.gamma_.T[1] + self.gamma_.T[2]))))) logprior += np.sum((- gammaln((self.gamma_.T[1] + self.gamma_.T[2])))) logprior += np.sum((gammaln(self.gamma_.T[1]) + gammaln(self.g...
def _bound_means(self):
    """The variational lower bound for the mean parameters."""
    logprior = 0.0
    logprior -= 0.5 * squared_norm(self.means_)
    logprior -= 0.5 * self.means_.shape[1] * self.n_components
    return logprior
'Returns the bound term related to precisions'
def _bound_precisions(self):
logprior = 0.0 if (self.covariance_type == 'spherical'): logprior += np.sum(gammaln(self.dof_)) logprior -= np.sum(((self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))) logprior += np.sum((((- np.log(self.scale_)) + self.dof_) - self.precs_[:, 0])) elif (self.covariance_type == 'd...
'Returns the bound term related to proportions'
def _bound_proportions(self, z):
dg12 = digamma((self.gamma_.T[1] + self.gamma_.T[2])) dg1 = (digamma(self.gamma_.T[1]) - dg12) dg2 = (digamma(self.gamma_.T[2]) - dg12) cz = stable_cumsum(z[:, ::(-1)], axis=(-1))[:, (-2)::(-1)] logprior = (np.sum((cz * dg2[:(-1)])) + np.sum((z * dg1))) del cz z_non_zeros = z[(z > np.finfo(n...
'returns a lower bound on model evidence based on X and membership'
def lower_bound(self, X, z):
check_is_fitted(self, 'means_') if (self.covariance_type not in ['full', 'tied', 'diag', 'spherical']): raise NotImplementedError(('This ctype is not implemented: %s' % self.covariance_type)) X = np.asarray(X) if (X.ndim == 1): X = X[:, np.newaxis] c = np.sum((z * _bou...
'Estimate model parameters with the variational algorithm. For a full derivation and description of the algorithm see doc/modules/dp-derivation.rst or http://scikit-learn.org/stable/modules/dp-derivation.html A initialization step is performed before entering the em algorithm. If you want to avoid this step, set the ke...
def _fit(self, X, y=None):
self.random_state_ = check_random_state(self.random_state) X = check_array(X) if (X.ndim == 1): X = X[:, np.newaxis] (n_samples, n_features) = X.shape z = np.ones((n_samples, self.n_components)) z /= self.n_components self._initial_bound = (((-0.5) * n_features) * np.log((2 * np.pi))...
def _fit(self, X, y=None):
    """Estimate model parameters with the variational algorithm.

    Normalizes the concentration parameter by the number of components
    before delegating to the parent class fitting routine.
    """
    self.alpha_ = float(self.alpha) / self.n_components
    return super(VBGMM, self)._fit(X, y)
'Return the likelihood of the data under the model. Compute the bound on log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. This is done by computing the parameters for the mean-field of z for each observation. Parameters X : ar...
def score_samples(self, X):
check_is_fitted(self, 'gamma_') X = check_array(X) if (X.ndim == 1): X = X[:, np.newaxis] dg = (digamma(self.gamma_) - digamma(np.sum(self.gamma_))) if (self.covariance_type not in ['full', 'tied', 'diag', 'spherical']): raise NotImplementedError(('This ctype is not imple...
'Monitor the lower bound during iteration Debug method to help see exactly when it is failing to converge as expected. Note: this is very expensive and should not be used by default.'
def _monitor(self, X, z, n, end=False):
if (self.verbose > 0): print(('Bound after updating %8s: %f' % (n, self.lower_bound(X, z)))) if end: print('Cluster proportions:', self.gamma_) print('covariance_type:', self.covariance_type)
'Build the f_ interp1d function.'
def _build_f(self, X, y):
if (self.out_of_bounds not in ['raise', 'nan', 'clip']): raise ValueError("The argument ``out_of_bounds`` must be in 'nan', 'clip', 'raise'; got {0}".format(self.out_of_bounds)) bounds_error = (self.out_of_bounds == 'raise') if (len(y) == 1): self.f_ = (lambda x...
'Build the y_ IsotonicRegression.'
def _build_y(self, X, y, sample_weight, trim_duplicates=True):
check_consistent_length(X, y, sample_weight) (X, y) = [check_array(x, ensure_2d=False) for x in [X, y]] y = as_float_array(y) self._check_fit_data(X, y, sample_weight) if (self.increasing == 'auto'): self.increasing_ = check_increasing(X, y) else: self.increasing_ = self.increasi...
def fit(self, X, y, sample_weight=None):
    """Fit the model using X, y as training data.

    Parameters
    ----------
    X : array-like, shape=(n_samples,)
        Training data.
    y : array-like, shape=(n_samples,)
        Training target.
    sample_weight : array-like, shape=(n_samples,), optional, default: None
        Weights. If set to None, all weights will be set to 1
        (equal weights).

    Returns
    -------
    self : object
        Returns an instance of self.
    """
    X, y = self._build_y(X, y, sample_weight)
    # Keep the (reduced) training data around: the interpolation
    # function f_ cannot be pickled and is rebuilt from these arrays
    # when the estimator is unpickled.
    self._necessary_X_, self._necessary_y_ = X, y
    self._build_f(X, y)
    return self
'Transform new data by linear interpolation Parameters T : array-like, shape=(n_samples,) Data to transform. Returns T_ : array, shape=(n_samples,) The transformed data'
def transform(self, T):
T = as_float_array(T) if (len(T.shape) != 1): raise ValueError('Isotonic regression input should be a 1d array') if (self.out_of_bounds not in ['raise', 'nan', 'clip']): raise ValueError("The argument ``out_of_bounds`` must be in 'nan', 'clip', 'r...
def predict(self, T):
    """Predict new data by linear interpolation.

    For isotonic regression prediction is identical to transforming,
    so this simply delegates.

    Parameters
    ----------
    T : array-like, shape=(n_samples,)
        Data to transform.

    Returns
    -------
    T_ : array, shape=(n_samples,)
        Transformed data.
    """
    return self.transform(T)
def __getstate__(self):
    """Pickle protocol: return the estimator's state.

    The interpolation function ``f_`` is removed because it cannot be
    pickled; it is rebuilt in ``__setstate__`` from the stored training
    data.
    """
    parent_state = super(IsotonicRegression, self).__getstate__()
    parent_state.pop('f_', None)
    return parent_state
def __setstate__(self, state):
    """Pickle protocol: restore state and rebuild the interpolator.

    ``f_`` was dropped by ``__getstate__``; recreate it here, but only
    when the training data needed to do so survived pickling.
    """
    super(IsotonicRegression, self).__setstate__(state)
    have_data = (hasattr(self, '_necessary_X_') and
                 hasattr(self, '_necessary_y_'))
    if have_data:
        self._build_f(self._necessary_X_, self._necessary_y_)
def fit(self, X, y, sample_weight=None):
    """Fit Ridge regression model.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training data.
    y : array-like, shape = [n_samples] or [n_samples, n_targets]
        Target values.
    sample_weight : float or numpy array of shape [n_samples]
        Individual weights for each sample.

    Returns
    -------
    self : returns an instance of self.
    """
    # All of the work happens in the shared linear-model base class;
    # this override exists only to expose Ridge-specific documentation.
    return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
'Fit Ridge regression model. Parameters X : {array-like, sparse matrix}, shape = [n_samples,n_features] Training data y : array-like, shape = [n_samples] Target values sample_weight : float or numpy array of shape (n_samples,) Sample weight. .. versionadded:: 0.17 *sample_weight* support to Classifier. Returns self : r...
def fit(self, X, y, sample_weight=None):
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=(-1)) Y = self._label_binarizer.fit_transform(y) if (not self._label_binarizer.y_type_.startswith('multilabel')): y = column_or_1d(y, warn=True) else: raise ValueError(("%s doesn't support multi-label classificatio...
def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
    """Shared core of ``self._errors`` and ``self._values``.

    Notes
    -----
    We don't construct matrix G; instead we compute its action on y and
    its diagonal from the eigendecomposition (v, Q) of the Gram matrix.
    """
    inv_eigs = 1.0 / (v + alpha)
    # Zero out directions whose eigenvector column is (numerically)
    # constant; NOTE(review): presumably these correspond to the
    # intercept and must not be regularized — confirm against callers.
    is_constant = np.var(Q, 0) < 1e-12
    inv_eigs[is_constant] = 0
    c = np.dot(Q, self._diag_dot(inv_eigs, QT_y))
    G_diag = self._decomp_diag(inv_eigs, Q)
    if y.ndim != 1:
        # Add a trailing axis so G_diag broadcasts over multiple targets.
        G_diag = G_diag[:, np.newaxis]
    return G_diag, c
'Helper function to avoid code duplication between self._errors_svd and self._values_svd.'
def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
constant_column = (np.var(U, 0) < 1e-12) w = (((v + alpha) ** (-1)) - (alpha ** (-1))) w[constant_column] = (- (alpha ** (-1))) c = (np.dot(U, self._diag_dot(w, UT_y)) + ((alpha ** (-1)) * y)) G_diag = (self._decomp_diag(w, U) + (alpha ** (-1))) if (len(y.shape) != 1): G_diag = G_diag[:,...
'Fit Ridge regression model Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. Will be cast to X\'s dtype if necessary sample_weight : float or array-like of shape [n_samples] Sample weight Returns self :...
def fit(self, X, y, sample_weight=None):
(X, y) = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64, multi_output=True, y_numeric=True) if ((sample_weight is not None) and (not isinstance(sample_weight, float))): sample_weight = check_array(sample_weight, ensure_2d=False) (n_samples, n_features) = X.shape (X, y, X_offset, y_offse...
'Fit Ridge regression model Parameters X : array-like, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. Will be cast to X\'s dtype if necessary sample_weight : float or array-like of shape [n_samples] Sample weight Returns self : Returns self.'
def fit(self, X, y, sample_weight=None):
if (self.cv is None): estimator = _RidgeGCV(self.alphas, fit_intercept=self.fit_intercept, normalize=self.normalize, scoring=self.scoring, gcv_mode=self.gcv_mode, store_cv_values=self.store_cv_values) estimator.fit(X, y, sample_weight=sample_weight) self.alpha_ = estimator.alpha_ if ...
'Fit the ridge classifier. Parameters X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. Will be cast to X\'s dtype if necessary sample_weight : float or numpy array of shape...
def fit(self, X, y, sample_weight=None):
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=(-1)) Y = self._label_binarizer.fit_transform(y) if (not self._label_binarizer.y_type_.startswith('multilabel')): y = column_or_1d(y, warn=True) if self.class_weight: if (sample_weight is None): sample_weight = 1.0...
'Fit model with coordinate descent. Parameters X : ndarray or scipy.sparse matrix, (n_samples, n_features) Data y : ndarray, shape (n_samples,) or (n_samples, n_targets) Target. Will be cast to X\'s dtype if necessary check_input : boolean, (default=True) Allow to bypass several input checking. Don\'t use this paramete...
def fit(self, X, y, check_input=True):
if (self.alpha == 0): warnings.warn('With alpha=0, this algorithm does not converge well. You are advised to use the LinearRegression estimator', stacklevel=2) if isinstance(self.precompute, six.string_types): raise ValueError(('precompute should ...
@property
def sparse_coef_(self):
    """Sparse (CSR) representation of the fitted ``coef_``."""
    # coef_ is stored dense; convert on demand rather than caching.
    return sparse.csr_matrix(self.coef_)
def _decision_function(self, X):
    """Decision function of the linear model.

    Parameters
    ----------
    X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)

    Returns
    -------
    T : array, shape (n_samples,)
        The predicted decision function.
    """
    check_is_fitted(self, 'n_iter_')
    if not sparse.isspmatrix(X):
        # Dense input: defer to the generic linear-model implementation.
        return super(ElasticNet, self)._decision_function(X)
    # Sparse input: compute X @ coef_.T + intercept_ without densifying X.
    scores = safe_sparse_dot(X, self.coef_.T, dense_output=True)
    return scores + self.intercept_
'Fit linear model with coordinate descent Fit is on grid of alphas and best alpha estimated by cross-validation. Parameters X : {array-like}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication. If y is mono-output, X can be sparse. y : array-lik...
def fit(self, X, y):
y = check_array(y, copy=False, dtype=[np.float64, np.float32], ensure_2d=False) if (y.shape[0] == 0): raise ValueError(('y has 0 samples: %r' % y)) if hasattr(self, 'l1_ratio'): model_str = 'ElasticNet' else: model_str = 'Lasso' if (isinstance(self, ElasticNetCV) ...
'Fit MultiTaskElasticNet model with coordinate descent Parameters X : ndarray, shape (n_samples, n_features) Data y : ndarray, shape (n_samples, n_tasks) Target. Will be cast to X\'s dtype if necessary Notes Coordinate descent is an algorithm that considers each column of data at a time hence it will automatically conv...
def fit(self, X, y):
X = check_array(X, dtype=[np.float64, np.float32], order='F', copy=(self.copy_X and self.fit_intercept)) y = check_array(y, dtype=X.dtype.type, ensure_2d=False) if hasattr(self, 'l1_ratio'): model_str = 'ElasticNet' else: model_str = 'Lasso' if (y.ndim == 1): raise ValueError...
def predict(self, X):
    """Predict using the linear model.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = (n_samples, n_features)
        Samples.

    Returns
    -------
    C : array, shape = (n_samples,)
        Returns predicted values.
    """
    # Regression prediction is exactly the decision function.
    return self._decision_function(X)
'Set the intercept_'
def _set_intercept(self, X_offset, y_offset, X_scale):
if self.fit_intercept: self.coef_ = (self.coef_ / X_scale) self.intercept_ = (y_offset - np.dot(X_offset, self.coef_.T)) else: self.intercept_ = 0.0