Dataset schema (three string columns per row):

  column  | type   | length range
  --------|--------|--------------
  desc    | string | 3 - 26.7k
  decl    | string | 11 - 7.89k
  bodies  | string | 8 - 553k
Returns the number of splitting iterations in the cross-validator.

Parameters
----------
X : array-like, shape (n_samples, n_features)
    Training data, where n_samples is the number of samples and n_features is the number of features.
y : object
    Always ignored, exists for compatibility.
groups : object
    Always ignored, exists for co...

def get_n_splits(self, X, y=None, groups=None):
    if X is None:
        raise ValueError("The 'X' parameter should not be None.")
    return int(comb(_num_samples(X), self.p, exact=True))
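A minimal usage sketch of the count above (the number of splits is C(n_samples, p)), assuming scikit-learn's public LeavePOut API and a toy 4-sample dataset:

import numpy as np
from sklearn.model_selection import LeavePOut

X = np.ones((4, 2))            # 4 samples, 2 features
lpo = LeavePOut(p=2)
print(lpo.get_n_splits(X))     # comb(4, 2) -> 6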
Generate indices to split data into training and test set.

Parameters
----------
X : array-like, shape (n_samples, n_features)
    Training data, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape (n_samples,)
    The target variable for supervised learning problems.
groups : array-l...

def split(self, X, y=None, groups=None):
    X, y, groups = indexable(X, y, groups)
    n_samples = _num_samples(X)
    if self.n_splits > n_samples:
        raise ValueError('Cannot have number of splits n_splits={0} greater than the number of samples: {1}.'.format(self.n_splits, n_samples))
    for (train, test) in su...
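For context, a usage sketch of the split() generator contract described above, assuming KFold (a concrete subclass) from sklearn.model_selection:

import numpy as np
from sklearn.model_selection import KFold

X = np.arange(8).reshape(4, 2)
for train, test in KFold(n_splits=2).split(X):
    print(train, test)    # index arrays: [2 3] [0 1], then [0 1] [2 3]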
Returns the number of splitting iterations in the cross-validator.

Parameters
----------
X : object
    Always ignored, exists for compatibility.
y : object
    Always ignored, exists for compatibility.
groups : object
    Always ignored, exists for compatibility.

Returns
-------
n_splits : int
    Returns the number of splitting iterations in the cross...

def get_n_splits(self, X=None, y=None, groups=None):
    return self.n_splits
Generate indices to split data into training and test set.

Parameters
----------
X : array-like, shape (n_samples, n_features)
    Training data, where n_samples is the number of samples and n_features is the number of features.
    Note that providing ``y`` is sufficient to generate the splits and hence ``np.zeros(n_samples)`` may be u...

def split(self, X, y, groups=None):
    y = check_array(y, ensure_2d=False, dtype=None)
    return super(StratifiedKFold, self).split(X, y, groups)
Generate indices to split data into training and test set.

Parameters
----------
X : array-like, shape (n_samples, n_features)
    Training data, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape (n_samples,)
    Always ignored, exists for compatibility.
groups : array-like, with sh...

def split(self, X, y=None, groups=None):
    X, y, groups = indexable(X, y, groups)
    n_samples = _num_samples(X)
    n_splits = self.n_splits
    n_folds = n_splits + 1
    if n_folds > n_samples:
        raise ValueError('Cannot have number of folds ={0} greater than the number of samples: {1}.'.format(n_folds, n_...
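The n_folds = n_splits + 1 arithmetic above yields expanding training windows; a short sketch, assuming scikit-learn's TimeSeriesSplit:

import numpy as np
from sklearn.model_selection import TimeSeriesSplit

X = np.zeros((6, 1))
for train, test in TimeSeriesSplit(n_splits=3).split(X):
    print(train, test)
# [0 1 2] [3]
# [0 1 2 3] [4]
# [0 1 2 3 4] [5]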
Returns the number of splitting iterations in the cross-validator.

Parameters
----------
X : object, optional
    Always ignored, exists for compatibility.
y : object, optional
    Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
    Group labels for the samples used while splitting the dataset...

def get_n_splits(self, X=None, y=None, groups=None):
    if groups is None:
        raise ValueError("The 'groups' parameter should not be None.")
    groups = check_array(groups, ensure_2d=False, dtype=None)
    return len(np.unique(groups))
Returns the number of splitting iterations in the cross-validator.

Parameters
----------
X : object, optional
    Always ignored, exists for compatibility.
y : object, optional
    Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
    Group labels for the samples used while splitting the dataset...

def get_n_splits(self, X=None, y=None, groups=None):
    if groups is None:
        raise ValueError("The 'groups' parameter should not be None.")
    groups = check_array(groups, ensure_2d=False, dtype=None)
    return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
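A minimal sketch of this count (C(n_unique_groups, n_groups)), assuming scikit-learn's LeavePGroupsOut:

import numpy as np
from sklearn.model_selection import LeavePGroupsOut

groups = np.array([1, 1, 2, 2, 3])        # 3 unique groups
lpgo = LeavePGroupsOut(n_groups=2)
print(lpgo.get_n_splits(groups=groups))    # comb(3, 2) -> 3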
Generates indices to split data into training and test set.

Parameters
----------
X : array-like, shape (n_samples, n_features)
    Training data, where n_samples is the number of samples and n_features is the number of features.
y : array-like, of length n_samples
    The target variable for supervised learning problems.
groups : array...

def split(self, X, y=None, groups=None):
    n_repeats = self.n_repeats
    rng = check_random_state(self.random_state)
    for idx in range(n_repeats):
        cv = self.cv(random_state=rng, shuffle=True, **self.cvargs)
        for (train_index, test_index) in cv.split(X, y, groups):
            yield train_index, test_index
Returns the number of splitting iterations in the cross-validator.

Parameters
----------
X : object
    Always ignored, exists for compatibility. ``np.zeros(n_samples)`` may be used as a placeholder.
y : object
    Always ignored, exists for compatibility. ``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like, with sh...

def get_n_splits(self, X=None, y=None, groups=None):
    rng = check_random_state(self.random_state)
    cv = self.cv(random_state=rng, shuffle=True, **self.cvargs)
    return cv.get_n_splits(X, y, groups) * self.n_repeats
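The product above simply scales the base splitter's count by the repeat count; a sketch assuming RepeatedKFold:

from sklearn.model_selection import RepeatedKFold

rkf = RepeatedKFold(n_splits=5, n_repeats=10, random_state=0)
print(rkf.get_n_splits())    # 5 * 10 -> 50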
Generate indices to split data into training and test set.

Parameters
----------
X : array-like, shape (n_samples, n_features)
    Training data, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape (n_samples,)
    The target variable for supervised learning problems.
groups : array-l...

def split(self, X, y=None, groups=None):
    X, y, groups = indexable(X, y, groups)
    for train, test in self._iter_indices(X, y, groups):
        yield train, test
Returns the number of splitting iterations in the cross-validator.

Parameters
----------
X : object
    Always ignored, exists for compatibility.
y : object
    Always ignored, exists for compatibility.
groups : object
    Always ignored, exists for compatibility.

Returns
-------
n_splits : int
    Returns the number of splitting iterations in the cross...

def get_n_splits(self, X=None, y=None, groups=None):
    return self.n_splits
Generate indices to split data into training and test set.

Parameters
----------
X : array-like, shape (n_samples, n_features)
    Training data, where n_samples is the number of samples and n_features is the number of features.
    Note that providing ``y`` is sufficient to generate the splits and hence ``np.zeros(n_samples)`` may be u...

def split(self, X, y, groups=None):
    y = check_array(y, ensure_2d=False, dtype=None)
    return super(StratifiedShuffleSplit, self).split(X, y, groups)
Generate indices to split data into training and test set.

Parameters
----------
X : object
    Always ignored, exists for compatibility.
y : object
    Always ignored, exists for compatibility.
groups : object
    Always ignored, exists for compatibility.

Returns
-------
train : ndarray
    The training set indices for that split.
test : ndarray
    The t...

def split(self, X=None, y=None, groups=None):
    ind = np.arange(len(self.test_fold))
    for test_index in self._iter_test_masks():
        train_index = ind[np.logical_not(test_index)]
        test_index = ind[test_index]
        yield train_index, test_index
Generates boolean masks corresponding to test sets.

def _iter_test_masks(self):
    for f in self.unique_folds:
        test_index = np.where(self.test_fold == f)[0]
        test_mask = np.zeros(len(self.test_fold), dtype=bool)
        test_mask[test_index] = True
        yield test_mask
Returns the number of splitting iterations in the cross-validator.

Parameters
----------
X : object
    Always ignored, exists for compatibility.
y : object
    Always ignored, exists for compatibility.
groups : object
    Always ignored, exists for compatibility.

Returns
-------
n_splits : int
    Returns the number of splitting iterations in the cross...

def get_n_splits(self, X=None, y=None, groups=None):
    return len(self.unique_folds)
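A short end-to-end sketch of the PredefinedSplit behaviour implemented by the three methods above (split, _iter_test_masks, get_n_splits), where -1 marks samples held in the training set for every split:

import numpy as np
from sklearn.model_selection import PredefinedSplit

ps = PredefinedSplit(test_fold=np.array([0, 1, -1, 1]))
print(ps.get_n_splits())    # 2 unique folds -> 2
for train, test in ps.split():
    print(train, test)
# [1 2 3] [0]
# [0 2] [1 3]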
Returns the number of splitting iterations in the cross-validator.

Parameters
----------
X : object
    Always ignored, exists for compatibility.
y : object
    Always ignored, exists for compatibility.
groups : object
    Always ignored, exists for compatibility.

Returns
-------
n_splits : int
    Returns the number of splitting iterations in the cross...

def get_n_splits(self, X=None, y=None, groups=None):
    return len(self.cv)
Generate indices to split data into training and test set.

Parameters
----------
X : object
    Always ignored, exists for compatibility.
y : object
    Always ignored, exists for compatibility.
groups : object
    Always ignored, exists for compatibility.

Returns
-------
train : ndarray
    The training set indices for that split.
test : ndarray
    The t...

def split(self, X=None, y=None, groups=None):
    for train, test in self.cv:
        yield train, test
Iterate over the points in the grid.

Returns
-------
params : iterator over dict of string to any
    Yields dictionaries mapping each estimator parameter to one of its allowed values.

def __iter__(self):
    for p in self.param_grid:
        items = sorted(p.items())
        if not items:
            yield {}
        else:
            keys, values = zip(*items)
            for v in product(*values):
                params = dict(zip(keys, v))
                yield params
Number of points on the grid.

def __len__(self):
    product = partial(reduce, operator.mul)
    return sum(product(len(v) for v in p.values()) if p else 1
               for p in self.param_grid)
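A minimal sketch of ParameterGrid's iteration and length semantics, using scikit-learn's public API:

from sklearn.model_selection import ParameterGrid

grid = ParameterGrid({'kernel': ['linear', 'rbf'], 'C': [1, 10]})
print(len(grid))      # 2 * 2 -> 4
print(list(grid)[0])  # {'C': 1, 'kernel': 'linear'} (keys iterated in sorted order)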
Get the parameters that would be ``ind``th in iteration.

Parameters
----------
ind : int
    The iteration index

Returns
-------
params : dict of string to any
    Equal to list(self)[ind]

def __getitem__(self, ind):
    for sub_grid in self.param_grid:
        if not sub_grid:
            if ind == 0:
                return {}
            else:
                ind -= 1
                continue
        keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
        sizes = [len(v_list) for v_list in values_lists]
        ...
Number of points that will be sampled.

def __len__(self):
    return self.n_iter
Simple custom repr to summarize the main info.

def __repr__(self):
    return 'mean: {0:.5f}, std: {1:.5f}, params: {2}'.format(
        self.mean_validation_score,
        np.std(self.cv_validation_scores),
        self.parameters)
Returns the score on the given data, if the estimator has been refit.

This uses the score defined by ``scoring`` where provided, and the ``best_estimator_.score`` method otherwise.

Parameters
----------
X : array-like, shape = [n_samples, n_features]
    Input data, where n_samples is the number of samples and n_features is the numb...

def score(self, X, y=None):
    self._check_is_fitted('score')
    if self.scorer_ is None:
        raise ValueError("No score function explicitly defined, and the estimator doesn't provide one %s" % self.best_estimator_)
    score = self.scorer_[self.refit] if self.multimetric_ else self.scorer_
    return ...
Call predict on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict``.

Parameters
----------
X : indexable, length n_samples
    Must fulfill the input assumptions of the underlying estimator.

@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
    self._check_is_fitted('predict')
    return self.best_estimator_.predict(X)

Call predict_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_proba``.

Parameters
----------
X : indexable, length n_samples
    Must fulfill the input assumptions of the underlying estimator.

@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
    self._check_is_fitted('predict_proba')
    return self.best_estimator_.predict_proba(X)

Call predict_log_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_log_proba``.

Parameters
----------
X : indexable, length n_samples
    Must fulfill the input assumptions of the underlying estimator.

@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
    self._check_is_fitted('predict_log_proba')
    return self.best_estimator_.predict_log_proba(X)

Call decision_function on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``decision_function``.

Parameters
----------
X : indexable, length n_samples
    Must fulfill the input assumptions of the underlying estimator.

@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
    self._check_is_fitted('decision_function')
    return self.best_estimator_.decision_function(X)

Call transform on the estimator with the best found parameters. Only available if the underlying estimator supports ``transform`` and ``refit=True``.

Parameters
----------
X : indexable, length n_samples
    Must fulfill the input assumptions of the underlying estimator.

@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
    self._check_is_fitted('transform')
    return self.best_estimator_.transform(X)

Call inverse_transform on the estimator with the best found params. Only available if the underlying estimator implements ``inverse_transform`` and ``refit=True``.

Parameters
----------
Xt : indexable, length n_samples
    Must fulfill the input assumptions of the underlying estimator.

@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
    self._check_is_fitted('inverse_transform')
    return self.best_estimator_.inverse_transform(Xt)
Run fit with all sets of parameters.

Parameters
----------
X : array-like, shape = [n_samples, n_features]
    Training vector, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
    Target relative to X for classification or regressio...

def fit(self, X, y=None, groups=None, **fit_params):
    if self.fit_params is not None:
        warnings.warn('"fit_params" as a constructor argument was deprecated in version 0.19 and will be removed in version 0.21. Pass fit parameters to the "fit" method instead.', DeprecationWarning)
    i...
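A minimal end-to-end sketch of the fit/score cycle these methods implement, assuming GridSearchCV with an SVC on the iris data (dataset and estimator are illustrative choices, not from the source):

from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
search = GridSearchCV(SVC(), {'C': [0.1, 1, 10]}, cv=3).fit(X, y)
print(search.best_params_)
print(search.score(X, y))    # uses scorer_ or best_estimator_.score, as above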
Return ParameterGrid instance for the given param_grid.

def _get_param_iterator(self):
    return ParameterGrid(self.param_grid)
Return ParameterSampler instance for the given distributions.

def _get_param_iterator(self):
    return ParameterSampler(self.param_distributions, self.n_iter,
                            random_state=self.random_state)
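A small sketch of ParameterSampler, the randomized counterpart returned here; the uniform distribution is an illustrative choice:

from scipy.stats import uniform
from sklearn.model_selection import ParameterSampler

sampler = ParameterSampler({'C': uniform(0, 10)}, n_iter=3, random_state=0)
print(len(sampler))    # -> 3, i.e. n_iter
print(list(sampler))   # three dicts with 'C' drawn from U(0, 10)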
Returns the score on the given data, if the estimator has been refit.

This uses the score defined by ``scoring`` where provided, and the ``best_estimator_.score`` method otherwise.

Parameters
----------
X : array-like, shape = [n_samples, n_features]
    Input data, where n_samples is the number of samples and n_features is the numb...

def score(self, X, y=None):
    if self.scorer_ is None:
        raise ValueError("No score function explicitly defined, and the estimator doesn't provide one %s" % self.best_estimator_)
    return self.scorer_(self.best_estimator_, X, y)
Call predict on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict``.

Parameters
----------
X : indexable, length n_samples
    Must fulfill the input assumptions of the underlying estimator.

@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
    return self.best_estimator_.predict(X)

Call predict_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_proba``.

Parameters
----------
X : indexable, length n_samples
    Must fulfill the input assumptions of the underlying estimator.

@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
    return self.best_estimator_.predict_proba(X)

Call predict_log_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_log_proba``.

Parameters
----------
X : indexable, length n_samples
    Must fulfill the input assumptions of the underlying estimator.

@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
    return self.best_estimator_.predict_log_proba(X)

Call decision_function on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``decision_function``.

Parameters
----------
X : indexable, length n_samples
    Must fulfill the input assumptions of the underlying estimator.

@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
    return self.best_estimator_.decision_function(X)

Call transform on the estimator with the best found parameters. Only available if the underlying estimator supports ``transform`` and ``refit=True``.

Parameters
----------
X : indexable, length n_samples
    Must fulfill the input assumptions of the underlying estimator.

@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
    return self.best_estimator_.transform(X)

Call inverse_transform on the estimator with the best found parameters. Only available if the underlying estimator implements ``inverse_transform`` and ``refit=True``.

Parameters
----------
Xt : indexable, length n_samples
    Must fulfill the input assumptions of the underlying estimator.

@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
    return self.best_estimator_.inverse_transform(Xt)
Actual fitting, performing the search over parameters.

def _fit(self, X, y, parameter_iterable):
    estimator = self.estimator
    cv = self.cv
    self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
    n_samples = _num_samples(X)
    X, y = indexable(X, y)
    if y is not None:
        if len(y) != n_samples:
            raise ValueError('Target variable (y) has a different ...
Run fit with all sets of parameters.

Parameters
----------
X : array-like, shape = [n_samples, n_features]
    Training vector, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
    Target relative to X for classification or regressio...

def fit(self, X, y=None):
    return self._fit(X, y, ParameterGrid(self.param_grid))
Run fit on the estimator with randomly drawn parameters.

Parameters
----------
X : array-like, shape = [n_samples, n_features]
    Training vector, where n_samples is the number of samples and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
    Target relative to X for classif...

def fit(self, X, y=None):
    sampled_params = ParameterSampler(self.param_distributions, self.n_iter,
                                      random_state=self.random_state)
    return self._fit(X, y, sampled_params)
This is a mock delegated function.

@if_delegate_has_method(delegate='a_prefix')
def func(self):
    pass
Function f Parameter a : int Parameter a b : float Parameter b Results c : list Parameter c

def f_bad_sections(self, X, y):
    pass
MetaEstimator to check if doctest on delegated methods work.

Parameters
----------
delegate : estimator
    Delegated estimator.

def __init__(self, delegate):
    self.delegate = delegate
This is available only if delegate has predict.

Parameters
----------
y : ndarray
    Parameter y

@if_delegate_has_method(delegate='delegate')
def predict(self, X):
    return self.delegate.predict(X)
This is available only if delegate has predict_proba.

Parameters
----------
X : ndarray
    Parameter X

@if_delegate_has_method(delegate='delegate')
def predict_proba(self, X):
    return X
This is available only if delegate has predict_proba.

Parameters
----------
y : ndarray
    Parameter X

@deprecated('Testing deprecated function with incorrect params')
@if_delegate_has_method(delegate='delegate')
def predict_log_proba(self, X):
    return X
Find the first prime element in the specified row. Returns the column index, or -1 if no prime element was found.

def _find_prime_in_row(self, row):
    col = np.argmax(self.marked[row] == 2)
    if self.marked[row, col] != 2:
        col = -1
    return col
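The body relies on a NumPy idiom worth spelling out: argmax over a boolean array returns the first True position but falls back to 0 when nothing matches, hence the follow-up check. A standalone illustration with a hypothetical marked row:

import numpy as np

row = np.array([0, 2, 1, 2])    # hypothetical markings; 2 denotes a primed zero
col = np.argmax(row == 2)       # first index where the condition holds
print(col)                      # -> 1

row = np.array([0, 1, 0])       # no primed zero at all
col = np.argmax(row == 2)       # argmax still returns 0 ...
if row[col] != 2:               # ... so the result must be verified, as above
    col = -1
print(col)                      # -> -1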
Clear all covered matrix cells.

def _clear_covers(self):
    self.row_uncovered[:] = True
    self.col_uncovered[:] = True
Call method.

Parameters
----------
obj : object

def __call__(self, obj):
    if isinstance(obj, type):
        return self._decorate_class(obj)
    else:
        return self._decorate_fun(obj)
Decorate function fun.

def _decorate_fun(self, fun):
    msg = 'Function %s is deprecated' % fun.__name__
    if self.extra:
        msg += '; %s' % self.extra

    def wrapped(*args, **kwargs):
        warnings.warn(msg, category=DeprecationWarning)
        return fun(*args, **kwargs)

    wrapped.__name__ = fun.__name__
    wrapped.__dict__ = fun.__dict__...
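A usage sketch of the resulting decorator, assuming this is sklearn.utils.deprecated (the message format matches the body above):

import warnings
from sklearn.utils import deprecated

@deprecated('use new_func instead')
def old_func():
    return 42

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    old_func()
print(caught[0].message)
# Function old_func is deprecated; use new_func instead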
Decorator to catch and hide warnings without visual nesting.

def __call__(self, fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        clean_warning_registry()
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', self.category)
            return fn(*args, **kwargs)
    return wrapper
Object that mocks the urlopen function to fake requests to mldata.

`mock_datasets` is a dictionary of {dataset_name: data_dict}, or {dataset_name: (data_dict, ordering)}. `data_dict` itself is a dictionary of {column_name: data_array}, and `ordering` is a list of column_names to determine the ordering in the data set (...

def __init__(self, mock_datasets):
    self.mock_datasets = mock_datasets
Get a mask, or integer index, of the features selected.

Parameters
----------
indices : boolean (default False)
    If True, the return value will be an array of integers, rather than a boolean mask.

Returns
-------
support : array
    An index that selects the retained features from a feature vector. If `indices` is False, this is a boolean arr...

def get_support(self, indices=False):
    mask = self._get_support_mask()
    return mask if not indices else np.where(mask)[0]
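A short sketch of both return forms, assuming SelectKBest (one concrete selector) on the iris data; the exact selected columns are data-dependent:

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif

X, y = load_iris(return_X_y=True)
selector = SelectKBest(f_classif, k=2).fit(X, y)
print(selector.get_support())              # boolean mask, e.g. [False False  True  True]
print(selector.get_support(indices=True))  # integer indices, e.g. [2 3]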
Reduce X to the selected features.

Parameters
----------
X : array of shape [n_samples, n_features]
    The input samples.

Returns
-------
X_r : array of shape [n_samples, n_selected_features]
    The input samples with only the selected features.

def transform(self, X):
    X = check_array(X, accept_sparse='csr')
    mask = self.get_support()
    if not mask.any():
        warn('No features were selected: either the data is too noisy or the selection test too strict.', UserWarning)
        return np.empty(0).reshape((X.shape[0], 0))
    i...
Reverse the transformation operation.

Parameters
----------
X : array of shape [n_samples, n_selected_features]
    The input samples.

Returns
-------
X_r : array of shape [n_samples, n_original_features]
    `X` with columns of zeros inserted where features would have been removed by `transform`.

def inverse_transform(self, X):
    if issparse(X):
        X = X.tocsc()
        it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1))
        col_nonzeros = it.ravel()
        indptr = np.concatenate([[0], np.cumsum(col_nonzeros)])
        Xt = csc_matrix((X.data, X.indices, indptr),
                        shape=(X.shape[0], len(indptr) - 1), dtype=X.dtype)
        ...
Fit the RFE model and then the underlying estimator on the selected features.

Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
    The training input samples.
y : array-like, shape = [n_samples]
    The target values.

def fit(self, X, y):
    return self._fit(X, y)
Reduce X to the selected features and then predict using the underlying estimator.

Parameters
----------
X : array of shape [n_samples, n_features]
    The input samples.

Returns
-------
y : array of shape [n_samples]
    The predicted target values.

@if_delegate_has_method(delegate='estimator')
def predict(self, X):
    check_is_fitted(self, 'estimator_')
    return self.estimator_.predict(self.transform(X))
Reduce X to the selected features and then return the score of the underlying estimator.

Parameters
----------
X : array of shape [n_samples, n_features]
    The input samples.
y : array of shape [n_samples]
    The target values.

@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
    check_is_fitted(self, 'estimator_')
    return self.estimator_.score(self.transform(X), y)
Fit the RFE model and automatically tune the number of selected features.

Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
    Training vector, where `n_samples` is the number of samples and `n_features` is the total number of features.
y : array-like, shape = [n_samples]
    Target values (integers...

def fit(self, X, y):
    X, y = check_X_y(X, y, 'csr')
    cv = check_cv(self.cv, y, is_classifier(self.estimator))
    scorer = check_scoring(self.estimator, scoring=self.scoring)
    n_features = X.shape[1]
    n_features_to_select = 1
    if 0.0 < self.step < 1.0:
        step = int(max(1, self.step * n_features))
    else:
        ...
Learn empirical variances from X.

Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
    Sample vectors from which to compute variances.
y : any
    Ignored. This parameter exists only for compatibility with sklearn.pipeline.Pipeline.

Returns
-------
self

def fit(self, X, y=None):
    X = check_array(X, ('csr', 'csc'), dtype=np.float64)
    if hasattr(X, 'toarray'):
        _, self.variances_ = mean_variance_axis(X, axis=0)
    else:
        self.variances_ = np.var(X, axis=0)
    if np.all(self.variances_ <= self.threshold):
        msg = 'No feature in X meets the varianc...
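A minimal sketch of the fitted variances in action, assuming VarianceThreshold's default threshold of 0.0 (drop constant features):

import numpy as np
from sklearn.feature_selection import VarianceThreshold

X = np.array([[0, 1, 2],
              [0, 3, 4],
              [0, 5, 6]])
vt = VarianceThreshold()       # threshold=0.0
print(vt.fit_transform(X))     # constant first column is removed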
Fit the SelectFromModel meta-transformer.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    The training input samples.
y : array-like, shape (n_samples,)
    The target values (integers that correspond to classes in classification, real numbers in regression).
**fit_params : Other estimator specific parameters

R...

def fit(self, X, y=None, **fit_params):
    if self.prefit:
        raise NotFittedError("Since 'prefit=True', call transform directly")
    self.estimator_ = clone(self.estimator)
    self.estimator_.fit(X, y, **fit_params)
    return self
Fit the SelectFromModel meta-transformer only once.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    The training input samples.
y : array-like, shape (n_samples,)
    The target values (integers that correspond to classes in classification, real numbers in regression).
**fit_params : Other estimator specific pa...

@if_delegate_has_method('estimator')
def partial_fit(self, X, y=None, **fit_params):
    if self.prefit:
        raise NotFittedError("Since 'prefit=True', call transform directly")
    if not hasattr(self, 'estimator_'):
        self.estimator_ = clone(self.estimator)
    self.estimator_.partial_fit(X, y, **fit_params)
    return self
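A usage sketch for SelectFromModel; the estimator choice and data are illustrative:

from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel

X, y = load_iris(return_X_y=True)
sfm = SelectFromModel(RandomForestClassifier(n_estimators=10, random_state=0))
X_new = sfm.fit(X, y).transform(X)
print(X.shape, '->', X_new.shape)   # features at/above the mean importance are kept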
Run score function on (X, y) and get the appropriate features.

Parameters
----------
X : array-like, shape = [n_samples, n_features]
    The training input samples.
y : array-like, shape = [n_samples]
    The target values (class labels in classification, real numbers in regression).

Returns
-------
self : object
    Returns self.

def fit(self, X, y):
    X, y = check_X_y(X, y, ['csr', 'csc'], multi_output=True)
    if not callable(self.score_func):
        raise TypeError('The score function should be a callable, %s (%s) was passed.' % (self.score_func, type(self.score_func)))
    self._check_params(X, y)
    score_func_ret = sel...
Generate a sparse random projection matrix.

Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
    Training set: only the shape is used to find optimal random matrix dimensions based on the theory referenced in the aforementioned papers.
y : is not used: placeholder to allow for usage in a Pipelin...

def fit(self, X, y=None):
    X = check_array(X, accept_sparse=['csr', 'csc'])
    n_samples, n_features = X.shape
    if self.n_components == 'auto':
        self.n_components_ = johnson_lindenstrauss_min_dim(
            n_samples=n_samples, eps=self.eps)
        if self.n_components_ <= 0:
            raise ValueError('eps=%f and n_samples=%...
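johnson_lindenstrauss_min_dim is the theoretical bound used when n_components='auto'; a quick sketch of the numbers involved:

from sklearn.random_projection import johnson_lindenstrauss_min_dim

# minimum safe target dimensionality for 10000 samples at eps=0.1 distortion
print(johnson_lindenstrauss_min_dim(n_samples=10000, eps=0.1))   # -> 7894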
Project the data by using matrix product with the random matrix.

Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
    The input data to project into a smaller dimensional space.

Returns
-------
X_new : numpy array or scipy sparse of shape [n_samples, n_components]
    Projected array.

def transform(self, X):
    X = check_array(X, accept_sparse=['csr', 'csc'])
    check_is_fitted(self, 'components_')
    if X.shape[1] != self.components_.shape[1]:
        raise ValueError('Impossible to perform projection: X at fit stage had a different number of features. (%s != %s)' % (X.sha...
Generate the random projection matrix.

Parameters
----------
n_components : int
    Dimensionality of the target projection space.
n_features : int
    Dimensionality of the original source space.

Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
    The generated random matrix.

def _make_random_matrix(self, n_components, n_features):
    random_state = check_random_state(self.random_state)
    return gaussian_random_matrix(n_components, n_features,
                                  random_state=random_state)
Generate the random projection matrix.

Parameters
----------
n_components : int
    Dimensionality of the target projection space.
n_features : int
    Dimensionality of the original source space.

Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
    The generated random matrix.

def _make_random_matrix(self, n_components, n_features):
    random_state = check_random_state(self.random_state)
    self.density_ = _check_density(self.density, n_features)
    return sparse_random_matrix(n_components, n_features,
                                density=self.density_,
                                random_state=random_state)
Removes comments (#...) from python code.

@classmethod
def split_comment(cls, code):
    if '#' not in code:
        return code
    subf = lambda m: '' if m.group(0)[0] == '#' else m.group(0)
    return re.sub(cls.re_pytokens, subf, code)
Render the template using keyword arguments as local variables.

def render(self, *args, **kwargs):
    for dictarg in args:
        kwargs.update(dictarg)
    stdout = []
    self.execute(stdout, kwargs)
    return ''.join(stdout)
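The render contract above (positional dicts folded into keyword arguments) can be exercised directly; a minimal sketch, assuming the method belongs to bottle's SimpleTemplate:

from bottle import SimpleTemplate

tpl = SimpleTemplate('Hello {{name}}, you have {{n}} messages')
print(tpl.render({'name': 'Ada'}, n=3))
# Hello Ada, you have 3 messages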
The Picard docs suggest setting this as a convenience.

def setup_environment(self, spack_env, run_env):
    run_env.prepend_path('PICARD',
                         join_path(self.prefix, 'bin', 'picard.jar'))
Make the install targets.

@when('@:1.7.0')
def install(self, spec, prefix):
    with working_dir(self.build_directory):
        install_tree(join_path(self.stage.source_path, 'include'),
                     prefix.include)
        mkdirp(prefix.lib)
        install('libgtest.a', prefix.lib)
        install('libgtest_main.a', prefix.lib)
Internal compile.sh scripts hardcode the number of cores to build with. Filter these out so Spack can control it.

def patch(self):
    files = ['compile.sh',
             'parallel/modified_kahip/compile.sh',
             'parallel/parallel_src/compile.sh']
    for f in files:
        filter_file('NCORES=.*', 'NCORES={0}'.format(make_jobs), f)
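filter_file is Spack's sed-like, in-place regex helper; a sketch of the same call pattern against a hypothetical Makefile (filter_file and make_jobs come from Spack's build environment, not the standard library):

filter_file('NCORES=.*',                      # regex matched line by line
            'NCORES={0}'.format(make_jobs),   # replacement text
            'Makefile')                       # hypothetical target file, edited in place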
Build using the KaHIP compile.sh script. Uses scons internally.

def build(self, spec, prefix):
    builder = Executable('./compile.sh')
    builder()
Install under the prefix.

def install(self, spec, prefix):
    mkdirp(prefix.bin)
    mkdirp(prefix.include)
    mkdirp(prefix.lib)
    with working_dir('deploy'):
        for f in os.listdir('.'):
            if re.match('.*\\.(a|so|dylib)$', f):
                install(f, prefix.lib)
            elif re.match('.*\\.h$', f):
                install(f, prefix.include)
            ...
Run before install so that the standard Spack sbang install hook can fix up the path to the perl binary.

@run_before('install')
def filter_sbang(self):
    with working_dir('src/perl'):
        match = '^#!/usr/bin/env perl'
        perl = join_path(self.spec['perl'].prefix.bin, 'perl')
        substitute = '#!{perl}'.format(perl=perl)
        files = ['fill-aa', 'fill-an-ac', 'fill-fs', 'fill-ref-md5',
                 'tab-to-vcf', 'vcf-annotate', 'vcf-compare', 'vcf-concat', 'vc...
Run after install to inject dependencies into LD_LIBRARY_PATH. If we don't do this, the run files will clear the LD_LIBRARY_PATH. Since the installer is a binary file, we have no means of specifying an RPATH to use.

def filter_ld_library_path(self, spec, prefix):
    files = glob.glob(prefix + '/binaries/*.run')
    ld_library_path = ':'.join([spec['zlib'].prefix.lib,
                                spec['freetype'].prefix.lib,
                                spec['fontconfig'].prefix.lib,
                                spec['libxrender'].prefix.lib,
                                spec['libcanberra'].prefix.lib])
    for runfile in files:
        filter_file('(export LD_LIBRARY_PATH=)$', '\\1{0}'...
Execute their autotools wrapper script.

def autoreconf(self, spec, prefix):
    if os.path.exists('./buildconf.sh'):
        bash = which('bash')
        bash('./buildconf.sh', '--force')
Build and run a small program to test the installed HDF5 Blosc plugin.

def check_install(self, spec):
    print('Checking HDF5-Blosc plugin...')
    checkdir = 'spack-check'
    with working_dir(checkdir, create=True):
        source = '\\\n#include <hdf5.h>\n#include <assert.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#define FILTER_BLOSC 32001 /* Blosc filter ID registered ...
Called before Octave modules' install() methods. In most cases, extensions will only need to have one line:
    octave('--eval', 'pkg install %s' % self.stage.archive_file)

def setup_dependent_package(self, module, dependent_spec):
    module.octave = Executable(join_path(self.spec.prefix.bin, 'octave'))
Provide location of the OpenFOAM project. This is identical to the WM_PROJECT_DIR value, but we avoid that variable since it would mask the normal OpenFOAM cleanup of previous versions.

def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
    spack_env.set('FOAM_PROJECT_DIR', self.projectdir)
Absolute location of project directory: WM_PROJECT_DIR/

@property
def projectdir(self):
    return self.prefix
Relative location of architecture-specific executables.

@property
def archbin(self):
    return join_path('platforms', self.foam_arch, 'bin')
Relative location of architecture-specific libraries.

@property
def archlib(self):
    return join_path('platforms', self.foam_arch, 'lib')
Adjust OpenFOAM build for spack. Where needed, apply filter as an alternative to normal patching.

def patch(self):
    add_extra_files(self, self.common, self.assets)
    edits = {
        'WM_THIRD_PARTY_DIR':
            '$WM_PROJECT_DIR/ThirdParty #SPACK: No separate third-party',
    }
    rewrite_environ_files(edits,
                          posix=join_path('etc', 'bashrc'),
                          cshell=join_path('etc', 'cshrc'))
Make adjustments to the OpenFOAM configuration files in their various locations: etc/bashrc, etc/config.sh/FEATURE, and customizations that don't properly fit get placed in the etc/prefs.sh file (similarly for csh).

def configure(self, spec, prefix):
    edits = {}
    edits.update(self.foam_arch.foam_dict())
    rewrite_environ_files(edits,
                          posix=join_path('etc', 'bashrc'),
                          cshell=join_path('etc', 'cshrc'))
    self.etc_prefs = {}
    user_mpi = mplib_content(spec, '${MPI_ARCH_PATH}')
    self.etc_config = {'CGAL': [('BOOST_ARCH_PATH', spec['boost'].prefix), ('CGA...
Build using the OpenFOAM Allwmake script, with a wrapper to source its environment first. Only build if the compiler is known to be supported.

def build(self, spec, prefix):
    self.foam_arch.has_rule(self.stage.source_path)
    self.foam_arch.create_rules(self.stage.source_path, self)
    args = ['-silent']
    if self.parallel:
        args.append('-j{0}'.format(make_jobs))
    builder = Executable(self.build_script)
    builder(*args)
Install under the projectdir.

def install(self, spec, prefix):
    mkdirp(self.projectdir)
    projdir = os.path.basename(self.projectdir)
    edits = {
        'WM_PROJECT_INST_DIR': os.path.dirname(self.projectdir),
        'WM_PROJECT_DIR': join_path('$WM_PROJECT_INST_DIR', projdir),
    }
    if '+source' in spec:
        ignored = re.compile('^spack-.*')
    else:
        ignored = re.compile('^(...
Add symlinks into bin/, lib/ (eg, for other applications).

def install_links(self):
    with working_dir(self.projectdir):
        os.symlink(join_path('.spack', 'build.out'),
                   join_path('log.' + str(self.foam_arch)))
    if not self.config['link']:
        return
    with working_dir(self.projectdir):
        if os.path.isdir(self.archlib):
            os.symlink(self.archlib, 'lib')
    with work...
Returns a dictionary for OpenFOAM prefs, bashrc, cshrc.

def foam_dict(self):
    return dict([
        ('WM_COMPILER', self.compiler),
        ('WM_ARCH_OPTION', self.arch_option),
        ('WM_LABEL_SIZE', self.label_size),
        ('WM_PRECISION_OPTION', self.precision_option),
        ('WM_COMPILE_OPTION', self.compile_option),
        ('WM_MPLIB', self.mplib),
    ])
The wmake/rules/ compiler directory.

def _rule_directory(self, projdir=None, general=False):
    if general:
        relative = os.path.join('wmake', 'rules', 'General')
    else:
        relative = os.path.join('wmake', 'rules', self.rule)
    if projdir:
        return os.path.join(projdir, relative)
    else:
        return relative
Verify that a wmake/rules/ compiler rule exists in the project directory.

def has_rule(self, projdir):
    rule_dir = self._rule_directory(projdir)
    if not os.path.isdir(rule_dir):
        raise InstallError('No wmake rule for {0}'.format(self.rule))
    if not re.match('.+Opt$', self.compile_option):
        raise InstallError("WM_COMPILE_OPTION={0} is not type '*Opt'".format(self.compile...
Create cRpathOpt, c++RpathOpt and mplibUSER, mplibUSERMPI rules in the specified project directory. The compiler rules are based on the respective cOpt, c++Opt rules but with additional rpath information for the OpenFOAM libraries. The rpath rules allow wmake to use spack information with minimal modification to OpenFOAM...

def create_rules(self, projdir, foam_pkg):
    rpath = '{0}{1}'.format(
        foam_pkg.compiler.cxx_rpath_arg,
        join_path(foam_pkg.projectdir, foam_pkg.archlib))
    user_mpi = mplib_content(foam_pkg.spec)
    rule_dir = self._rule_directory(projdir)
    with working_dir(rule_dir):
        for lang in ['c', 'c++']:
            src = '{0}Opt'.format(lang)
            ds...
Run after install to tell the Makefile and SConstruct files to use the compilers that Spack built the package with. If this isn't done, they'll have CC, CXX, F77, and FC set to Spack's generic cc, c++, f77, and f90. We want them to be bound to whatever compiler they were built with.

@run_after('install')
def filter_compilers(self):
    kwargs = {'ignore_absent': True, 'backup': False, 'string': True}
    dirname = os.path.join(self.prefix, 'share/cantera/samples')
    cc_files = ['cxx/rankine/Makefile', 'cxx/NASA_coeffs/Makefile',
                'cxx/kinetics1/Makefile', 'cxx/flamespeed/Makefile',
                'cxx/combustor/Makefile', 'f77/SConstruct']
    cxx_files = ['cx...