desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
@property
def _body(self):
    """The |_Body| instance containing the content for this document.

    Created lazily on first access and cached for later calls.
    """
    if self.__body is None:
        self.__body = _Body(self._element.body, self)
    return self.__body
def clear_content(self):
    """Return this |_Body| instance after clearing it of all content.

    Section properties for the main document story, if present, are
    preserved.
    """
    self._body.clear_content()
    return self
def add_paragraph(self, text='', style=None):
    """Return a paragraph newly added to the end of the content in this
    container, having *text* in a single run if present, and having
    paragraph style *style*.  If *style* is |None|, no paragraph style is
    applied, which has the same effect as applying the 'Normal' style.
    """
    paragraph = self._add_paragraph()
    if text:
        paragraph.add_run(text)
    if style is not None:
        paragraph.style = style
    return paragraph
def add_table(self, rows, cols, width):
    """Return a table of *width* having *rows* rows and *cols* columns,
    newly appended to the content in this container.  *width* is evenly
    distributed between the table columns.
    """
    from .table import Table
    new_tbl = CT_Tbl.new_tbl(rows, cols, width)
    self._element._insert_tbl(new_tbl)
    return Table(new_tbl, self)
@property
def paragraphs(self):
    """A list containing the paragraphs in this container, in document
    order.  Read-only.
    """
    return [Paragraph(p_elm, self) for p_elm in self._element.p_lst]
@property
def tables(self):
    """A list containing the tables in this container, in document order.
    Read-only.
    """
    from .table import Table
    return [Table(tbl_elm, self) for tbl_elm in self._element.tbl_lst]
def _add_paragraph(self):
    """Return a paragraph newly added to the end of the content in this
    container.
    """
    return Paragraph(self._element.add_p(), self)
def __getitem__(self, idx):
    """Provide indexed access, e.g. 'inline_shapes[idx]'."""
    try:
        inline = self._inline_lst[idx]
    except IndexError:
        raise IndexError(u'inline shape index [%d] out of range' % idx)
    return InlineShape(inline)
@property
def height(self):
    """Read/write. The display height of this inline shape as an |Emu|
    instance.
    """
    return self._inline.extent.cy
'The type of this inline shape as a member of ``docx.enum.shape.WD_INLINE_SHAPE``, e.g. ``LINKED_PICTURE``. Read-only.'
@property def type(self):
graphicData = self._inline.graphic.graphicData uri = graphicData.uri if (uri == nsmap[u'pic']): blip = graphicData.pic.blipFill.blip if (blip.link is not None): return WD_INLINE_SHAPE.LINKED_PICTURE return WD_INLINE_SHAPE.PICTURE if (uri == nsmap[u'c']): retur...
@property
def width(self):
    """Read/write. The display width of this inline shape as an |Emu|
    instance.
    """
    return self._inline.extent.cx
def changed(self, event):
    """Notify the observers, passing them the event and this subject."""
    for watcher in self.observers:
        watcher.update(event, self)
def add_observer(self, observer):
    """Register an observer so it receives future change notifications."""
    self.observers.append(observer)
def refit(self):
    """Refit the model, but only if it has already been fitted once."""
    if self.fitted:
        self.fit()
def remove_surface(self):
    """Remove old decision surface artists and reset the contour list."""
    if len(self.contours) > 0:
        for contour in self.contours:
            if isinstance(contour, ContourSet):
                # a ContourSet holds several line collections; remove each
                for lineset in contour.collections:
                    lineset.remove()
            else:
                contour.remove()
        self.contours = []
def plot_support_vectors(self, support_vectors):
    """Plot the support vectors by placing circles over the corresponding
    data points and add the circle collection to the contours list.
    """
    circles = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
                              s=80, edgecolors='k', facecolors='none')
    self.contours.append(circles)
'Get a valid link, False if not found'
def _get_link(self, cobj):
fname_idx = None full_name = ((cobj['module_short'] + '.') + cobj['name']) if (full_name in self._searchindex['objects']): value = self._searchindex['objects'][full_name] if isinstance(value, dict): value = value[next(iter(value.keys()))] fname_idx = value[0] elif (co...
'Resolve the link to the documentation, returns None if not found Parameters cobj : dict Dict with information about the "code object" for which we are resolving a link. cobj[\'name\'] : function or class name (str) cobj[\'module_short\'] : shortened module name (str) cobj[\'module\'] : module name (str) this_url: str ...
def resolve(self, cobj, this_url):
full_name = ((cobj['module_short'] + '.') + cobj['name']) link = self._link_cache.get(full_name, None) if (link is None): link = self._get_link(cobj) self._link_cache[full_name] = link if ((link is False) or (link is None)): return None if self.relative: link = os.pat...
'supported attributes by name are: - lineno - returns the line number of the exception text - col - returns the column number of the exception text - line - returns the line containing the exception text'
def __getattr__(self, aname):
if (aname == 'lineno'): return lineno(self.loc, self.pstr) elif (aname in ('col', 'column')): return col(self.loc, self.pstr) elif (aname == 'line'): return line(self.loc, self.pstr) else: raise AttributeError, aname
def markInputline(self, markerString='>!<'):
    """Extracts the exception line from the input string, and marks the
    location of the exception with a special symbol.
    """
    source_line = self.line
    marker_col = self.column - 1
    if markerString:
        source_line = (source_line[:marker_col]
                       + markerString
                       + source_line[marker_col:])
    return source_line.strip()
def keys(self):
    """Returns all named result keys."""
    return self.__tokdict.keys()
def pop(self, index=-1):
    """Removes and returns item at specified index (default=last).  Will
    work with either numeric indices or dict-key indicies.
    """
    removed = self[index]
    del self[index]
    return removed
def get(self, key, defaultValue=None):
    """Returns named result matching the given key, or if there is no such
    name, then returns the given defaultValue or None if no defaultValue
    is specified.
    """
    if key in self:
        return self[key]
    return defaultValue
def items(self):
    """Returns all named result keys and values as a list of tuples."""
    return [(key, self[key]) for key in self.__tokdict]
def values(self):
    """Returns all named result values."""
    # each entry is a list of (value, position) tuples; take the value of
    # the most recent occurrence
    return [occurrences[-1][0] for occurrences in self.__tokdict.values()]
def asList(self):
    """Returns the parse results as a nested list of matching tokens, all
    converted to strings.
    """
    return [tok.asList() if isinstance(tok, ParseResults) else tok
            for tok in self.__toklist]
def asDict(self):
    """Returns the named parse results as dictionary."""
    return {key: value for key, value in self.items()}
def copy(self):
    """Returns a new copy of a ParseResults object."""
    duplicate = ParseResults(self.__toklist)
    duplicate.__tokdict = self.__tokdict.copy()
    duplicate.__parent = self.__parent
    duplicate.__accumNames.update(self.__accumNames)
    duplicate.__name = self.__name
    return duplicate
'Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.'
def asXML(self, doctag=None, namedItemsOnly=False, indent='', formatted=True):
nl = '\n' out = [] namedItems = dict([(v[1], k) for (k, vlist) in self.__tokdict.items() for v in vlist]) nextLevelIndent = (indent + ' ') if (not formatted): indent = '' nextLevelIndent = '' nl = '' selfTag = None if (doctag is not None): selfTag = doc...
'Returns the results name for this token expression.'
def getName(self):
if self.__name: return self.__name elif self.__parent: par = self.__parent() if par: return par.__lookup(self) else: return None elif ((len(self) == 1) and (len(self.__tokdict) == 1) and (self.__tokdict.values()[0][0][1] in (0, (-1)))): return ...
'Diagnostic method for listing out the contents of a ParseResults. Accepts an optional indent argument so that this string can be embedded in a nested display of other data.'
def dump(self, indent='', depth=0):
out = [] out.append((indent + _ustr(self.asList()))) keys = self.items() keys.sort() for (k, v) in keys: if out: out.append('\n') out.append(('%s%s- %s: ' % (indent, (' ' * depth), k))) if isinstance(v, ParseResults): if v.keys(): ...
def setDefaultWhitespaceChars(chars):
    """Overrides the default whitespace chars used by all subsequently
    created ParserElements.
    """
    ParserElement.DEFAULT_WHITE_CHARS = chars
def copy(self):
    """Make a copy of this ParserElement.  Useful for defining different
    parse actions for the same parsing pattern, using copies of the
    original parse element.
    """
    duplicate = copy.copy(self)
    duplicate.parseAction = list(self.parseAction)
    duplicate.ignoreExprs = list(self.ignoreExprs)
    if self.copyDefaultWhiteChars:
        duplicate.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
    return duplicate
def setName(self, name):
    """Define name for this expression, for use in debugging."""
    self.name = name
    self.errmsg = 'Expected ' + self.name
    # keep any already-created exception's message in sync
    if hasattr(self, 'exception'):
        self.exception.msg = self.errmsg
    return self
def setResultsName(self, name, listAllMatches=False):
    """Define name for referencing matching tokens as a nested attribute
    of the returned parse results.

    NOTE: this returns a *copy* of the original ParserElement object, so
    that a basic element can be referenced in multiple places with
    different names.
    """
    newself = self.copy()
    newself.resultsName = name
    newself.modalResults = not listAllMatches
    return newself
'Method to invoke the Python pdb debugger when this element is about to be parsed. Set breakFlag to True to enable, False to disable.'
def setBreak(self, breakFlag=True):
if breakFlag: _parseMethod = self._parse def breaker(instring, loc, doActions=True, callPreParse=True): import pdb pdb.set_trace() _parseMethod(instring, loc, doActions, callPreParse) breaker._originalParseMethod = _parseMethod self._parse = breake...
'Internal method used to decorate parse actions that take fewer than 3 arguments, so that all parse actions can be called as f(s,l,t).'
def _normalizeParseActionArgs(f):
STAR_ARGS = 4 try: restore = None if isinstance(f, type): restore = f f = f.__init__ if (f.func_code.co_flags & STAR_ARGS): return f numargs = f.func_code.co_argcount if hasattr(f, 'im_self'): numargs -= 1 if restore...
def setParseAction(self, *fns, **kwargs):
    """Define action to perform when successfully matching parse element
    definition.  Parse action fn is a callable method with 0-3 arguments,
    called as fn(s,loc,toks), fn(loc,toks), fn(toks), or just fn().
    Recognized keyword argument: callDuringTry.

    Fix: materialize the ``map`` result as a list.  On Python 3 ``map``
    returns a lazy iterator, which would break later list operations on
    ``self.parseAction`` such as slicing (``[:]`` in ``copy``) and ``+=``.
    """
    self.parseAction = list(map(self._normalizeParseActionArgs, fns))
    self.callDuringTry = ('callDuringTry' in kwargs) and kwargs['callDuringTry']
    return self
def addParseAction(self, *fns, **kwargs):
    """Add parse action to expression's list of parse actions.
    See L{I{setParseAction}<setParseAction>}.
    """
    normalized = map(self._normalizeParseActionArgs, list(fns))
    self.parseAction += normalized
    requested = ('callDuringTry' in kwargs) and kwargs['callDuringTry']
    self.callDuringTry = self.callDuringTry or requested
    return self
def setFailAction(self, fn):
    """Define action to perform if parsing fails at this expression.
    Fail action fn is a callable that takes fn(s, loc, expr, err), where:
        - s = string being parsed
        - loc = location where expression match was attempted and failed
        - expr = the parse expression that failed
        - err = the exception thrown
    """
    self.failAction = fn
    return self
def enablePackrat():
    """Enables "packrat" parsing, which adds memoizing to the parsing
    logic.  Repeated parse attempts at the same string location can then
    immediately return a cached value instead of re-executing
    parsing/validating code.
    """
    if ParserElement._packratEnabled:
        return
    ParserElement._packratEnabled = True
    ParserElement._parse = ParserElement._parseCache
def parseString(self, instring):
    """Execute the parse expression with the given string.  This is the
    main interface to the client code, once the complete expression has
    been built.

    Note: implicitly calls expandtabs() on the input string unless
    keepTabs is set, to report proper column numbers in parse actions.
    """
    ParserElement.resetCache()
    if not self.streamlined:
        self.streamline()
    for ignorable in self.ignoreExprs:
        ignorable.streamline()
    source = instring if self.keepTabs else instring.expandtabs()
    loc, tokens = self._parse(source, 0)
    return tokens
'Scan the input string for expression matches. Each match will return the matching tokens, start location, and end location. May be called with optional maxMatches argument, to clip scanning after \'n\' matches are found. Note that the start and end locations are reported relative to the string being parsed. See L{I...
def scanString(self, instring, maxMatches=__MAX_INT__):
if (not self.streamlined): self.streamline() for e in self.ignoreExprs: e.streamline() if (not self.keepTabs): instring = _ustr(instring).expandtabs() instrlen = len(instring) loc = 0 preparseFn = self.preParse parseFn = self._parse ParserElement.resetCache() ...
'Extension to scanString, to modify matching text with modified tokens that may be returned from a parse action. To use transformString, define a grammar and attach a parse action to it that modifies the returned token list. Invoking transformString() on a target string will then scan for matches, and replace the matc...
def transformString(self, instring):
out = [] lastE = 0 self.keepTabs = True for (t, s, e) in self.scanString(instring): out.append(instring[lastE:s]) if t: if isinstance(t, ParseResults): out += t.asList() elif isinstance(t, list): out += t else: ...
def searchString(self, instring, maxMatches=__MAX_INT__):
    """Another extension to scanString, simplifying the access to the
    tokens found to match the given parse expression.  May be called with
    optional maxMatches argument, to clip searching after 'n' matches are
    found.
    """
    matches = self.scanString(instring, maxMatches)
    return ParseResults([tokens for tokens, start, end in matches])
def __add__(self, other):
    """Implementation of + operator - returns And."""
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return And([self, other])
def __radd__(self, other):
    """Implementation of + operator when left operand is not a
    ParserElement.
    """
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return other + self
'Implementation of | operator - returns MatchFirst'
def __or__(self, other):
if isinstance(other, __BASE_STRING__): other = Literal(other) if (not isinstance(other, ParserElement)): warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2) return None return MatchFirst([self, other]...
def __ror__(self, other):
    """Implementation of | operator when left operand is not a
    ParserElement.
    """
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return other | self
def __xor__(self, other):
    """Implementation of ^ operator - returns Or."""
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return Or([self, other])
def __rxor__(self, other):
    """Implementation of ^ operator when left operand is not a
    ParserElement.
    """
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return other ^ self
def __and__(self, other):
    """Implementation of & operator - returns Each."""
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return Each([self, other])
def __rand__(self, other):
    """Implementation of & operator when left operand is not a
    ParserElement.
    """
    if isinstance(other, __BASE_STRING__):
        other = Literal(other)
    if not isinstance(other, ParserElement):
        warnings.warn('Cannot combine element of type %s with ParserElement' % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return other & self
def __invert__(self):
    """Implementation of ~ operator - returns NotAny."""
    return NotAny(self)
def __call__(self, name):
    """Shortcut for setResultsName, with listAllMatches=default::
        userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
    """
    return self.setResultsName(name)
def suppress(self):
    """Suppresses the output of this ParserElement; useful to keep
    punctuation from cluttering up returned output.
    """
    return Suppress(self)
def leaveWhitespace(self):
    """Disables the skipping of whitespace before matching the characters
    in the ParserElement's defined pattern.  Normally only used internally
    by the pyparsing module, but may be needed in some
    whitespace-sensitive grammars.
    """
    self.skipWhitespace = False
    return self
def setWhitespaceChars(self, chars):
    """Overrides the default whitespace chars for this element only."""
    self.skipWhitespace = True
    self.whiteChars = chars
    self.copyDefaultWhiteChars = False
    return self
def parseWithTabs(self):
    """Overrides default behavior to expand <TAB>s to spaces before
    parsing the input string.  Must be called before parseString when the
    input grammar contains elements that match <TAB> characters.
    """
    self.keepTabs = True
    return self
def ignore(self, other):
    """Define expression to be ignored (e.g., comments) while doing
    pattern matching; may be called repeatedly, to define multiple comment
    or other ignorable patterns.
    """
    if isinstance(other, Suppress):
        # avoid registering the same suppressed expression twice
        if other not in self.ignoreExprs:
            self.ignoreExprs.append(other)
    else:
        self.ignoreExprs.append(Suppress(other))
    return self
def setDebugActions(self, startAction, successAction, exceptionAction):
    """Enable display of debugging messages while doing pattern matching.
    Any action passed as None falls back to the module default.
    """
    self.debugActions = (startAction or _defaultStartDebugAction,
                         successAction or _defaultSuccessDebugAction,
                         exceptionAction or _defaultExceptionDebugAction)
    self.debug = True
    return self
def setDebug(self, flag=True):
    """Enable display of debugging messages while doing pattern matching.
    Set flag to True to enable, False to disable.
    """
    if flag:
        self.setDebugActions(_defaultStartDebugAction,
                             _defaultSuccessDebugAction,
                             _defaultExceptionDebugAction)
    else:
        self.debug = False
    return self
def validate(self, validateTrace=None):
    """Check defined expressions for valid structure, check for infinite
    recursive definitions.

    Fix: replace the mutable default argument ``validateTrace=[]`` with
    ``None`` — a shared list default persists across calls.  The argument
    is unused by this implementation but is kept for interface
    compatibility (callers may still pass a list).
    """
    if validateTrace is None:
        validateTrace = []
    self.checkRecursion([])
def parseFile(self, file_or_filename):
    """Execute the parse expression on the given file or filename.  If a
    filename is specified (instead of a file object), the entire file is
    opened, read, and closed before parsing.

    Fix: open the file with a ``with`` block so the handle is closed even
    if ``read()`` raises; the original left the handle open on error.
    """
    try:
        file_contents = file_or_filename.read()
    except AttributeError:
        with open(file_or_filename, 'rb') as f:
            file_contents = f.read()
    return self.parseString(file_contents)
def setDefaultKeywordChars(chars):
    """Overrides the default Keyword chars used by all subsequently
    created Keywords.
    """
    Keyword.DEFAULT_KEYWORD_CHARS = chars
'The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags.'
def __init__(self, pattern, flags=0):
super(Regex, self).__init__() if (len(pattern) == 0): warnings.warn('null string passed to Regex; use Empty() instead', SyntaxWarning, stacklevel=2) self.pattern = pattern self.flags = flags try: self.re = re.compile(self.pattern, self.flags) self.reStrin...
'Defined with the following parameters: - quoteChar - string of one or more characters defining the quote delimiting string - escChar - character to escape quotes, typically backslash (default=None) - escQuote - special quote sequence to escape an embedded quote string (such as SQL\'s "" to escape an embedded ") (defau...
def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
super(QuotedString, self).__init__() quoteChar = quoteChar.strip() if (len(quoteChar) == 0): warnings.warn('quoteChar cannot be the empty string', SyntaxWarning, stacklevel=2) raise SyntaxError() if (endQuoteChar is None): endQuoteChar = quoteChar else: ...
def leaveWhitespace(self):
    """Extends leaveWhitespace defined in base class, and also invokes
    leaveWhitespace on all contained expressions.
    """
    self.skipWhitespace = False
    # work on private copies so shared sub-expressions are not mutated
    self.exprs = [expr.copy() for expr in self.exprs]
    for expr in self.exprs:
        expr.leaveWhitespace()
    return self
'Get parameter names for the estimator'
@classmethod def _get_param_names(cls):
init = getattr(cls.__init__, 'deprecated_original', cls.__init__) if (init is object.__init__): return [] init_signature = signature(init) parameters = [p for p in init_signature.parameters.values() if ((p.name != 'self') and (p.kind != p.VAR_KEYWORD))] for p in parameters: if (p.kin...
'Get parameters for this estimator. Parameters deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns params : mapping of string to any Parameter names mapped to their values.'
def get_params(self, deep=True):
out = dict() for key in self._get_param_names(): warnings.simplefilter('always', DeprecationWarning) try: with warnings.catch_warnings(record=True) as w: value = getattr(self, key, None) if (len(w) and (w[0].category == DeprecationWarning)): ...
'Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form ``<component>__<parameter>`` so that it\'s possible to update each component of a nested object. Returns self'
def set_params(self, **params):
if (not params): return self valid_params = self.get_params(deep=True) for (key, value) in six.iteritems(params): split = key.split('__', 1) if (len(split) > 1): (name, sub_name) = split if (name not in valid_params): raise ValueError(('Invalid...
def score(self, X, y, sample_weight=None):
    """Returns the mean accuracy on the given test data and labels.  In
    multi-label classification this is the subset accuracy, a harsh metric
    requiring each sample's label set to be predicted exactly.
    """
    from .metrics import accuracy_score
    predicted = self.predict(X)
    return accuracy_score(y, predicted, sample_weight=sample_weight)
def score(self, X, y, sample_weight=None):
    """Returns the coefficient of determination R^2 of the prediction,
    defined as (1 - u/v) with u the residual sum of squares and v the
    total sum of squares.  Best possible score is 1.0; may be negative.
    """
    from .metrics import r2_score
    predicted = self.predict(X)
    return r2_score(y, predicted, sample_weight=sample_weight,
                    multioutput='variance_weighted')
def fit_predict(self, X, y=None):
    """Performs clustering on X and returns cluster labels.

    Returns the ``labels_`` attribute produced by ``fit``.
    """
    self.fit(X)
    return self.labels_
@property
def biclusters_(self):
    """Convenient way to get row and column indicators together.
    Returns the ``rows_`` and ``columns_`` members.
    """
    return (self.rows_, self.columns_)
def get_indices(self, i):
    """Row and column indices of the i'th bicluster.

    Only works if ``rows_`` and ``columns_`` attributes exist.  Returns a
    pair (row_ind, col_ind) of np.intp index arrays.
    """
    row_mask = self.rows_[i]
    col_mask = self.columns_[i]
    return (np.nonzero(row_mask)[0], np.nonzero(col_mask)[0])
def get_shape(self, i):
    """Shape of the i'th bicluster as (n_rows, n_cols)."""
    return tuple(len(ind) for ind in self.get_indices(i))
def get_submatrix(self, i, data):
    """Returns the submatrix corresponding to bicluster `i`.

    Works with sparse matrices.  Only works if ``rows_`` and ``columns_``
    attributes exist.
    """
    from .utils.validation import check_array
    data = check_array(data, accept_sparse='csr')
    row_ind, col_ind = self.get_indices(i)
    return data[row_ind[:, np.newaxis], col_ind]
def fit_transform(self, X, y=None, **fit_params):
    """Fit to data, then transform it.  Fits the transformer to X (and y
    when given) with optional fit_params and returns a transformed
    version of X.
    """
    if y is None:
        return self.fit(X, **fit_params).transform(X)
    return self.fit(X, y, **fit_params).transform(X)
def score(self, X, y=None):
    """Returns the score of the model on the data X.

    Placeholder implementation (returns None); concrete estimators are
    expected to override it.
    """
    pass
'Computes the cosine distance. Distance is from the query to points in the candidates array. Returns argsort of distances in the candidates array and sorted distances.'
def _compute_distances(self, query, candidates):
if (candidates.shape == (0,)): return (np.empty(0, dtype=np.int), np.empty(0, dtype=float)) if sparse.issparse(self._fit_X): candidate_X = self._fit_X[candidates] else: candidate_X = self._fit_X.take(candidates, axis=0, mode='clip') distances = pairwise_distances(query, candidate...
def _generate_masks(self):
    """Creates left and right bit masks for all hash lengths and stores
    them packed on the instance.
    """
    tri_size = MAX_HASH_SIZE + 1
    lower = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
    upper = lower[::-1, ::-1]
    self._left_mask = np.packbits(lower).view(dtype=HASH_DTYPE)
    self._right_mask = np.packbits(upper).view(dtype=HASH_DTYPE)
'Performs the Synchronous ascending phase. Returns an array of candidates, their distance ranks and distances.'
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
index_size = self._fit_X.shape[0] n_candidates = 0 candidate_set = set() min_candidates = (self.n_candidates * self.n_estimators) while ((max_depth > self.min_hash_match) and ((n_candidates < min_candidates) or (len(candidate_set) < n_neighbors))): left_mask = self._left_mask[max_depth] ...
'Finds radius neighbors from the candidates obtained. Their distances from query are smaller than radius. Returns radius neighbors and distances.'
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
ratio_within_radius = 1 threshold = (1 - self.radius_cutoff_ratio) total_candidates = np.array([], dtype=int) total_neighbors = np.array([], dtype=int) total_distances = np.array([], dtype=float) while ((max_depth > self.min_hash_match) and (ratio_within_radius > threshold)): left_mask =...
'Fit the LSH forest on the data. This creates binary hashes of input data points by getting the dot product of input points and hash_function then transforming the projection into a binary string array based on the sign (positive/negative) of the projection. A sorted array of binary hashes is created. Parameters X : ar...
def fit(self, X, y=None):
self._fit_X = check_array(X, accept_sparse='csr') self.hash_functions_ = [] self.trees_ = [] self.original_indices_ = [] rng = check_random_state(self.random_state) int_max = np.iinfo(np.int32).max for i in range(self.n_estimators): hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE...
'Performs descending phase to find maximum depth.'
def _query(self, X):
bin_queries = np.asarray([hasher.transform(X)[:, 0] for hasher in self.hash_functions_]) bin_queries = np.rollaxis(bin_queries, 1) depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE, self._left_mask, self._right_mask) for (tree, tree_queries) in zip(self.trees_, np.rollaxis(bin_queries, ...
'Returns n_neighbors of approximate nearest neighbors. Parameters X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single query. n_neighbors : int, optional (default = None) Number of neighbors required. If not provided, this wil...
def kneighbors(self, X, n_neighbors=None, return_distance=True):
if (not hasattr(self, 'hash_functions_')): raise ValueError('estimator should be fitted.') if (n_neighbors is None): n_neighbors = self.n_neighbors X = check_array(X, accept_sparse='csr') (neighbors, distances) = ([], []) (bin_queries, max_depth) = self._query(X) for i i...
'Finds the neighbors within a given radius of a point or points. Return the indices and distances of some points from the dataset lying in a ball with size ``radius`` around the points of the query array. Points lying on the boundary are included in the results. The result points are *not* necessarily sorted by distanc...
def radius_neighbors(self, X, radius=None, return_distance=True):
if (not hasattr(self, 'hash_functions_')): raise ValueError('estimator should be fitted.') if (radius is None): radius = self.radius X = check_array(X, accept_sparse='csr') (neighbors, distances) = ([], []) (bin_queries, max_depth) = self._query(X) for i in range(X.shape...
'Inserts new data into the already fitted LSH Forest. Cost is proportional to new total size, so additions should be batched. Parameters X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) New data point to be inserted into the LSH Forest.'
def partial_fit(self, X, y=None):
X = check_array(X, accept_sparse='csr') if (not hasattr(self, 'hash_functions_')): return self.fit(X) if (X.shape[1] != self._fit_X.shape[1]): raise ValueError('Number of features in X and fitted array does not match.') n_samples = X.shape[0] n_indexed =...
'Finds the K-neighbors of a point. Returns indices of and distances to the neighbors of each point. Parameters X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == \'precomputed\' The query point or points. If not provided, neighbors of each indexed point are returned. In th...
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
if (self._fit_method is None): raise NotFittedError('Must fit neighbors before querying.') if (n_neighbors is None): n_neighbors = self.n_neighbors if (X is not None): query_is_train = False X = check_array(X, accept_sparse='csr') else: query_is_train ...
'Computes the (weighted) graph of k-Neighbors for points in X Parameters X : array-like, shape (n_query, n_features), or (n_query, n_indexed) if metric == \'precomputed\' The query point or points. If not provided, neighbors of each indexed point are returned. In this case, the query point is not consid...
def kneighbors_graph(self, X=None, n_neighbors=None, mode='connectivity'):
if (n_neighbors is None): n_neighbors = self.n_neighbors if (X is not None): X = check_array(X, accept_sparse='csr') n_samples1 = X.shape[0] else: n_samples1 = self._fit_X.shape[0] n_samples2 = self._fit_X.shape[0] n_nonzero = (n_samples1 * n_neighbors) A_indptr =...
'Finds the neighbors within a given radius of a point or points. Return the indices and distances of each point from the dataset lying in a ball with size ``radius`` around the points of the query array. Points lying on the boundary are included in the results. The result points are *not* necessarily sorted by distance...
def radius_neighbors(self, X=None, radius=None, return_distance=True):
if (self._fit_method is None): raise NotFittedError('Must fit neighbors before querying.') if (X is not None): query_is_train = False X = check_array(X, accept_sparse='csr') else: query_is_train = True X = self._fit_X if (radius is None): radiu...
'Computes the (weighted) graph of Neighbors for points in X Neighborhoods are restricted the points at a distance lower than radius. Parameters X : array-like, shape = [n_samples, n_features], optional The query point or points. If not provided, neighbors of each indexed point are returned. In this case, the query poin...
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
if (X is not None): X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) n_samples2 = self._fit_X.shape[0] if (radius is None): radius = self.radius if (mode == 'connectivity'): A_ind = self.radius_neighbors(X, radius, return_distance=False) A_data = None elif (mod...
def fit(self, X, y):
    """Fit the model using X as training data and y as target values.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree}
        Training data. If array or matrix, shape [n_samples, n_features],
        or [n_samples, n_samples] if metric='precomputed'.
    y : {array-like, sparse matrix}
        Target values, shape = [n_samples] or [n_samples, n_outputs].
    """
    # A pre-built tree is accepted as-is; anything else is validated
    # together with y so that their shapes stay consistent.
    is_prebuilt_tree = isinstance(X, (KDTree, BallTree))
    if not is_prebuilt_tree:
        X, y = check_X_y(X, y, 'csr', multi_output=True)
    self._y = y
    return self._fit(X)
'Fit the model using X as training data and y as target values Parameters X : {array-like, sparse matrix, BallTree, KDTree} Training data. If array or matrix, shape [n_samples, n_features], or [n_samples, n_samples] if metric=\'precomputed\'. y : {array-like, sparse matrix} Target values of shape = [n_samples] or [n_sa...
def fit(self, X, y):
if (not isinstance(X, (KDTree, BallTree))): (X, y) = check_X_y(X, y, 'csr', multi_output=True) if ((y.ndim == 1) or ((y.ndim == 2) and (y.shape[1] == 1))): if (y.ndim != 1): warnings.warn('A column-vector y was passed when a 1d array was expected. Ple...
def fit(self, X, y=None):
    """Fit the model using X as training data.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree}
        Training data. If array or matrix, shape [n_samples, n_features],
        or [n_samples, n_samples] if metric='precomputed'.
    y : ignored
        Present only for estimator-API compatibility.
    """
    # Unsupervised variant: all validation and index construction is
    # delegated to the shared _fit helper.
    return self._fit(X)
def fit(self, X, y=None):
    """Fit the Kernel Density model on the data.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row corresponds
        to a single data point.
    y : ignored
        Present only for estimator-API compatibility.
    """
    algorithm = self._choose_algorithm(self.algorithm, self.metric)
    X = check_array(X, order='C', dtype=DTYPE)
    # metric_params is optional; normalize None to an empty kwargs dict
    # before expanding it into the tree constructor.
    extra = {} if self.metric_params is None else self.metric_params
    self.tree_ = TREE_DICT[algorithm](
        X, metric=self.metric, leaf_size=self.leaf_size, **extra)
    return self
'Evaluate the density model on the data. Parameters X : array_like, shape (n_samples, n_features) An array of points to query. Last dimension should match dimension of training data (n_features). Returns density : ndarray, shape (n_samples,) The array of log(density) evaluations.'
def score_samples(self, X):
X = check_array(X, order='C', dtype=DTYPE) N = self.tree_.data.shape[0] atol_N = (self.atol * N) log_density = self.tree_.kernel_density(X, h=self.bandwidth, kernel=self.kernel, atol=atol_N, rtol=self.rtol, breadth_first=self.breadth_first, return_log=True) log_density -= np.log(N) return log_de...
def score(self, X, y=None):
    """Compute the total log probability under the model.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row corresponds
        to a single data point.
    y : ignored
        Present only for estimator-API compatibility.

    Returns
    -------
    logprob : float
        Total log-likelihood of the data in X.
    """
    # The total log-likelihood is simply the sum of the per-sample
    # log-density evaluations.
    per_sample_log_density = self.score_samples(X)
    return np.sum(per_sample_log_density)
'Generate random samples from the model. Currently, this is implemented only for gaussian and tophat kernels. Parameters n_samples : int, optional Number of samples to generate. Defaults to 1. random_state : int, RandomState instance or None. default to None If int, random_state is the seed used by the random number ge...
def sample(self, n_samples=1, random_state=None):
if (self.kernel not in ['gaussian', 'tophat']): raise NotImplementedError() data = np.asarray(self.tree_.data) rng = check_random_state(random_state) i = rng.randint(data.shape[0], size=n_samples) if (self.kernel == 'gaussian'): return np.atleast_2d(rng.normal(data[i], self.bandwidth...