code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def set_up(self):
"""Set up your applications and the test environment."""
self.path.state = self.path.gen.joinpath("state")
if self.path.state.exists():
self.path.state.rmtree(ignore_errors=True)
self.path.state.mkdir()
for script in self.given.get("scripts", []):
... | Set up your applications and the test environment. |
def distance_matrix(client, origins, destinations,
mode=None, language=None, avoid=None, units=None,
departure_time=None, arrival_time=None, transit_mode=None,
transit_routing_preference=None, traffic_model=None, region=None):
""" Gets travel distance and ... | Gets travel distance and time for a matrix of origins and destinations.
:param origins: One or more locations and/or latitude/longitude values,
from which to calculate distance and time. If you pass an address as
a string, the service will geocode the string and convert it to a
latitude/lon... |
def check_views(view_set, max_views=3):
"""Ensures valid view/dimensions are selected."""
if not isinstance(view_set, Iterable):
view_set = tuple([view_set, ])
if len(view_set) > max_views:
raise ValueError('Can only have {} views'.format(max_views))
return [check_int(view, 'view', mi... | Ensures valid view/dimensions are selected. |
def bindata(data, maxbins = 30, reduction = 0.1):
'''
data must be numeric list with a len above 20
This function counts the number of data points in a reduced array
'''
tole = 0.01
N = len(data)
assert N > 20
vmin = min(data)
vmax = max(data)
DV = vmax - vmin
tol = tole*DV
vmax += t... | data must be numeric list with a len above 20
This function counts the number of data points in a reduced array |
def parse(self, string, parent, module=True, filepath=None):
"""Extracts modules *and* programs from a fortran code file.
:arg string: the contents of the fortran code file.
:arg parent: the instance of CodeParser that will own the return Module.
:arg module: when true, the code file wi... | Extracts modules *and* programs from a fortran code file.
:arg string: the contents of the fortran code file.
:arg parent: the instance of CodeParser that will own the return Module.
:arg module: when true, the code file will be searched for modules; otherwise
it will be searched for ... |
def weibo_url(self):
"""获取用户微博链接.
:return: 微博链接地址,如没有则返回 ‘unknown’
:rtype: str
"""
if self.url is None:
return None
else:
tmp = self.soup.find(
'a', class_='zm-profile-header-user-weibo')
return tmp['href'] if tmp is no... | 获取用户微博链接.
:return: 微博链接地址,如没有则返回 ‘unknown’
:rtype: str |
def removeRef(self, doc):
"""Remove the given attribute from the Ref table maintained
internally. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlRemoveRef(doc__o, self._o)
return ret | Remove the given attribute from the Ref table maintained
internally. |
def attached(name, force=False):
'''
Ensure zone is attached
name : string
name of the zone
force : boolean
force attach the zone
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
zones = __salt__['zoneadm.list'](instal... | Ensure zone is attached
name : string
name of the zone
force : boolean
force attach the zone |
def fields(depth, Rp, Rm, Gam, lrec, lsrc, zsrc, TM):
r"""Calculate Pu+, Pu-, Pd+, Pd-.
This is a modified version of empymod.kernel.fields(). See the original
version for more information.
"""
# Booleans if src in first or last layer; swapped if up=True
first_layer = lsrc == 0
last_layer ... | r"""Calculate Pu+, Pu-, Pd+, Pd-.
This is a modified version of empymod.kernel.fields(). See the original
version for more information. |
def var_explained(y_true, y_pred):
"""Fraction of variance explained.
"""
y_true, y_pred = _mask_nan(y_true, y_pred)
var_resid = np.var(y_true - y_pred)
var_y_true = np.var(y_true)
return 1 - var_resid / var_y_true | Fraction of variance explained. |
def get_flanker(group, query):
"""
>>> get_flanker([(370, 15184), (372, 15178), (373, 15176), (400, 15193)], 385)
((373, 15176), (400, 15193), True)
>>> get_flanker([(124, 13639), (137, 13625)], 138)
((137, 13625), (137, 13625), False)
"""
group.sort()
pos = bisect_left(group, (query, ... | >>> get_flanker([(370, 15184), (372, 15178), (373, 15176), (400, 15193)], 385)
((373, 15176), (400, 15193), True)
>>> get_flanker([(124, 13639), (137, 13625)], 138)
((137, 13625), (137, 13625), False) |
def set_link(self, prop, value):
""" Set given link in CTS Namespace
.. example::
collection.set_link(NAMESPACES.CTS.about, "urn:cts:latinLit:phi1294.phi002")
:param prop: Property to set (Without namespace)
:param value: Value to set for given property
"""
... | Set given link in CTS Namespace
.. example::
collection.set_link(NAMESPACES.CTS.about, "urn:cts:latinLit:phi1294.phi002")
:param prop: Property to set (Without namespace)
:param value: Value to set for given property |
def _get_rsa_public_key(cert):
"""
PyOpenSSL does not provide a public method to export the public key from a certificate as a properly formatted
ASN.1 RSAPublicKey structure. There are 'hacks' which use dump_privatekey(crypto.FILETYPE_ASN1, <public_key>),
but this dumps the public key w... | PyOpenSSL does not provide a public method to export the public key from a certificate as a properly formatted
ASN.1 RSAPublicKey structure. There are 'hacks' which use dump_privatekey(crypto.FILETYPE_ASN1, <public_key>),
but this dumps the public key within a PrivateKeyInfo structure which is not suita... |
def run(self):
"""Start the oplog worker.
"""
ReplicationLagLogger(self, 30).start()
LOG.debug("OplogThread: Run thread started")
while self.running is True:
LOG.debug("OplogThread: Getting cursor")
cursor, cursor_empty = retry_until_ok(self.init_cursor)
... | Start the oplog worker. |
def _contents(self):
"""Define the contents of new Infos.
transmit() -> _what() -> create_information() -> _contents().
"""
stories = [
"ghosts.md",
"cricket.md",
"moochi.md",
"outwit.md",
"raid.md",
"species.md",
... | Define the contents of new Infos.
transmit() -> _what() -> create_information() -> _contents(). |
def patch_memcache():
"""Monkey patch python-memcached to implement our consistent hashring
in its node selection and operations.
"""
def _init(self, servers, *k, **kw):
self._old_init(servers, *k, **kw)
nodes = {}
for server in self.servers:
conf = {
... | Monkey patch python-memcached to implement our consistent hashring
in its node selection and operations. |
def p_sens_all_paren(self, p):
'senslist : AT LPAREN TIMES RPAREN'
p[0] = SensList(
(Sens(None, 'all', lineno=p.lineno(1)),), lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | senslist : AT LPAREN TIMES RPAREN |
def handshake_peers(self):
'''
pstrlen = length of pstr as one byte
pstr = BitTorrent protocol
reserved = chr(0)*8
info_hash = 20-byte hash above (aka self.hash_string)
peer_id = 20-byte string
'''
pstr = 'BitTorrent protocol'
pstrlen = len(pstr)
... | pstrlen = length of pstr as one byte
pstr = BitTorrent protocol
reserved = chr(0)*8
info_hash = 20-byte hash above (aka self.hash_string)
peer_id = 20-byte string |
def do_fit(self, event):
"""
Re-fit the window to the size of the content.
"""
#self.grid.ShowScrollbars(wx.SHOW_SB_NEVER, wx.SHOW_SB_NEVER)
if event:
event.Skip()
self.main_sizer.Fit(self)
disp_size = wx.GetDisplaySize()
actual_size = self.Get... | Re-fit the window to the size of the content. |
def make_blastdb(self):
"""
Create a BLAST database of the primer file
"""
# remove the path and the file extension for easier future globbing
db = os.path.splitext(self.formattedprimers)[0]
nhr = '{db}.nhr'.format(db=db) # add nhr for searching
if not os.path.is... | Create a BLAST database of the primer file |
def inet_to_str(inet):
"""Convert inet object to a string
Args:
inet (inet struct): inet network address
Returns:
str: Printable/readable IP address
"""
# First try ipv4 and then ipv6
try:
return socket.inet_ntop(socket.AF_INET, inet)
except ValueErro... | Convert inet object to a string
Args:
inet (inet struct): inet network address
Returns:
str: Printable/readable IP address |
def _build_tree(self, position, momentum, slice_var, direction, depth, stepsize):
"""
Recursively builds a tree for proposing new position and momentum
"""
# Parameter names in algorithm (here -> representation in algorithm)
# position -> theta, momentum -> r, slice_var -> u, di... | Recursively builds a tree for proposing new position and momentum |
def add_edge_configuration(self, param_name, edge, param_value):
"""
Set a parameter for a given edge
:param param_name: parameter identifier (as specified by the chosen model)
:param edge: edge identifier
:param param_value: parameter value
"""
if param_name not... | Set a parameter for a given edge
:param param_name: parameter identifier (as specified by the chosen model)
:param edge: edge identifier
:param param_value: parameter value |
def _registered(self):
"""
A optional boolean property indidcating whether this job store is registered. The
registry is the authority on deciding if a job store exists or not. If True, this job
store exists, if None the job store is transitioning from True to False or vice versa,
... | A optional boolean property indidcating whether this job store is registered. The
registry is the authority on deciding if a job store exists or not. If True, this job
store exists, if None the job store is transitioning from True to False or vice versa,
if False the job store doesn't exist.
... |
def make_iterable(obj, default=None):
""" Ensure obj is iterable. """
if obj is None:
return default or []
if isinstance(obj, (compat.string_types, compat.integer_types)):
return [obj]
return obj | Ensure obj is iterable. |
def get_filename(self):
''' Return the source filename of the current Stim. '''
if self.filename is None or not os.path.exists(self.filename):
tf = tempfile.mktemp() + self._default_file_extension
self.save(tf)
yield tf
os.remove(tf)
else:
... | Return the source filename of the current Stim. |
def insert_attribute(self, att, index):
"""
Inserts the attribute at the specified location.
:param att: the attribute to insert
:type att: Attribute
:param index: the index to insert the attribute at
:type index: int
"""
javabridge.call(self.jobject, "in... | Inserts the attribute at the specified location.
:param att: the attribute to insert
:type att: Attribute
:param index: the index to insert the attribute at
:type index: int |
def rmdir(self, pathobj):
"""
Removes a directory
"""
stat = self.stat(pathobj)
if not stat.is_dir:
raise OSError(20, "Not a directory: '%s'" % str(pathobj))
url = str(pathobj) + '/'
text, code = self.rest_del(url, session=pathobj.session, verify=pa... | Removes a directory |
def _proc_pax(self, filetar):
"""Process an extended or global header as described in POSIX.1-2001."""
# Read the header information.
buf = filetar.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
... | Process an extended or global header as described in POSIX.1-2001. |
def add(env, securitygroup_id, remote_ip, remote_group,
direction, ethertype, port_max, port_min, protocol):
"""Add a security group rule to a security group.
\b
Examples:
# Add an SSH rule (TCP port 22) to a security group
slcli sg rule-add 384727 \\
--direction ingress... | Add a security group rule to a security group.
\b
Examples:
# Add an SSH rule (TCP port 22) to a security group
slcli sg rule-add 384727 \\
--direction ingress \\
--protocol tcp \\
--port-min 22 \\
--port-max 22
\b
# Add a ping rule (... |
def get_atlas_summary_df(self):
"""Create a single data frame which summarizes all genes per row.
Returns:
DataFrame: Pandas DataFrame of the results
"""
all_info = []
for g in self.reference_gempro.genes_with_a_representative_sequence:
info = {}
... | Create a single data frame which summarizes all genes per row.
Returns:
DataFrame: Pandas DataFrame of the results |
def extract(fileobj, keywords, comment_tags, options):
"""Extracts translation messages from underscore template files.
This method does also extract django templates. If a template does not
contain any django translation tags we always fallback to underscore extraction.
This is a plugin to Babel, wri... | Extracts translation messages from underscore template files.
This method does also extract django templates. If a template does not
contain any django translation tags we always fallback to underscore extraction.
This is a plugin to Babel, written according to
http://babel.pocoo.org/docs/messages/#wr... |
def speakerDiarization(filename, n_speakers, mt_size=2.0, mt_step=0.2,
st_win=0.05, lda_dim=35, plot_res=False):
'''
ARGUMENTS:
- filename: the name of the WAV file to be analyzed
- n_speakers the number of speakers (clusters) in the recording (<=0 for unknown)
... | ARGUMENTS:
- filename: the name of the WAV file to be analyzed
- n_speakers the number of speakers (clusters) in the recording (<=0 for unknown)
- mt_size (opt) mid-term window size
- mt_step (opt) mid-term window step
- st_win (opt) short-term window size
... |
def encode(self, X, seed=None):
"""Given a string ``X``, returns ``unrank(X[:n]) || X[n:]`` where ``n``
is the the maximum number of bytes that can be unranked w.r.t. the
capacity of the input ``dfa`` and ``unrank`` is w.r.t. to the input
``dfa``.
"""
if not X:
... | Given a string ``X``, returns ``unrank(X[:n]) || X[n:]`` where ``n``
is the the maximum number of bytes that can be unranked w.r.t. the
capacity of the input ``dfa`` and ``unrank`` is w.r.t. to the input
``dfa``. |
def log_tensor_stats(self, tensor, name):
"""Add distribution statistics on a tensor's elements to the current History entry
"""
# TODO Handle the case of duplicate names.
if (isinstance(tensor, tuple) or isinstance(tensor, list)):
while (isinstance(tensor, tuple) or isinsta... | Add distribution statistics on a tensor's elements to the current History entry |
def get(self, language: str=None, default: str=None) -> str:
"""Gets the underlying value in the specified or
primary language.
Arguments:
language:
The language to get the value in.
Returns:
The value in the current language, or
the ... | Gets the underlying value in the specified or
primary language.
Arguments:
language:
The language to get the value in.
Returns:
The value in the current language, or
the primary language in case no language
was specified. |
def _to_s(val, sep=", "):
"""Convert any to string.
:param val: An object
:param sep: separator between values
>>> _to_s([1, 2, 3])
'1, 2, 3'
>>> _to_s("aaa")
'aaa'
"""
if anyconfig.utils.is_iterable(val):
return sep.join(str(x) for x in val)
return str(val) | Convert any to string.
:param val: An object
:param sep: separator between values
>>> _to_s([1, 2, 3])
'1, 2, 3'
>>> _to_s("aaa")
'aaa' |
def reset_statistics(stat, frequencies, reset_cumulative, recalculate=False):
"""
Resets the specified statistic's data (deletes it) for the given
frequency/ies.
"""
stats = ensure_list(stat)
frequencies = ensure_list(frequencies)
for s in stats:
for f in frequencies:
i... | Resets the specified statistic's data (deletes it) for the given
frequency/ies. |
def GetPageInfo(self):
"""Returns page information
What is the page range available, and what is the selected page range.
"""
return self.first_tab, self.last_tab, self.first_tab, self.last_tab | Returns page information
What is the page range available, and what is the selected page range. |
def reset(self):
"""Reset the clustering to the original clustering.
All changes are lost.
"""
self._undo_stack.clear()
self._spike_clusters = self._spike_clusters_base
self._new_cluster_id = self._new_cluster_id_0 | Reset the clustering to the original clustering.
All changes are lost. |
def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data with linear direct value interpolation
If y is a time series and passed, it will be transformed as well
The time dimension is removed from the data
Parameters
----------
X : a... | Transforms the time series data with linear direct value interpolation
If y is a time series and passed, it will be transformed as well
The time dimension is removed from the data
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (option... |
def exists(self, dataset_id):
""" Check if a dataset exists in Google BigQuery
Parameters
----------
dataset_id : str
Name of dataset to be verified
Returns
-------
boolean
true if dataset exists, otherwise false
"""
from ... | Check if a dataset exists in Google BigQuery
Parameters
----------
dataset_id : str
Name of dataset to be verified
Returns
-------
boolean
true if dataset exists, otherwise false |
def copy_unit_spike_features(self, sorting, unit_ids=None):
'''Copy unit spike features from another sorting extractor to the current
sorting extractor.
Parameters
----------
sorting: SortingExtractor
The sorting extractor from which the spike features will be copied... | Copy unit spike features from another sorting extractor to the current
sorting extractor.
Parameters
----------
sorting: SortingExtractor
The sorting extractor from which the spike features will be copied
unit_ids: (array_like, int)
The list (or single va... |
def find_outputs_in_range(self, ifo, current_segment, useSplitLists=False):
"""
Return the list of Files that is most appropriate for the supplied
time range. That is, the Files whose coverage time has the
largest overlap with the supplied time range.
Parameters
--------... | Return the list of Files that is most appropriate for the supplied
time range. That is, the Files whose coverage time has the
largest overlap with the supplied time range.
Parameters
-----------
ifo : string
Name of the ifo (or ifos) that the File should correspond to... |
def save_info(self, dirn):
'''
Save information about the distribution in its dist_dir.
'''
with current_directory(dirn):
info('Saving distribution info')
with open('dist_info.json', 'w') as fileh:
json.dump({'dist_name': self.ctx.dist_name,
... | Save information about the distribution in its dist_dir. |
def direction(self):
"""
Get image direction
Returns
-------
tuple
"""
libfn = utils.get_lib_fn('getDirection%s'%self._libsuffix)
return libfn(self.pointer) | Get image direction
Returns
-------
tuple |
def amplitude_by_welch(self, data_frame):
"""
This methods uses the Welch method :cite:`Welch1967` to obtain the power spectral density, this is a robust
alternative to using fft_signal & amplitude
:param data_frame: the data frame
:type data_frame: pandas.DataF... | This methods uses the Welch method :cite:`Welch1967` to obtain the power spectral density, this is a robust
alternative to using fft_signal & amplitude
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return: the ampl
:rtype ampl: float
... |
def X(self) -> Optional[Union[np.ndarray, sparse.spmatrix, ArrayView]]:
"""Data matrix of shape :attr:`n_obs` × :attr:`n_vars`."""
if self.isbacked:
if not self.file.isopen: self.file.open()
X = self.file['X']
if self.isview:
X = X[self._oidx, self._vi... | Data matrix of shape :attr:`n_obs` × :attr:`n_vars`. |
def run(self, lines):
""" Match and store Fenced Code Blocks in the HtmlStash.
"""
text = "\n".join(lines)
while 1:
m = FENCED_BLOCK_RE.search(text)
if not m:
break
lang = m.group('lang')
linenums = bool(m.group('linenums'))... | Match and store Fenced Code Blocks in the HtmlStash. |
def get_unique_filename(filename, new_filename=None, new_extension=None):
"""
Génère un nouveau nom pour un fichier en gardant son extension
Soit le nouveau nom est généré à partir de la date
(heures+minutes+secondes+microsecondes) soit un nouveau nom est spécifié et on
l'utilise tel quel.
... | Génère un nouveau nom pour un fichier en gardant son extension
Soit le nouveau nom est généré à partir de la date
(heures+minutes+secondes+microsecondes) soit un nouveau nom est spécifié et on
l'utilise tel quel.
:type filename: string
:param filename: Nom du fichier original
:t... |
def progressbar(stream, prefix='Loading: ', width=0.5, **options):
""" Generator filter to print a progress bar. """
size = len(stream)
if not size:
return stream
if 'width' not in options:
if width <= 1:
width = round(shutil.get_terminal_size()[0] * width)
options['w... | Generator filter to print a progress bar. |
def set_(key, value, profile=None):
'''
Set a key/value pair in the vault service
'''
if '?' in key:
__utils__['versions.warn_until'](
'Neon',
(
'Using ? to seperate between the path and key for vault has been deprecated '
'and will be remo... | Set a key/value pair in the vault service |
def calcsize(values, sizerange=(2,70), inds=None, plaw=3):
""" Use set of values to calculate symbol size.
values is a list of floats for candidate significance.
inds is an optional list of indexes to use to calculate symbol size.
Scaling of symbol size min max set by sizerange tuple (min, max).
pl... | Use set of values to calculate symbol size.
values is a list of floats for candidate significance.
inds is an optional list of indexes to use to calculate symbol size.
Scaling of symbol size min max set by sizerange tuple (min, max).
plaw is powerlaw scaling of symbol size from values |
def get_adaptive_threshold(threshold_method, image, threshold,
mask = None,
adaptive_window_size = 10,
**kwargs):
"""Given a global threshold, compute a threshold per pixel
Break the image into blocks, computing the thres... | Given a global threshold, compute a threshold per pixel
Break the image into blocks, computing the threshold per block.
Afterwards, constrain the block threshold to .7 T < t < 1.5 T.
Block sizes must be at least 50x50. Images > 500 x 500 get 10x10
blocks. |
def _refreshNodeFromTarget(self):
""" Updates the config settings
"""
for key, value in self.viewBox.state.items():
if key != "limits":
childItem = self.childByNodeName(key)
childItem.data = value
else:
# limits contains a d... | Updates the config settings |
def write(self, *args, **kwargs):
"""
:param args: tuple(value, style), tuple(value, style)
:param kwargs: header=tuple(value, style), header=tuple(value, style)
:param args: value, value
:param kwargs: header=value, header=value
"""
if args:
kwargs =... | :param args: tuple(value, style), tuple(value, style)
:param kwargs: header=tuple(value, style), header=tuple(value, style)
:param args: value, value
:param kwargs: header=value, header=value |
def inspect(self):
"""
Fetches information about the container from the client.
"""
policy = self.policy
config_id = self.config_id
if self.config_id.config_type == ItemType.VOLUME:
if self.container_map.use_attached_parent_name:
container_name... | Fetches information about the container from the client. |
def top_x_bleu(query_dic, mark, x=1):
"""
Calculate the top x average bleu value predictions ranking by item, x default is set above
:param query_dic: dict, key is qid, value is (item, bleu) tuple list, which will be ranked by 'item' as key
:param mark:string, which indicates which method is evaluat... | Calculate the top x average bleu value predictions ranking by item, x default is set above
:param query_dic: dict, key is qid, value is (item, bleu) tuple list, which will be ranked by 'item' as key
:param mark:string, which indicates which method is evaluated, also used as output file name here.
:param ... |
def tt_avg(self, print_output=True, output_file="tt.csv"):
"""
Compute average term-topic matrix, and print to file if
print_output=True.
"""
avg = self.tt.mean(axis=2)
if print_output:
np.savetxt(output_file, avg, delimiter=",")
return avg | Compute average term-topic matrix, and print to file if
print_output=True. |
def is_done(self):
"""True if the last two moves were Pass or if the position is at a move
greater than the max depth."""
return self.position.is_game_over() or self.position.n >= FLAGS.max_game_length | True if the last two moves were Pass or if the position is at a move
greater than the max depth. |
def remove_whitespace(s):
""" Unsafely attempts to remove HTML whitespace. This is not an HTML parser
which is why its considered 'unsafe', but it should work for most
implementations. Just use on at your own risk.
@s: #str
-> HTML with whitespace removed, ignoring <pre>, script, t... | Unsafely attempts to remove HTML whitespace. This is not an HTML parser
which is why its considered 'unsafe', but it should work for most
implementations. Just use on at your own risk.
@s: #str
-> HTML with whitespace removed, ignoring <pre>, script, textarea and code
tags |
def dispatch(self, request, *args, **kwargs):
"""Adds useful objects to the class."""
self._add_next_and_user(request)
return super(DeleteImageView, self).dispatch(request, *args, **kwargs) | Adds useful objects to the class. |
def all_terminated():
"""For each remote shell determine if its terminated"""
instances_found = False
for i in all_instances():
instances_found = True
if i.state not in (remote_dispatcher.STATE_TERMINATED,
remote_dispatcher.STATE_DEAD):
return False
... | For each remote shell determine if its terminated |
def list(self):
"""List collection items."""
if self.is_fake:
return
for item in self.collection.list():
yield item.uid + self.content_suffix | List collection items. |
def stream(self, page, limit=None, page_limit=None):
"""
Generates records one a time from a page, stopping at prescribed limits.
:param Page page: The page to stream.
:param int limit: The max number of records to read.
:param int page_imit: The max number of pages to read.
... | Generates records one a time from a page, stopping at prescribed limits.
:param Page page: The page to stream.
:param int limit: The max number of records to read.
:param int page_imit: The max number of pages to read. |
def populate_requirement_set(requirement_set, args, options, finder,
session, name, wheel_cache):
"""
Marshal cmd line args into a requirement set.
"""
for req in args:
requirement_set.add_requirement(
InstallRequirement.from_l... | Marshal cmd line args into a requirement set. |
def fill_superseqs(data, samples):
"""
Fills the superseqs array with seq data from cat.clust
and fill the edges array with information about paired split locations.
"""
## load super to get edges
io5 = h5py.File(data.clust_database, 'r+')
superseqs = io5["seqs"]
splits = io5["splits"]
... | Fills the superseqs array with seq data from cat.clust
and fill the edges array with information about paired split locations. |
def lsp_server_ready(self, language, configuration):
"""Notify all stackeditors about LSP server availability."""
for editorstack in self.editorstacks:
editorstack.notify_server_ready(language, configuration) | Notify all stackeditors about LSP server availability. |
def relabel(self, label=None, group=None, depth=1):
"""Clone object and apply new group and/or label.
Applies relabeling to children up to the supplied depth.
Args:
label (str, optional): New label to apply to returned object
group (str, optional): New group to apply to... | Clone object and apply new group and/or label.
Applies relabeling to children up to the supplied depth.
Args:
label (str, optional): New label to apply to returned object
group (str, optional): New group to apply to returned object
depth (int, optional): Depth to wh... |
def terminate(self, signal_chain=KILL_CHAIN, kill_wait=KILL_WAIT_SEC, purge=True):
"""Ensure a process is terminated by sending a chain of kill signals (SIGTERM, SIGKILL)."""
alive = self.is_alive()
if alive:
logger.debug('terminating {}'.format(self._name))
for signal_type in signal_chain:
... | Ensure a process is terminated by sending a chain of kill signals (SIGTERM, SIGKILL). |
def _validate(self, writing=False):
"""Verify that the box obeys the specifications."""
if ((len(self.bits_per_component) != len(self.signed)) or
(len(self.signed) != self.palette.shape[1])):
msg = ("The length of the 'bits_per_component' and the 'signed' "
... | Verify that the box obeys the specifications. |
def keys(self, remote=False):
"""
Returns the database names for this client. Default is
to return only the locally cached database names, specify
``remote=True`` to make a remote request to include all databases.
:param bool remote: Dictates whether the list of locally cached
... | Returns the database names for this client. Default is
to return only the locally cached database names, specify
``remote=True`` to make a remote request to include all databases.
:param bool remote: Dictates whether the list of locally cached
database names are returned or a remote... |
def quick_report(report_type, change, options):
"""
writes a change report via report_type to options.output or
sys.stdout
"""
report = report_type(None, options)
if options.output:
with open(options.output, "w") as out:
report.run(change, None, out)
else:
repor... | writes a change report via report_type to options.output or
sys.stdout |
def _trj_check_version(self, version, python, force):
"""Checks for version mismatch
Raises a VersionMismatchError if version of loaded trajectory and current pypet version
do not match. In case of `force=True` error is not raised only a warning is emitted.
"""
curr_python = py... | Checks for version mismatch
Raises a VersionMismatchError if version of loaded trajectory and current pypet version
do not match. In case of `force=True` error is not raised only a warning is emitted. |
def add_debug(parser):
"""Add a `debug` flag to the _parser_."""
parser.add_argument(
'-d', '--debug', action='store_const', const=logging.DEBUG, default=logging.INFO, help='Set DEBUG output') | Add a `debug` flag to the _parser_. |
def XXX_REMOVEME(func):
"""Decorator for dead code removal
"""
@wraps(func)
def decorator(self, *args, **kwargs):
msg = "~~~~~~~ XXX REMOVEME marked method called: {}.{}".format(
self.__class__.__name__, func.func_name)
raise RuntimeError(msg)
return func(self, *args,... | Decorator for dead code removal |
def graph(self, as_dot=False):
"""Get the resolve graph.
Args:
as_dot: If True, get the graph as a dot-language string. Otherwise,
a pygraph.digraph object is returned.
Returns:
A string or `pygraph.digraph` object, or None if there is no graph
... | Get the resolve graph.
Args:
as_dot: If True, get the graph as a dot-language string. Otherwise,
a pygraph.digraph object is returned.
Returns:
A string or `pygraph.digraph` object, or None if there is no graph
associated with the resolve. |
def get_disk_statistics(self):
"""
Create a map of disks in the machine.
http://www.kernel.org/doc/Documentation/iostats.txt
Returns:
(major, minor) -> DiskStatistics(device, ...)
"""
result = {}
if os.access('/proc/diskstats', os.R_OK):
s... | Create a map of disks in the machine.
http://www.kernel.org/doc/Documentation/iostats.txt
Returns:
(major, minor) -> DiskStatistics(device, ...) |
def dayname(year, month, day):
'''
Give the name of the month and day for a given date.
Returns:
tuple month_name, day_name
'''
legal_date(year, month, day)
yearday = (month - 1) * 28 + day
if isleap(year + YEAR_EPOCH - 1):
dname = data.day_names_leap[yearday - 1]
else... | Give the name of the month and day for a given date.
Returns:
tuple month_name, day_name |
def electric_field_amplitude_intensity(s0,Omega=1.0e6):
'''This function returns the value of E0 (the amplitude of the electric field)
at a given saturation parameter s0=I/I0, where I0=2.50399 mW/cm^2 is the
saturation intensity of the D2 line of Rubidium for linearly polarized light.'''
e0=hbar*Omega/(e*a0) #T... | This function returns the value of E0 (the amplitude of the electric field)
at a given saturation parameter s0=I/I0, where I0=2.50399 mW/cm^2 is the
saturation intensity of the D2 line of Rubidium for linearly polarized light. |
def download(cls, filename, input_dir, dl_dir=None):
"""Provide potentially streaming download from S3 using gof3r
or the AWS CLI.
"""
file_info = cls.parse_remote(filename)
if not dl_dir:
dl_dir = os.path.join(input_dir, file_info.bucket,
... | Provide potentially streaming download from S3 using gof3r
or the AWS CLI. |
def import_module(dotted_path):
"""
Imports the specified module based on the
dot notated import path for the module.
"""
import importlib
module_parts = dotted_path.split('.')
module_path = '.'.join(module_parts[:-1])
module = importlib.import_module(module_path)
return getattr(mo... | Imports the specified module based on the
dot notated import path for the module. |
def has_value(key, value=None):
'''
Determine whether the key exists in the current salt process
environment dictionary. Optionally compare the current value
of the environment against the supplied value string.
key
Must be a string. Used as key for environment lookup.
value:
O... | Determine whether the key exists in the current salt process
environment dictionary. Optionally compare the current value
of the environment against the supplied value string.
key
Must be a string. Used as key for environment lookup.
value:
Optional. If key exists in the environment, c... |
def get_remembered_position(self):
"""Returns the current position in the partially formatted phone
number of the character which was previously passed in as the
parameter of input_digit(remember_position=True)."""
if not self._able_to_format:
return self._original_position
... | Returns the current position in the partially formatted phone
number of the character which was previously passed in as the
parameter of input_digit(remember_position=True). |
def getGolangPackages(self):
"""Get a list of all golang packages for all available branches
"""
packages = {}
# get all packages
url = "%s/packages" % self.base_url
params = {"pattern": "golang-*", "limit": 200}
response = requests.get(url, params=params)
if response.status_code != requests.codes.ok:... | Get a list of all golang packages for all available branches |
def catch(do, my_exception=TypeError, hints='', do_raise=None, prt_tb=True):
"""
防止程序出现 exception后异常退出,
但是这里的异常捕获机制仅仅是为了防止程序退出, 无法做相应处理
可以支持有参数或者无参数模式
- ``do == True`` , 则启用捕获异常
- 无参数也启用 try-catch
.. code:: python
@catch
def fnc():
pass
- 在有... | 防止程序出现 exception后异常退出,
但是这里的异常捕获机制仅仅是为了防止程序退出, 无法做相应处理
可以支持有参数或者无参数模式
- ``do == True`` , 则启用捕获异常
- 无参数也启用 try-catch
.. code:: python
@catch
def fnc():
pass
- 在有可能出错的函数前添加, 不要在最外层添加,
- 这个catch 会捕获从该函数开始的所有异常, 会隐藏下一级函数调用的错误.
- 但是如果在内层的函数也有捕获... |
def _ExtractPathSpecsFromDirectory(self, file_entry, depth=0):
"""Extracts path specification from a directory.
Args:
file_entry (dfvfs.FileEntry): file entry that refers to the directory.
depth (Optional[int]): current depth where 0 represents the file system
root.
Yields:
dfv... | Extracts path specification from a directory.
Args:
file_entry (dfvfs.FileEntry): file entry that refers to the directory.
depth (Optional[int]): current depth where 0 represents the file system
root.
Yields:
dfvfs.PathSpec: path specification of a file entry found in the directory... |
def _check_lib(self, remake, compiler, debug, profile):
"""Makes sure that the linked library with the original code exists. If it doesn't
the library is compiled from scratch.
"""
from os import path
if self.link is None or not path.isfile(self.link):
self.makelib(re... | Makes sure that the linked library with the original code exists. If it doesn't
the library is compiled from scratch. |
def winning_name(self):
    """Return the winning team's name as a string, e.g. 'Houston Astros'.

    Reads the recorded winner flag and pulls the name text from the
    matching parsed-name element.
    """
    # Pick the home-side element when the recorded winner is HOME,
    # otherwise fall back to the away side.
    source = self._home_name if self.winner == HOME else self._away_name
    return source.text()
def run(self, inputRecord):
"""
Run one iteration of this model.
:param inputRecord: (object)
A record object formatted according to
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`
... | Run one iteration of this model.
:param inputRecord: (object)
A record object formatted according to
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`
result format.
:returns: (:... |
def shard_stores(self, index=None, params=None):
"""
Provides store information for shard copies of indices. Store
information reports on which nodes shard copies exist, the shard copy
version, indicating how recent they are, and any exceptions encountered
while opening the shard... | Provides store information for shard copies of indices. Store
information reports on which nodes shard copies exist, the shard copy
version, indicating how recent they are, and any exceptions encountered
while opening the shard index or from earlier engine failure.
`<http://www.elastic.c... |
def save_template(self, name, unlocked=False):
"""Save a message template for later use with `Load template`.
If saved template is marked as unlocked, then changes can be made to it
afterwards. By default tempaltes are locked.
Examples:
| Save Template | MyMessage |
| S... | Save a message template for later use with `Load template`.
If saved template is marked as unlocked, then changes can be made to it
afterwards. By default tempaltes are locked.
Examples:
| Save Template | MyMessage |
| Save Template | MyOtherMessage | unlocked=True | |
def toxml(self):
"""
Exports this object into a LEMS XML object
"""
# Probably name should be removed altogether until its usage is decided, see
# https://github.com/LEMS/LEMS/issues/4
# '''(' name = "{0}"'.format(self.name) if self.name else '') +\'''
return '... | Exports this object into a LEMS XML object |
def __check_command_completion(self, testsemicolon=True):
"""Check for command(s) completion
This function should be called each time a new argument is
seen by the parser in order to check a command is complete. As
not only one command can be ended when receiving a new
argument ... | Check for command(s) completion
This function should be called each time a new argument is
seen by the parser in order to check a command is complete. As
not only one command can be ended when receiving a new
argument (nested commands case), we apply the same work to
parent comm... |
def like(self):
    """Like a clip.

    Issues a POST to the Kippt "likes" endpoint for this clip using the
    owning client's auth headers, and returns the decoded JSON body.
    """
    endpoint = "https://kippt.com/api/clips/%s/likes" % (self.id)
    response = requests.post(endpoint, headers=self.kippt.header)
    return response.json()
def _run(self, *args, **kwargs):
"""Runs RPC server.
Wait for peer to connect and start rpc session with it.
For every connection we start and new rpc session.
"""
apgw_rpc_bind_ip = _validate_rpc_ip(kwargs.pop(NC_RPC_BIND_IP))
apgw_rpc_bind_port = _validate_rpc_port(kwa... | Runs RPC server.
Wait for peer to connect and start rpc session with it.
For every connection we start and new rpc session. |
def declare_type_info(config):
"""Lookup type info from app configuration."""
settings = config.registry.settings
settings['_type_info'] = []
for line in settings['exports-allowable-types'].splitlines():
if not line.strip():
continue
type_name, type_info = line.strip().split(... | Lookup type info from app configuration. |
def add2node(self, othereplus, node):
    """add the node here with the node from othereplus
    this will potentially have duplicates"""
    # Keys are stored upper-cased; normalize once and accumulate the
    # counterpart's value for the same key.
    key = node.upper()
    self.dt[key] = self.dt[key] + othereplus.dt[key]
def __WaitForVolume(volume, desired_state):
""" Blocks until EBS volume is in desired state. """
print 'Waiting for volume %s to be %s...' % (volume.id, desired_state)
while True:
volume.update()
sys.stdout.write('.')
sys.stdout.flush()
#print 'status is: %s' % volume.status
if volume.status =... | Blocks until EBS volume is in desired state. |
def create_user(self, username, email, password, active=False,
send_email=True):
"""
A simple wrapper that creates a new :class:`User`.
:param username:
String containing the username of the new user.
:param email:
String containing the email... | A simple wrapper that creates a new :class:`User`.
:param username:
String containing the username of the new user.
:param email:
String containing the email address of the new user.
:param password:
String containing the password for the new user.
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.