code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def polygonize(layer):
"""Polygonize a raster layer into a vector layer using GDAL.
Issue https://github.com/inasafe/inasafe/issues/3183
:param layer: The layer to reproject.
:type layer: QgsRasterLayer
:return: Reprojected memory layer.
:rtype: QgsRasterLayer
.. versionadded:: 4.0
"... | Polygonize a raster layer into a vector layer using GDAL.
Issue https://github.com/inasafe/inasafe/issues/3183
:param layer: The layer to reproject.
:type layer: QgsRasterLayer
:return: Reprojected memory layer.
:rtype: QgsRasterLayer
.. versionadded:: 4.0 |
def _clamp_value(value, minimum, maximum):
"""
Clamp a value to fit between a minimum and a maximum.
* If ``value`` is between ``minimum`` and ``maximum``, return ``value``
* If ``value`` is below ``minimum``, return ``minimum``
    * If ``value`` is above ``maximum``, return ``maximum``
Args:
... | Clamp a value to fit between a minimum and a maximum.
* If ``value`` is between ``minimum`` and ``maximum``, return ``value``
* If ``value`` is below ``minimum``, return ``minimum``
* If ``value`` is above ``maximum``, return ``maximum``
Args:
value (float or int): The number to clamp
mi... |
def task_collection_thread_handler(self, results_queue):
"""Main method for worker to run
Pops a chunk of tasks off the collection of pending tasks to be added and submits them to be added.
:param collections.deque results_queue: Queue for worker to output results to
"""
# Add ... | Main method for worker to run
Pops a chunk of tasks off the collection of pending tasks to be added and submits them to be added.
:param collections.deque results_queue: Queue for worker to output results to |
def read_shakemap(self, haz_sitecol, assetcol):
"""
Enabled only if there is a shakemap_id parameter in the job.ini.
Download, unzip, parse USGS shakemap files and build a corresponding
set of GMFs which are then filtered with the hazard site collection
and stored in the datastor... | Enabled only if there is a shakemap_id parameter in the job.ini.
Download, unzip, parse USGS shakemap files and build a corresponding
set of GMFs which are then filtered with the hazard site collection
and stored in the datastore. |
def convert_out(self, obj):
"""Write EMIRUUID header on reduction"""
newobj = super(ProcessedImageProduct, self).convert_out(obj)
if newobj:
hdulist = newobj.open()
hdr = hdulist[0].header
if 'EMIRUUID' not in hdr:
hdr['EMIRUUID'] = str(uuid.uu... | Write EMIRUUID header on reduction |
def delete(self):
"""
Delete the link and free the resources
"""
if not self._created:
return
try:
node1 = self._nodes[0]["node"]
adapter_number1 = self._nodes[0]["adapter_number"]
port_number1 = self._nodes[0]["port_number"]
... | Delete the link and free the resources |
def _action_allowed(self, action):
"""
participation actions can be disabled on layer level, or disabled on a per node basis
"""
if getattr(self.layer.participation_settings, '{0}_allowed'.format(action)) is False:
return False
else:
return getattr(self.participation_settings, '{0}_a... | participation actions can be disabled on layer level, or disabled on a per node basis |
def _expr2bddnode(expr):
"""Convert an expression into a BDD node."""
if expr.is_zero():
return BDDNODEZERO
elif expr.is_one():
return BDDNODEONE
else:
top = expr.top
# Register this variable
_ = bddvar(top.names, top.indices)
root = top.uniqid
l... | Convert an expression into a BDD node. |
def get_select_sql(self):
"""
Calculate the difference between this record's value and the lag/lead record's value
"""
return '(({0}) - ({1}({2}){3}))'.format(
self.field.get_select_sql(),
self.name.upper(),
self.get_field_identifier(),
sel... | Calculate the difference between this record's value and the lag/lead record's value |
def tiered_alignment(in_bam, tier_num, multi_mappers, extra_args,
genome_build, pair_stats,
work_dir, dirs, config):
"""Perform the alignment of non-mapped reads from previous tier.
"""
nomap_fq1, nomap_fq2 = select_unaligned_read_pairs(in_bam, "tier{}".format(tier_... | Perform the alignment of non-mapped reads from previous tier. |
def put(self, storagemodel:object, modeldefinition = None) -> StorageQueueModel:
""" insert queue message into storage """
try:
message = modeldefinition['queueservice'].put_message(storagemodel._queuename, storagemodel.getmessage())
storagemodel.mergemessage(message)
ex... | insert queue message into storage |
def filter_accept_reftrack(self, reftrack):
"""Return True, if the filter accepts the given reftrack
:param reftrack: the reftrack to filter
:type reftrack: :class:`jukeboxcore.reftrack.Reftrack`
:returns: True, if the filter accepts the reftrack
:rtype: :class:`bool`
:r... | Return True, if the filter accepts the given reftrack
:param reftrack: the reftrack to filter
:type reftrack: :class:`jukeboxcore.reftrack.Reftrack`
:returns: True, if the filter accepts the reftrack
:rtype: :class:`bool`
:raises: None |
def p_for_sentence_start(p):
""" for_start : FOR ID EQ expr TO expr step
"""
gl.LOOPS.append(('FOR', p[2]))
p[0] = None
if p[4] is None or p[6] is None or p[7] is None:
return
if is_number(p[4], p[6], p[7]):
if p[4].value != p[6].value and p[7].value == 0:
warning(p... | for_start : FOR ID EQ expr TO expr step |
def get_name_type_dict(self):
""" Returns a dictionary of the type
{'column_name': data_type, ...}
:return: dict
"""
attrs = self.get_attributes()
types = self.get_types()
d = dict()
for i,a in enumerate(attrs):
d[a] = types[i]
return... | Returns a dictionary of the type
{'column_name': data_type, ...}
:return: dict |
def _make_txn_selector(self):
"""Helper for :meth:`read`."""
if self._transaction_id is not None:
return TransactionSelector(id=self._transaction_id)
if self._read_timestamp:
key = "read_timestamp"
value = _datetime_to_pb_timestamp(self._read_timestamp)
... | Helper for :meth:`read`. |
async def remove(self, *instances, using_db=None) -> None:
"""
Removes one or more of ``instances`` from the relation.
"""
db = using_db if using_db else self.model._meta.db
if not instances:
raise OperationalError("remove() called on no instances")
through_ta... | Removes one or more of ``instances`` from the relation. |
def list_engines_by_priority(engines=None):
    """Return the supported engines ordered by their ``priority()`` value.

    :param engines: iterable of engine objects to sort; when ``None``, the
        module-level ``ENGINES`` collection is used.
    :return: a new list, sorted ascending by each engine's ``priority()``.
    """
    candidates = ENGINES if engines is None else engines
    return sorted(candidates, key=operator.methodcaller("priority"))
def platform_cache_dir():
"""
Returns a directory which should be writable for any application
This should be used for temporary deletable data.
"""
if WIN32: # nocover
dpath_ = '~/AppData/Local'
elif LINUX: # nocover
dpath_ = '~/.cache'
elif DARWIN: # nocover
dpat... | Returns a directory which should be writable for any application
This should be used for temporary deletable data. |
def _spinboxValueChanged(self, index, spinBox=None):
""" Is called when a spin box value was changed.
Updates the spin boxes and sets other combo boxes having the same index to
the fake dimension of length 1.
"""
if spinBox is None:
spinBox = self.sender()
... | Is called when a spin box value was changed.
Updates the spin boxes and sets other combo boxes having the same index to
the fake dimension of length 1. |
def creep_data(data_set='creep_rupture'):
"""Brun and Yoshida's metal creep rupture data."""
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar_file = os.path.join(path, 'creeprupt.tar')
tar = tarfile.open(tar_file)
print... | Brun and Yoshida's metal creep rupture data. |
def consume_token(self, tokens, index, tokens_len):
"""Consume a token.
Returns tuple of (tokens, tokens_len, index) when consumption is
completed and tokens have been merged together.
"""
del tokens_len
consumption_ended = False
q_type = self.quote_type
... | Consume a token.
Returns tuple of (tokens, tokens_len, index) when consumption is
completed and tokens have been merged together. |
def train(self):
"""Train the network using the training dataset.
Parameters
----------
None
Returns
-------
None
"""
self.stamp_start = time.time()
for iteration, batch in tqdm.tqdm(enumerate(self.iter_train),
... | Train the network using the training dataset.
Parameters
----------
None
Returns
-------
None |
def evalrepr(self):
    """Return an evaluable string representation of this object.

    A model returns its full name; any other node appends its own name
    to its parent's evaluable representation, dot-separated.
    """
    if self.is_model():
        return self.get_fullname()
    # Non-model: build "<parent repr>.<name>" from the parent chain.
    return ".".join((self.parent.evalrepr, self.name))
def _put_key(file_path, dest_key=None, overwrite=True):
"""
Upload given file into DKV and save it under give key as raw object.
:param dest_key: name of destination key in DKV
:param file_path: path to file to upload
:return: key name if object was uploaded successfully
"""
ret = api("PO... | Upload given file into DKV and save it under give key as raw object.
:param dest_key: name of destination key in DKV
:param file_path: path to file to upload
:return: key name if object was uploaded successfully |
def copy(self):
"""
Safely get a copy of the current mesh.
Copied objects will have emptied caches to avoid memory
issues and so may be slow on initial operations until
caches are regenerated.
Current object will *not* have its cache cleared.
Returns
--... | Safely get a copy of the current mesh.
Copied objects will have emptied caches to avoid memory
issues and so may be slow on initial operations until
caches are regenerated.
Current object will *not* have its cache cleared.
Returns
---------
copied : trimesh.Tri... |
def update(self):
"""Update |C2| based on :math:`c_2 = 1.-c_1-c_3`.
Examples:
The following examples show the calculated value of |C2| are
        clipped when too low or too high:
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> der... | Update |C2| based on :math:`c_2 = 1.-c_1-c_3`.
Examples:
The following examples show the calculated value of |C2| are
    clipped when too low or too high:
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> derived.c1 = 0.6
>>>... |
def info():
"""Generate information for a bug report."""
try:
platform_info = {
'system': platform.system(),
'release': platform.release(),
}
except IOError:
platform_info = {
'system': 'Unknown',
'release': 'Unknown',
}
im... | Generate information for a bug report. |
def alias_composition(self, composition_id, alias_id):
"""Adds an ``Id`` to a ``Composition`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Composition`` is determined by the
provider. The new ``Id`` is an alias to the primary ``Id``. If
the alias is a pointer t... | Adds an ``Id`` to a ``Composition`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Composition`` is determined by the
provider. The new ``Id`` is an alias to the primary ``Id``. If
the alias is a pointer to another composition, it is reassigned
to the given compo... |
def parse_xml_node(self, node):
'''Parse an xml.dom Node object representing a condition into this
object.
'''
self.sequence = int(node.getAttributeNS(RTS_NS, 'sequence'))
c = node.getElementsByTagNameNS(RTS_NS, 'TargetComponent')
if c.length != 1:
raise Inva... | Parse an xml.dom Node object representing a condition into this
object. |
def average(numbers, numtype='float'):
"""
Calculates the average or mean of a list of numbers
Args:
numbers: a list of integers or floating point numbers.
numtype: string, 'decimal' or 'float'; the type of number to return.
Returns:
The average (mean) of the numbers as a floa... | Calculates the average or mean of a list of numbers
Args:
numbers: a list of integers or floating point numbers.
numtype: string, 'decimal' or 'float'; the type of number to return.
Returns:
The average (mean) of the numbers as a floating point number
or a Decimal object.
... |
def find(self, name):
"""Returns a dict of collector's details if found.
Args:
name (str): name of collector searching for
"""
collectors = self.get_collectors()
for collector in collectors:
if name.lower() == collector['name'].lower():
s... | Returns a dict of collector's details if found.
Args:
name (str): name of collector searching for |
def run_command(self, codeobj):
"""Execute a compiled code object, and write the output back to the client."""
try:
value, stdout = yield from self.attempt_exec(codeobj, self.namespace)
except Exception:
yield from self.send_exception()
return
else:
... | Execute a compiled code object, and write the output back to the client. |
def clients(self, protocol=None, groups=None):
"""Returns a list of :py:class:`.Client` for the specific query by the user.
Keyword Parameters:
protocol
Ignored.
groups
The groups (types) to which the clients belong either from ('Genuine', 'Impostor')
Note that 'eval' is an alias fo... | Returns a list of :py:class:`.Client` for the specific query by the user.
Keyword Parameters:
protocol
Ignored.
groups
The groups (types) to which the clients belong either from ('Genuine', 'Impostor')
Note that 'eval' is an alias for 'Genuine'.
If no groups are specified, then bo... |
def play_sync(self):
"""
Play the video and block whilst the video is playing
"""
self.play()
logger.info("Playing synchronously")
try:
time.sleep(0.05)
logger.debug("Wait for playing to start")
while self.is_playing():
... | Play the video and block whilst the video is playing |
def err_exit(msg, rc=1):
    """Write *msg* to stderr, then terminate the process with exit code *rc*.

    :param msg: the message to report on stderr.
    :param rc: the process exit status (default 1).
    :raises SystemExit: always, with code *rc*.
    """
    sys.stderr.write("%s\n" % (msg,))
    sys.exit(rc)
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
""" dayofweek == 0 means Sunday, whichweek 5 means last instance """
first = datetime.datetime(year, month, 1, hour, minute)
# This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6),
# Because 7 % 7 = 0
weekdayo... | dayofweek == 0 means Sunday, whichweek 5 means last instance |
def most_visited_venues_card(num=10):
"""
Displays a card showing the Venues that have the most Events.
In spectator_core tags, rather than spectator_events so it can still be
used on core pages, even if spectator_events isn't installed.
"""
if spectator_apps.is_enabled('events'):
obje... | Displays a card showing the Venues that have the most Events.
In spectator_core tags, rather than spectator_events so it can still be
used on core pages, even if spectator_events isn't installed. |
def items(self) -> Iterable[Tuple[str, Any]]:
    """An iterable of (name, value) pairs.

    .. versionadded:: 3.1
    """
    # Iterate the dict values directly: the mapping key was unused
    # because every option object carries its own canonical .name.
    return [(opt.name, opt.value()) for opt in self._options.values()]
.. versionadded:: 3.1 |
def restore(self, state):
"""Restore a previous state of this stream walker.
Raises:
ArgumentError: If the state refers to a different selector or the
offset is invalid.
"""
selector = DataStreamSelector.FromString(state.get(u'selector'))
if selector... | Restore a previous state of this stream walker.
Raises:
ArgumentError: If the state refers to a different selector or the
offset is invalid. |
def get_default_config(self):
    """
    Return the default settings for this collector.

    Extends the parent collector's defaults with the NUMA-specific
    metrics path and the resolved location of the `numactl` binary.
    """
    config = super(NumaCollector, self).get_default_config()
    overrides = {
        'path': 'numa',
        'bin': self.find_binary('numactl'),
    }
    config.update(overrides)
    return config
def _set_mode(self, discover_mode, connect_mode):
"""Set the mode of the BLED112, used to enable and disable advertising
To enable advertising, use 4, 2.
To disable advertising use 0, 0.
Args:
discover_mode (int): The discoverability mode, 0 for off, 4 for on (user data)
... | Set the mode of the BLED112, used to enable and disable advertising
To enable advertising, use 4, 2.
To disable advertising use 0, 0.
Args:
discover_mode (int): The discoverability mode, 0 for off, 4 for on (user data)
connect_mode (int): The connectability mode, 0 for ... |
def iter_package_families(paths=None):
"""Iterate over package families, in no particular order.
Note that multiple package families with the same name can be returned.
Unlike packages, families later in the searchpath are not hidden by earlier
families.
Args:
paths (list of str, optional)... | Iterate over package families, in no particular order.
Note that multiple package families with the same name can be returned.
Unlike packages, families later in the searchpath are not hidden by earlier
families.
Args:
paths (list of str, optional): paths to search for package families,
... |
def close(self):
'''close the Mission Editor window'''
self.time_to_quit = True
self.close_window.release()
if self.child.is_alive():
self.child.join(1)
self.child.terminate()
self.mavlink_message_queue_handler.join()
self.event_queue_lock.acquire()... | close the Mission Editor window |
def send_command(self, *args, **kwargs):
"""
Send command to network device retrieve output until router_prompt or expect_string
By default this method will keep waiting to receive data until the network device prompt is
detected. The current network device prompt will be determined aut... | Send command to network device retrieve output until router_prompt or expect_string
By default this method will keep waiting to receive data until the network device prompt is
detected. The current network device prompt will be determined automatically.
command_string = command to execute
... |
def build_listen(self, listen_node):
"""parse `listen` sections, and return a config.Listen
Args:
listen_node (TreeNode): Description
Returns:
config.Listen: an object
"""
proxy_name = listen_node.listen_header.proxy_name.text
service_address_nod... | parse `listen` sections, and return a config.Listen
Args:
listen_node (TreeNode): Description
Returns:
config.Listen: an object |
def _get_bgp_route_attr(self, destination, vrf, next_hop, ip_version=4):
"""
BGP protocol attributes for get_route_tp
Only IPv4 supported
"""
CMD_SHIBNV = 'show ip bgp neighbors vrf {vrf} | include "is {neigh}"'
search_re_dict = {
"aspath": {
... | BGP protocol attributes for get_route_tp
Only IPv4 supported |
def register(linter):
    """Required entry point so the linter auto-registers this module's checkers."""
    for checker_class in (TypeChecker, IterableChecker):
        linter.register_checker(checker_class(linter))
def modify(
login, password=None, password_hashed=False,
domain=None, profile=None, script=None,
drive=None, homedir=None, fullname=None,
account_desc=None, account_control=None,
machine_sid=None, user_sid=None,
reset_login_hours=False, reset_bad_password_count=False,
):
'''
Modify user ... | Modify user account
login : string
login name
password : string
password
password_hashed : boolean
set if password is a nt hash instead of plain text
domain : string
users domain
profile : string
profile path
script : string
logon script
drive... |
def advance_for_next_slice(self, recovery_slice=False):
"""Advance self for next slice.
Args:
recovery_slice: True if this slice is running recovery logic.
See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
for more info.
"""
self.slice_start_time = None
self.sli... | Advance self for next slice.
Args:
recovery_slice: True if this slice is running recovery logic.
See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
for more info. |
def Hide(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:
    """
    Hide the window via native `ShowWindow(SW.Hide)`.

    waitTime: float, time to wait after issuing the call.
    Return bool, True if the call succeeded, otherwise False.
    """
    succeeded = self.ShowWindow(SW.Hide, waitTime)
    return succeeded
waitTime: float
Return bool, True if succeed otherwise False. |
def delete_datapoints_in_time_range(self, start_dt=None, end_dt=None):
"""Delete datapoints from this stream between the provided start and end times
If neither a start or end time is specified, all data points in the stream
will be deleted.
:param start_dt: The datetime after which da... | Delete datapoints from this stream between the provided start and end times
If neither a start or end time is specified, all data points in the stream
will be deleted.
:param start_dt: The datetime after which data points should be deleted or None
if all data points from the beginn... |
def _update_doc(self, func_doc):
"""更新文档信息,把原来的文档信息进行合并格式化,
即第一行为deprecated_doc(Deprecated: tip_info),下一行为原始func_doc"""
deprecated_doc = "Deprecated"
if self.tip_info:
deprecated_doc = "{}: {}".format(deprecated_doc, self.tip_info)
if func_doc:
func_doc = ... | 更新文档信息,把原来的文档信息进行合并格式化,
即第一行为deprecated_doc(Deprecated: tip_info),下一行为原始func_doc |
def push(cpu, value, size):
"""
Writes a value in the stack.
:param value: the value to put in the stack.
:param size: the size of the value.
"""
assert size in (8, 16, cpu.address_bit_size)
cpu.STACK = cpu.STACK - size // 8
base, _, _ = cpu.get_descripto... | Writes a value in the stack.
:param value: the value to put in the stack.
:param size: the size of the value. |
def savvyize(self, input_string, recursive=False, stemma=False):
'''
Determines which files should be processed
NB: this is the PUBLIC method
@returns filenames_list
'''
input_string = os.path.abspath(input_string)
tasks = []
restricted = [ symbol for sym... | Determines which files should be processed
NB: this is the PUBLIC method
@returns filenames_list |
def _get_connection(self):
"""Make SSH connection to the IOS XE device.
The external ncclient library is used for creating this connection.
This method keeps state of any existing connections and reuses them if
already connected. Also interfaces (except management) are typically
... | Make SSH connection to the IOS XE device.
The external ncclient library is used for creating this connection.
This method keeps state of any existing connections and reuses them if
already connected. Also interfaces (except management) are typically
disabled by default when it is booted... |
def _wrap(value):
"""
Wraps the passed value in a Sequence if it is not a primitive. If it is a string
argument it is expanded to a list of characters.
>>> _wrap(1)
1
>>> _wrap("abc")
['a', 'b', 'c']
>>> type(_wrap([1, 2]))
functional.pipeline.Sequence
:param value: value to ... | Wraps the passed value in a Sequence if it is not a primitive. If it is a string
argument it is expanded to a list of characters.
>>> _wrap(1)
1
>>> _wrap("abc")
['a', 'b', 'c']
>>> type(_wrap([1, 2]))
functional.pipeline.Sequence
:param value: value to wrap
:return: wrapped or n... |
def requeue(self, message_id, timeout=0, backoff=True):
"""Re-queue a message (indicate failure to process)."""
self.send(nsq.requeue(message_id, timeout))
self.finish_inflight()
self.on_requeue.send(
self,
message_id=message_id,
timeout=timeout,
... | Re-queue a message (indicate failure to process). |
def start_greedy_ensemble_search(automated_run, session, path):
"""Starts an automated ensemble search using greedy forward model selection.
The steps for this search are adapted from "Ensemble Selection from Libraries of Models" by
Caruana.
1. Start with the empty ensemble
2. Add to the ensemble... | Starts an automated ensemble search using greedy forward model selection.
The steps for this search are adapted from "Ensemble Selection from Libraries of Models" by
Caruana.
1. Start with the empty ensemble
2. Add to the ensemble the model in the library that maximizes the ensemmble's
performanc... |
def stopMessage(self, apiMsgId):
"""
See parent method for documentation
"""
content = self.parseRest(self.request('rest/message/' + apiMsgId, {}, {}, 'DELETE'))
return {
'id': content['apiMessageId'].encode('utf-8'),
'status': content['messageStatus'].e... | See parent method for documentation |
def _delete_resource(name, name_param, desc, res_type, wait=0, status_param=None,
status_gone='deleted', region=None, key=None, keyid=None, profile=None,
**args):
'''
Delete a generic Elasticache resource.
'''
try:
wait = int(wait)
except Exception:
... | Delete a generic Elasticache resource. |
def asset_create_task(self, *args, **kwargs):
"""Create a new task
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_asset:
return
task = self.create_task(element=self.cur_asset)
if task:
taskdata = djitemdata.TaskItemD... | Create a new task
:returns: None
:rtype: None
:raises: None |
def _refresh_outlineexplorer(self, index=None, update=True, clear=False):
"""Refresh outline explorer panel"""
oe = self.outlineexplorer
if oe is None:
return
if index is None:
index = self.get_stack_index()
if self.data:
finfo = self.d... | Refresh outline explorer panel |
def map_port(protocol, public_port, private_port, lifetime=3600,
gateway_ip=None, retry=9, use_exception=True):
"""A function to map public_port to private_port of protocol.
Returns the complete response on success.
protocol - NATPMP_PROTOCOL_UDP or NATPMP_PROTOCOL_TCP
... | A function to map public_port to private_port of protocol.
Returns the complete response on success.
protocol - NATPMP_PROTOCOL_UDP or NATPMP_PROTOCOL_TCP
public_port - the public port of the mapping requested
private_port - the private port of the mapping request... |
def customize_compiler_for_nvcc(compiler, nvcc_settings):
"""inject deep into distutils to customize gcc/nvcc dispatch """
# tell the compiler it can process .cu files
compiler.src_extensions.append('.cu')
# save references to the default compiler_so and _compile methods
default_compiler_so = comp... | inject deep into distutils to customize gcc/nvcc dispatch |
def jinja_loader(self):
"""Search templates in custom app templates dir (default Flask
behaviour), fallback on abilian templates."""
loaders = self._jinja_loaders
del self._jinja_loaders
loaders.append(Flask.jinja_loader.func(self))
loaders.reverse()
return jinja2... | Search templates in custom app templates dir (default Flask
behaviour), fallback on abilian templates. |
def get_ppis(self, ppi_df):
"""Generate Complex Statements from the HPRD PPI data.
Parameters
----------
ppi_df : pandas.DataFrame
DataFrame loaded from the BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt
file.
"""
logger.info('Processing PPIs...')
... | Generate Complex Statements from the HPRD PPI data.
Parameters
----------
ppi_df : pandas.DataFrame
DataFrame loaded from the BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt
file. |
def document_frequencies(self, hashes):
'''Get document frequencies for a list of hashes.
This will return all zeros unless the index was written with
`hash_frequencies` set. If :data:`DOCUMENT_HASH_KEY` is
included in `hashes`, that value will be returned with the
total number... | Get document frequencies for a list of hashes.
This will return all zeros unless the index was written with
`hash_frequencies` set. If :data:`DOCUMENT_HASH_KEY` is
included in `hashes`, that value will be returned with the
total number of documents indexed. If you are looking for
... |
def create_account(self, body, **kwargs): # noqa: E501
"""Create a new account. # noqa: E501
An endpoint for creating a new account. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts -d '{\"display_name\": \"MyAccount1\", \"admin_name\": \"accountAdmin1\", \"email\": ... | Create a new account. # noqa: E501
An endpoint for creating a new account. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts -d '{\"display_name\": \"MyAccount1\", \"admin_name\": \"accountAdmin1\", \"email\": \"example_admin@myaccount.info\"}' -H 'content-type: application/js... |
def TargetDirectory(ID, season, relative=False, **kwargs):
'''
Returns the location of the :py:mod:`everest` data on disk
for a given target.
:param ID: The target ID
:param int season: The target season number
:param bool relative: Relative path? Default :py:obj:`False`
'''
if season... | Returns the location of the :py:mod:`everest` data on disk
for a given target.
:param ID: The target ID
:param int season: The target season number
:param bool relative: Relative path? Default :py:obj:`False` |
def section_tortuosity(section):
'''Tortuosity of a section
The tortuosity is defined as the ratio of the path length of a section
    and the euclidean distance between its end points.
The path length is the sum of distances between consecutive points.
If the section contains less than 2 points, the ... | Tortuosity of a section
The tortuosity is defined as the ratio of the path length of a section
and the euclidean distance between its end points.
The path length is the sum of distances between consecutive points.
If the section contains less than 2 points, the value 1 is returned. |
def prepare_url(hostname, path, params=None):
"""
Prepare Elasticsearch request url.
:param hostname: host name
:param path: request path
:param params: optional url params
:return:
"""
url = hostname + path
if params:
url = url + '?' ... | Prepare Elasticsearch request url.
:param hostname: host name
:param path: request path
:param params: optional url params
:return: |
def _parse_date_rfc822(dateString):
'''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
data = dateString.split()
if data[0][-1] in (',', '.') or data[0].lower() in _daynames:
del data[0]
if len(data) == 4:
s = data[3]
s = s.split('+', 1)
if len(s) == 2:
... | Parse an RFC822, RFC1123, RFC2822, or asctime-style date |
def ack(self, device_uuid, ack_keys):
"""
Acknowledge received data
Send acknowledgement keys to let know the Sync service which data you have.
As you fetch new data, you need to send acknowledgement keys.
:calls: ``post /sync/ack``
:param string device_uuid: Device's U... | Acknowledge received data
Send acknowledgement keys to let know the Sync service which data you have.
As you fetch new data, you need to send acknowledgement keys.
:calls: ``post /sync/ack``
:param string device_uuid: Device's UUID for which to perform synchronization.
:param l... |
def do_symbols_matching(self):
"""
Performs symbols matching.
"""
self._clear_decorations()
current_block = self.editor.textCursor().block()
data = get_block_symbol_data(self.editor, current_block)
pos = self.editor.textCursor().block().position()
for symb... | Performs symbols matching. |
def add_contacts(
self,
contacts: List["pyrogram.InputPhoneContact"]
):
"""Use this method to add contacts to your Telegram address book.
Args:
contacts (List of :obj:`InputPhoneContact <pyrogram.InputPhoneContact>`):
The contact list to be added
... | Use this method to add contacts to your Telegram address book.
Args:
contacts (List of :obj:`InputPhoneContact <pyrogram.InputPhoneContact>`):
The contact list to be added
Returns:
On success, the added contacts are returned.
Raises:
:class:... |
def make_signature(name, params, common_params, common_param_values):
"""
Create a signature for a geom or stat
Gets the DEFAULT_PARAMS (params) and creates are comma
separated list of the `name=value` pairs. The common_params
come first in the list, and they get take their values from
either t... | Create a signature for a geom or stat
Gets the DEFAULT_PARAMS (params) and creates are comma
separated list of the `name=value` pairs. The common_params
come first in the list, and they get take their values from
either the params-dict or the common_geom_param_values-dict. |
def copy_files(filelist, destdir):
"""Copy a list of files to destdir, preserving directory structure.
File names should be relative to the current working directory.
"""
for filename in filelist:
destfile = os.path.join(destdir, filename)
# filename should not be absolute, but let's do... | Copy a list of files to destdir, preserving directory structure.
File names should be relative to the current working directory. |
def ts_func(f):
"""
This wraps a function that would normally only accept an array
and allows it to operate on a DataFrame. Useful for applying
numpy functions to DataFrames.
"""
def wrap_func(df, *args):
# TODO: should vectorize to apply over all columns?
return Chromatogram(f(d... | This wraps a function that would normally only accept an array
and allows it to operate on a DataFrame. Useful for applying
numpy functions to DataFrames. |
def __voronoi_finite_polygons_2d(vor, radius=None):
"""
Reconstruct infinite voronoi regions in a 2D diagram to finite
regions.
Parameters
----------
vor : Voronoi
Input diagram
radius : float, optional
Distance to 'points at infinity'.
... | Reconstruct infinite voronoi regions in a 2D diagram to finite
regions.
Parameters
----------
vor : Voronoi
Input diagram
radius : float, optional
Distance to 'points at infinity'.
Returns
-------
regions : list of tuples
... |
def create_shield_layer(shield, hashcode):
    """Creates the layer for shields."""
    # Shield templates live in <package>/pgn/<shield>.pgn
    pgn_path = '%s%spgn%s' % (PACKAGE_DIR, os.sep, os.sep) + shield + '.pgn'
    return pgnreader.parse_pagan_file(pgn_path, hashcode, sym=False, invert=False)
def push_dcp(event, callback, position='right'):
"""Push a callable for :class:`~flask_pluginkit.PluginManager`, :func:`push_dcp`.
Example usage::
push_dcp('demo', lambda:'Hello dcp')
.. versionadded:: 2.1.0
"""
ctx = stack.top
ctx.app.extensions.get('pluginkit').push_dcp(event, callb... | Push a callable for :class:`~flask_pluginkit.PluginManager`, :func:`push_dcp`.
Example usage::
push_dcp('demo', lambda:'Hello dcp')
.. versionadded:: 2.1.0 |
def make_index(gff_file):
"""
Make a sqlite database for fast retrieval of features.
"""
import gffutils
db_file = gff_file + ".db"
if need_update(gff_file, db_file):
if op.exists(db_file):
os.remove(db_file)
logging.debug("Indexing `{0}`".format(gff_file))
g... | Make a sqlite database for fast retrieval of features. |
def add_section(self, name=None, anchor=None, description='', comment='', helptext='', plot='', content='', autoformat=True, autoformat_type='markdown'):
""" Add a section to the module report output """
# Default anchor
if anchor is None:
if name is not None:
nid = ... | Add a section to the module report output |
def monthly(usaf, year, field='GHI (W/m^2)'):
"""monthly insolation"""
m = []
lastm = 1
usafdata = Data(usaf, year)
t = 0
for r in usafdata:
r['GHI (W/m^2)'] = r['Glo Mod (Wh/m^2)']
r['DHI (W/m^2)'] = r['Dif Mod (Wh/m^2)']
r['DNI (W/m^2)'] = r['Dir Mod (Wh/m^2)']
... | monthly insolation |
def parse_napp(napp_id):
"""Convert a napp_id in tuple with username, napp name and version.
Args:
napp_id: String with the form 'username/napp[:version]' (version is
optional). If no version is found, it will be None.
Returns:
tuple: A tuple with (username, napp, version... | Convert a napp_id in tuple with username, napp name and version.
Args:
napp_id: String with the form 'username/napp[:version]' (version is
optional). If no version is found, it will be None.
Returns:
tuple: A tuple with (username, napp, version)
Raises:
KytosExce... |
def constraints(self):
    """
    :rtype tuple
    :return: All constraints represented by this and parent sets.
    """
    own = tuple(self._constraints)
    if self._parent is None:
        return own
    # Append the parent chain's constraints after this set's own.
    return own + self._parent.constraints
:return: All constraints represented by this and parent sets. |
def intersect_arc(self, arc):
'''
Given an arc, finds the intersection point(s) of this arc with that.
Returns a list of 2x1 numpy arrays. The list has length 0, 1 or 2, depending on how many intesection points there are.
Points are ordered along the arc.
Intersection with the ar... | Given an arc, finds the intersection point(s) of this arc with that.
Returns a list of 2x1 numpy arrays. The list has length 0, 1 or 2, depending on how many intesection points there are.
Points are ordered along the arc.
Intersection with the arc along the same circle (which means infinitely ma... |
def convert_multiPointSource(self, node):
"""
Convert the given node into a MultiPointSource object.
:param node: a node with tag multiPointGeometry
:returns: a :class:`openquake.hazardlib.source.MultiPointSource`
"""
geom = node.multiPointGeometry
lons, lats = z... | Convert the given node into a MultiPointSource object.
:param node: a node with tag multiPointGeometry
:returns: a :class:`openquake.hazardlib.source.MultiPointSource` |
def git_log_iterator(path):
""" yield commits using git log -- <dir> """
N = 10
count = 0
while True:
lines = _run_git_command_lines(['log', '--oneline',
'-n', str(N),
'--skip', str(count),
... | yield commits using git log -- <dir> |
def clear_lock(self, key):
    """
    Remove the lock file.
    """
    # Resolve the lock file for `key` and delete it; raises OSError
    # (FileNotFoundError) if the lock does not exist, same as before.
    os.remove(self._get_lock_path(key))
def is_all_field_none(self):
    """
    :rtype: bool
    """
    # True only when every optional field is unset.
    return all(
        field is None
        for field in (self._share_detail, self._start_date, self._end_date)
    )
def _temporary_keychain():
"""
This function creates a temporary Mac keychain that we can use to work with
credentials. This keychain uses a one-time password and a temporary file to
store the data. We expect to have one keychain per socket. The returned
SecKeychainRef must be freed by the caller, i... | This function creates a temporary Mac keychain that we can use to work with
credentials. This keychain uses a one-time password and a temporary file to
store the data. We expect to have one keychain per socket. The returned
SecKeychainRef must be freed by the caller, including calling
SecKeychainDelete.... |
def setdefault(self, key, *args):
    """Set lowercase key value and return.

    :param key: string key; lowercased before delegating to dict.setdefault
    :param args: optional default value forwarded to dict.setdefault
    :raises TypeError: if key is not a string
    """
    # Fix: `basestring` is Python-2-only and raises NameError on Python 3;
    # also, `assert` is stripped under -O, so validate explicitly instead.
    if not isinstance(key, str):
        raise TypeError("key must be a string, got %r" % type(key).__name__)
    return dict.setdefault(self, key.lower(), *args)
def get_step_f(step_f, lR2, lS2):
    """Update the stepsize given the primal and dual errors.

    See Boyd (2011), section 3.4.1
    """
    imbalance, scale = 10, 2
    if lR2 > imbalance * lS2:
        # Primal residual dominates: grow the step.
        return step_f * scale
    if lS2 > imbalance * lR2:
        # Dual residual dominates: shrink the step.
        return step_f / scale
    # Residuals are balanced: keep the step unchanged.
    return step_f
See Boyd (2011), section 3.4.1 |
def wgan(cls, data:DataBunch, generator:nn.Module, critic:nn.Module, switcher:Callback=None, clip:float=0.01, **learn_kwargs):
    "Create a WGAN from `data`, `generator` and `critic`."
    # Generator trains with NoopLoss, critic with WassersteinLoss;
    # `clip` is forwarded to the learner (presumably WGAN weight clipping — confirm).
    gen_loss = NoopLoss()
    crit_loss = WassersteinLoss()
    return cls(data, generator, critic, gen_loss, crit_loss,
               switcher=switcher, clip=clip, **learn_kwargs)
def from_coords(cls, coords, sort=True):
"""
Create a mesh object from a list of 3D coordinates (by sorting them)
:params coords: list of coordinates
:param sort: flag (default True)
:returns: a :class:`Mesh` instance
"""
coords = list(coords)
if sort:
... | Create a mesh object from a list of 3D coordinates (by sorting them)
:params coords: list of coordinates
:param sort: flag (default True)
:returns: a :class:`Mesh` instance |
def change(self, inpt, hashfun=DEFAULT_HASHFUN):
    """Change the avatar by providing a new input.
    Uses the standard hash function if no one is given."""
    # Regenerate the image from the new input and store it.
    new_image = self.__create_image(inpt, hashfun)
    self.img = new_image
Uses the standard hash function if no one is given. |
def join_path(a, *p):
"""Join path tokens together similar to osp.join, but always use
'/' instead of possibly '\' on windows."""
path = a
for b in p:
if len(b) == 0:
continue
if b.startswith('/'):
path += b[1:]
elif path == '' or path.endswith('/'):
... | Join path tokens together similar to osp.join, but always use
'/' instead of possibly '\' on windows. |
def sudo_yield_file_lines(file_path='/etc/NetworkManager/system-connections/*'):
r"""Cat a file iterating/yielding one line at a time,
shell will execute: `sudo cat $file_path` so if your shell doesn't have sudo or cat, no joy
Input:
file_path(str): glob stars are fine
>> for line ... | r"""Cat a file iterating/yielding one line at a time,
shell will execute: `sudo cat $file_path` so if your shell doesn't have sudo or cat, no joy
Input:
file_path(str): glob stars are fine
>> for line in sudo_yield_file_lines('/etc/NetworkManager/system-connections/*') |
def get_symbols_list(self):
'''Return a list of GdxSymb found in the GdxFile.'''
slist = []
rc, nSymb, nElem = gdxcc.gdxSystemInfo(self.gdx_handle)
assert rc, 'Unable to retrieve "%s" info' % self.filename
self.number_symbols = nSymb
self.number_elements = nElem
s... | Return a list of GdxSymb found in the GdxFile. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.