code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def Dropout(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Augmenter that sets a certain fraction of pixels in images to zero.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float o... | Augmenter that sets a certain fraction of pixels in images to zero.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped... |
def display_files(self, pcs_files):
'''重新格式化一下文件列表, 去除不需要的信息
这一操作主要是为了便于接下来的查找工作.
文件的path都被提取出来, 然后放到了一个list中.
'''
tree_iters = []
for pcs_file in pcs_files:
path = pcs_file['path']
pixbuf, type_ = self.app.mime.get(path, pcs_file['isdir'],
... | 重新格式化一下文件列表, 去除不需要的信息
这一操作主要是为了便于接下来的查找工作.
文件的path都被提取出来, 然后放到了一个list中. |
def from_tuples(cls, tups):
"""
Create a new IntervalTree from an iterable of 2- or 3-tuples,
where the tuple lists begin, end, and optionally data.
"""
ivs = [Interval(*t) for t in tups]
return IntervalTree(ivs) | Create a new IntervalTree from an iterable of 2- or 3-tuples,
where the tuple lists begin, end, and optionally data. |
def local_lru(obj):
""" Property that maps to a key in a local dict-like attribute.
self._cache must be an OrderedDict
self._cache_size must be defined as LRU size
..
class Foo(object):
def __init__(self, cache_size=5000):
self._cache = OrderedDict()
... | Property that maps to a key in a local dict-like attribute.
self._cache must be an OrderedDict
self._cache_size must be defined as LRU size
..
class Foo(object):
def __init__(self, cache_size=5000):
self._cache = OrderedDict()
self._cache_size... |
def _deallocator(self):
"""Returns the name of the subroutine in ftypes_dealloc.f90 that can
deallocate the array for this Ftype's pointer.
:arg ctype: the string c-type of the variable.
"""
lookup = {
"c_bool": "logical",
"c_double": "double",
... | Returns the name of the subroutine in ftypes_dealloc.f90 that can
deallocate the array for this Ftype's pointer.
:arg ctype: the string c-type of the variable. |
def get_querystring(self):
"""
Clean existing query string (GET parameters) by removing
arguments that we don't want to preserve (sort parameter, 'page')
"""
to_remove = self.get_querystring_parameter_to_remove()
query_string = urlparse(self.request.get_full_path()).query... | Clean existing query string (GET parameters) by removing
arguments that we don't want to preserve (sort parameter, 'page') |
def run(self, deploy_attempted=False):
'''
Execute the routine, the routine can be either:
1. Execute a raw shell command
2. Execute a wrapper func
3. Execute a remote Salt command
If a (re)deploy is needed, then retry the operation after a deploy
attempt
... | Execute the routine, the routine can be either:
1. Execute a raw shell command
2. Execute a wrapper func
3. Execute a remote Salt command
If a (re)deploy is needed, then retry the operation after a deploy
attempt
Returns tuple of (stdout, stderr, retcode) |
def get_fresh_primary_tumors(biospecimen):
"""Filter biospecimen data to only keep non-FFPE primary tumor samples.
Parameters
----------
biospecimen : `pandas.DataFrame`
The biospecimen data frame. This type of data frame is returned by
:meth:`get_biospecimen_data`.
Returns... | Filter biospecimen data to only keep non-FFPE primary tumor samples.
Parameters
----------
biospecimen : `pandas.DataFrame`
The biospecimen data frame. This type of data frame is returned by
:meth:`get_biospecimen_data`.
Returns
-------
`pandas.DataFrame`
The fi... |
def do_loop_turn(self):
# pylint: disable=too-many-branches, too-many-statements, too-many-locals
"""Loop turn for Arbiter
If not a master daemon, wait for my master death...
Else, run:
* Check satellites are alive
* Check and dispatch (if needed) the configuration
... | Loop turn for Arbiter
If not a master daemon, wait for my master death...
Else, run:
* Check satellites are alive
* Check and dispatch (if needed) the configuration
* Get broks and external commands from the satellites
* Push broks and external commands to the satellites... |
def load(self, name):
"""Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
"""
try:
return self._factories[name]()
except KeyError:
raise ValueError(
... | Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered. |
def show_firmware_version_output_show_firmware_version_node_info_firmware_version_info_secondary_version(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_firmware_version = ET.Element("show_firmware_version")
config = show_firmware_version
ou... | Auto Generated Code |
def load_extracted(src_dir: str,
patterns="*.npy",
vars_in_cols: bool = True,
index: pd.Series = None):
"""Load data extracted and stored by :py:func:`extract`
Arguments:
src_dir {str} -- The directory where the data is stored.
Keyword Argum... | Load data extracted and stored by :py:func:`extract`
Arguments:
src_dir {str} -- The directory where the data is stored.
Keyword Arguments:
patterns {str, or list of str} -- A pattern (str) or list of patterns (list)
to identify the variables to be loaded.
The default l... |
def dad_status_output_dad_status_entries_message(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
dad_status = ET.Element("dad_status")
config = dad_status
output = ET.SubElement(dad_status, "output")
dad_status_entries = ET.SubElement(out... | Auto Generated Code |
def get_space(self, space_key, expand='description.plain,homepage'):
"""
Get information about a space through space key
:param space_key: The unique space key name
:param expand: OPTIONAL: additional info from description, homepage
:return: Returns the space along with its ID
... | Get information about a space through space key
:param space_key: The unique space key name
:param expand: OPTIONAL: additional info from description, homepage
:return: Returns the space along with its ID |
def client_id(self, client):
"""
Get a client's ID. Uses GET to /clients?name=<client> interface.
:Args:
* *client*: (str) Client's name
:Returns: (str) Client id
"""
params = {
"name": client
}
response = self._get(url.clients... | Get a client's ID. Uses GET to /clients?name=<client> interface.
:Args:
* *client*: (str) Client's name
:Returns: (str) Client id |
def df2chucks(din,chunksize,outd,fn,return_fmt='\t',force=False):
"""
:param return_fmt: '\t': tab-sep file, lly, '.', 'list': returns a list
"""
from os.path import exists#,splitext,dirname,splitext,basename,realpath
from os import makedirs
din.index=range(0,len(din),1)
chunkrange=list(np.... | :param return_fmt: '\t': tab-sep file, lly, '.', 'list': returns a list |
def load(cosmicFiles, tag=None, sat_id=None):
"""
cosmic data load routine, called by pysat
"""
import netCDF4
num = len(cosmicFiles)
# make sure there are files to read
if num != 0:
# call separate load_files routine, segemented for possible
# multiprocessor load, not in... | cosmic data load routine, called by pysat |
def split_task_parameters(line):
""" Split a string of comma separated words."""
if line is None:
result = []
else:
result = [parameter.strip() for parameter in line.split(",")]
return result | Split a string of comma separated words. |
def add_field(self, fieldname, fieldspec=whoosh_module_fields.TEXT):
"""Add a field in the index of the model.
Args:
fieldname (Text): This parameters register a new field in specified model.
fieldspec (Name, optional): This option adds various options as were described before.
Returns:
... | Add a field in the index of the model.
Args:
fieldname (Text): This parameters register a new field in specified model.
fieldspec (Name, optional): This option adds various options as were described before.
Returns:
TYPE: The new schema after deleted is returned. |
def _desy_bookkeeping(self, key, value):
"""Populate the ``_desy_bookkeeping`` key."""
return {
'date': normalize_date(value.get('d')),
'expert': force_single_element(value.get('a')),
'status': value.get('s'),
} | Populate the ``_desy_bookkeeping`` key. |
def safe_join(directory, *pathnames):
"""Safely join `directory` and one or more untrusted `pathnames`. If this
cannot be done, this function returns ``None``.
:param directory: the base directory.
:param pathnames: the untrusted pathnames relative to that directory.
"""
parts = [directory]
... | Safely join `directory` and one or more untrusted `pathnames`. If this
cannot be done, this function returns ``None``.
:param directory: the base directory.
:param pathnames: the untrusted pathnames relative to that directory. |
def get_transform(offset, scale):
'''
Parameters
----------
offset : pandas.Series
Cartesian ``(x, y)`` coordinate of offset origin.
scale : pandas.Series
Scaling factor for ``x`` and ``y`` dimensions.
Returns
-------
pandas.DataFrame
3x3 transformation matrix re... | Parameters
----------
offset : pandas.Series
Cartesian ``(x, y)`` coordinate of offset origin.
scale : pandas.Series
Scaling factor for ``x`` and ``y`` dimensions.
Returns
-------
pandas.DataFrame
3x3 transformation matrix resulting in specified `x/y` offset and
... |
def setex(self, name, time, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = int(time.total_seco... | Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object. |
def expand_item(self, item, open_all=True):
"""Display a node as expanded
:param item: The item to show expanded
:param open_all: Whether all child nodes should be recursively
expanded.
"""
self.expand_row(self._view_path_for(item), open_all) | Display a node as expanded
:param item: The item to show expanded
:param open_all: Whether all child nodes should be recursively
expanded. |
def addChromosome(
self, chrom, tax_id, tax_label=None, build_id=None, build_label=None):
"""
if it's just the chromosome, add it as an instance of a SO:chromosome,
and add it to the genome. If a build is included,
punn the chromosome as a subclass of SO:chromsome, and make t... | if it's just the chromosome, add it as an instance of a SO:chromosome,
and add it to the genome. If a build is included,
punn the chromosome as a subclass of SO:chromsome, and make the
build-specific chromosome an instance of the supplied chr.
The chr then becomes part of the build or ge... |
def gen_round_trip_stats(round_trips):
"""Generate various round-trip statistics.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
Returns
-------
stats : dict
A di... | Generate various round-trip statistics.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
Returns
-------
stats : dict
A dictionary where each value is a pandas DataFram... |
def build_rrule(count=None, interval=None, bysecond=None, byminute=None,
byhour=None, byweekno=None, bymonthday=None, byyearday=None,
bymonth=None, until=None, bysetpos=None, wkst=None, byday=None,
freq=None):
"""
Build rrule dictionary for vRecur class.
:par... | Build rrule dictionary for vRecur class.
:param count: int
:param interval: int
:param bysecond: int
:param byminute: int
:param byhour: int
:param byweekno: int
:param bymonthday: int
:param byyearday: int
:param bymonth: int
:param until: datetime
:param bysetpos: int
... |
def connect(self, recver):
"""
Rewire:
s1 -> m1 <- r1 --> s2 -> m2 <- r2
To:
s1 -> m1 <- r2
"""
r1 = recver
m1 = r1.middle
s2 = self
m2 = self.middle
r2 = self.other
r2.middle = m1
del m2.sender
del ... | Rewire:
s1 -> m1 <- r1 --> s2 -> m2 <- r2
To:
s1 -> m1 <- r2 |
def raw_datastream_old(request, pid, dsid, type=None, repo=None,
headers=None, accept_range_request=False,
as_of_date=None, streaming=False):
'''
.. NOTE::
This version of :meth:`raw_datastream` is deprecated, and you
should update to the new :meth:... | .. NOTE::
This version of :meth:`raw_datastream` is deprecated, and you
should update to the new :meth:`raw_datastream`. This version
is still available if you are using a version of Fedora
prior to 3.7 and need the additional functionality.
View to display a raw datastream that b... |
def get_gradient_y(shape, py):
"""Calculate the gradient in the y direction to the line at py
The y gradient operator is a block matrix, where each block is the size of the image width.
The matrix itself is made up of (img_height x img_height) blocks, most of which are all zeros.
"""
import scipy.s... | Calculate the gradient in the y direction to the line at py
The y gradient operator is a block matrix, where each block is the size of the image width.
The matrix itself is made up of (img_height x img_height) blocks, most of which are all zeros. |
def loadSenderKey(self, senderKeyName):
"""
:type senderKeyName: SenderKeyName
"""
q = "SELECT record FROM sender_keys WHERE group_id = ? and sender_id = ?"
cursor = self.dbConn.cursor()
cursor.execute(q, (senderKeyName.getGroupId(), senderKeyName.getSender().getName()))
... | :type senderKeyName: SenderKeyName |
def p_for_stmt(p):
"""
for_stmt : FOR ident EQ expr SEMI stmt_list END_STMT
| FOR LPAREN ident EQ expr RPAREN SEMI stmt_list END_STMT
| FOR matrix EQ expr SEMI stmt_list END_STMT
"""
if len(p) == 8:
if not isinstance(p[2], node.ident):
raise_exception(Syntax... | for_stmt : FOR ident EQ expr SEMI stmt_list END_STMT
| FOR LPAREN ident EQ expr RPAREN SEMI stmt_list END_STMT
| FOR matrix EQ expr SEMI stmt_list END_STMT |
def mp2q(p, q):
"""Compute the MP2Q measure.
Args:
p (np.ndarray): The unpartitioned repertoire
q (np.ndarray): The partitioned repertoire
"""
p, q = flatten(p), flatten(q)
entropy_dist = 1 / len(p)
return sum(entropy_dist * np.nan_to_num((p ** 2) / q * np.log(p / q))) | Compute the MP2Q measure.
Args:
p (np.ndarray): The unpartitioned repertoire
q (np.ndarray): The partitioned repertoire |
def _generate_subscribe_headers(self):
"""
generate the subscribe stub headers based on the supplied config
:return: i
"""
headers =[]
headers.append(('predix-zone-id', self.eventhub_client.zone_id))
token = self.eventhub_client.service._get_bearer_token()
... | generate the subscribe stub headers based on the supplied config
:return: i |
def authorize_handler(self, f):
"""Authorization handler decorator.
This decorator will sort the parameters and headers out, and
pre validate everything::
@app.route('/oauth/authorize', methods=['GET', 'POST'])
@oauth.authorize_handler
def authorize(*args, *... | Authorization handler decorator.
This decorator will sort the parameters and headers out, and
pre validate everything::
@app.route('/oauth/authorize', methods=['GET', 'POST'])
@oauth.authorize_handler
def authorize(*args, **kwargs):
if request.method... |
def do_run_one(self, args):
'''run a single job'''
work_spec_names = args.from_work_spec or None
worker = SingleWorker(self.config, task_master=self.task_master, work_spec_names=work_spec_names, max_jobs=args.max_jobs)
worker.register()
rc = False
starttime = time.time()
... | run a single job |
def format_rst(self):
"""
return table in RST format
"""
res = ''
num_cols = len(self.header)
col_width = 25
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
for c in self.header:
... | return table in RST format |
def preserve_set_th1_add_directory(state=True):
"""
Context manager to temporarily set TH1.AddDirectory() state
"""
with LOCK:
status = ROOT.TH1.AddDirectoryStatus()
try:
ROOT.TH1.AddDirectory(state)
yield
finally:
ROOT.TH1.AddDirectory(status) | Context manager to temporarily set TH1.AddDirectory() state |
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remaining_life(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get... | Auto Generated Code |
def unique_slugify(instance, value, slug_field_name='slug', queryset=None,
slug_separator='-'):
"""
Calculates and stores a unique slug of ``value`` for an instance.
``slug_field_name`` should be a string matching the name of the field to
store the slug in (and the field to check aga... | Calculates and stores a unique slug of ``value`` for an instance.
``slug_field_name`` should be a string matching the name of the field to
store the slug in (and the field to check against for uniqueness).
``queryset`` usually doesn't need to be explicitly provided - it'll default
to using the ``.all(... |
def remove_volatile(type_):
"""removes volatile from the type definition
If type is not volatile type, it will be returned as is
"""
nake_type = remove_alias(type_)
if not is_volatile(nake_type):
return type_
else:
if isinstance(nake_type, cpptypes.array_t):
is_c = i... | removes volatile from the type definition
If type is not volatile type, it will be returned as is |
def data_received(self, data):
"""Handle data received."""
self.tokenizer.feed(data)
while self.tokenizer.has_tokens():
raw = self.tokenizer.get_next_token()
frame = frame_from_raw(raw)
if frame is not None:
self.frame_received_cb(frame) | Handle data received. |
def target(self):
"""
Find the target name for this build.
:returns: deferred that when fired returns the build task's target
name. If we could not determine the build task, or the task's
target, return None.
"""
task = yield self.task()
... | Find the target name for this build.
:returns: deferred that when fired returns the build task's target
name. If we could not determine the build task, or the task's
target, return None. |
def hpss_demo(input_file, output_harmonic, output_percussive):
'''HPSS demo function.
:parameters:
- input_file : str
path to input audio
- output_harmonic : str
path to save output harmonic (wav)
- output_percussive : str
path to save output harmonic (wav)
'... | HPSS demo function.
:parameters:
- input_file : str
path to input audio
- output_harmonic : str
path to save output harmonic (wav)
- output_percussive : str
path to save output harmonic (wav) |
def flexifunction_directory_send(self, target_system, target_component, directory_type, start_index, count, directory_data, force_mavlink1=False):
'''
Acknowldge sucess or failure of a flexifunction command
target_system : System ID (uint8_t)
... | Acknowldge sucess or failure of a flexifunction command
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
directory_type : 0=inputs, 1=outputs (uint8_t)
start_index : index of first... |
def colorize(self, colormap):
"""Colorize the current image using
*colormap*. Works only on"L" or "LA" images.
"""
if self.mode not in ("L", "LA"):
raise ValueError("Image should be grayscale to colorize")
if self.mode == "LA":
alpha = self.channels[1]
... | Colorize the current image using
*colormap*. Works only on"L" or "LA" images. |
def find(node, filter_=None, stop=None, maxlevel=None):
"""
Search for *single* node matching `filter_` but stop at `maxlevel` or `stop`.
Return matching node.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is re... | Search for *single* node matching `filter_` but stop at `maxlevel` or `stop`.
Return matching node.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` f... |
def combine_metadata(*metadata_objects, **kwargs):
"""Combine the metadata of two or more Datasets.
If any keys are not equal or do not exist in all provided dictionaries
then they are not included in the returned dictionary.
By default any keys with the word 'time' in them and consisting
of dateti... | Combine the metadata of two or more Datasets.
If any keys are not equal or do not exist in all provided dictionaries
then they are not included in the returned dictionary.
By default any keys with the word 'time' in them and consisting
of datetime objects will be averaged. This is to handle cases where... |
def enable(self):
"""Return True|False if the AMP is enabled in the configuration file (enable=true|false)."""
ret = self.get('enable')
if ret is None:
return False
else:
return ret.lower().startswith('true') | Return True|False if the AMP is enabled in the configuration file (enable=true|false). |
def entrez_sets_of_results(url, retstart=False, retmax=False, count=False) -> Optional[List[requests.Response]]:
"""Gets sets of results back from Entrez.
Entrez can only return 500 results at a time. This creates a generator that gets results by incrementing
retstart and retmax.
Parameters
------... | Gets sets of results back from Entrez.
Entrez can only return 500 results at a time. This creates a generator that gets results by incrementing
retstart and retmax.
Parameters
----------
url : str
The Entrez API url to use.
retstart : int
Return values starting at this index.
... |
def _checkstatus(status, line):
"""Returns state/status after reading the next line.
The status codes are::
LE07_clip_L1TP_039027_20150529_20160902_01_T1_B1.TIF - BEGIN parsing; 1 - ENTER METADATA GROUP, 2 - READ METADATA LINE,
3 - END METDADATA GROUP, 4 - END PARSING
Permitted Transitions::... | Returns state/status after reading the next line.
The status codes are::
LE07_clip_L1TP_039027_20150529_20160902_01_T1_B1.TIF - BEGIN parsing; 1 - ENTER METADATA GROUP, 2 - READ METADATA LINE,
3 - END METDADATA GROUP, 4 - END PARSING
Permitted Transitions::
LE07_clip_L1TP_039027_20150529... |
def get_bucket(self, hash_name, bucket_key):
"""
Returns bucket content as list of tuples (vector, data).
"""
results = []
for row in self._get_bucket_rows(hash_name, bucket_key):
val_dict = pickle.loads(row)
# Depending on type (sparse or not) reconstruct... | Returns bucket content as list of tuples (vector, data). |
def _add_grid_attributes(self, ds):
"""Add model grid attributes to a dataset"""
for name_int, names_ext in self._grid_attrs.items():
ds_coord_name = set(names_ext).intersection(set(ds.coords) |
set(ds.data_vars))
model_attr... | Add model grid attributes to a dataset |
def straddle(self, strike, expiry):
"""
Metrics for evaluating a straddle.
Parameters
------------
strike : numeric
Strike price.
expiry : date or date str (e.g. '2015-01-01')
Expiration date.
Returns
------------
metrics ... | Metrics for evaluating a straddle.
Parameters
------------
strike : numeric
Strike price.
expiry : date or date str (e.g. '2015-01-01')
Expiration date.
Returns
------------
metrics : DataFrame
Metrics for evaluating straddle. |
def stream(self, from_=values.unset, to=values.unset,
date_created_on_or_before=values.unset,
date_created_after=values.unset, limit=None, page_size=None):
"""
Streams FaxInstance records from the API as a generator stream.
This operation lazily loads records as eff... | Streams FaxInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode from_: Retrieve only those faxes sent... |
def create_app(debug=False):
"""
Create the flask app
:param debug: Use debug mode
:type debug: bool
:return: Created app
:rtype: flask.Flask
"""
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
app.json_encoder = DateTimeEncoder
app.register_blueprint(page)
Bowe... | Create the flask app
:param debug: Use debug mode
:type debug: bool
:return: Created app
:rtype: flask.Flask |
def make_url(path, protocol=None, hosts=None):
"""Make an URL given a path, and optionally, a protocol and set of
hosts to select from randomly.
:param path: The Archive.org path.
:type path: str
:param protocol: (optional) The HTTP protocol to use. "https://" is
used by defau... | Make an URL given a path, and optionally, a protocol and set of
hosts to select from randomly.
:param path: The Archive.org path.
:type path: str
:param protocol: (optional) The HTTP protocol to use. "https://" is
used by default.
:type protocol: str
:param hosts: (option... |
def get_users_for_assigned_to():
""" Return a list of users who can be assigned to workflow states """
User = get_user_model()
return User.objects.filter(is_active=True, is_staff=True) | Return a list of users who can be assigned to workflow states |
def get_by_name(self, name):
"""
Gets a SAN Manager by name.
Args:
name: Name of the SAN Manager
Returns:
dict: SAN Manager.
"""
san_managers = self._client.get_all()
result = [x for x in san_managers if x['name'] == name]
return ... | Gets a SAN Manager by name.
Args:
name: Name of the SAN Manager
Returns:
dict: SAN Manager. |
def get_currency(self, code):
"""
Helper function
Returns a dict containing:
shortname (the code)
longname
users - a comma separated list of countries/regions/cities that use it
alternatives - alternative names, e.g. ewro, Quid, Buck
symbol - e.g. £, $
... | Helper function
Returns a dict containing:
shortname (the code)
longname
users - a comma separated list of countries/regions/cities that use it
alternatives - alternative names, e.g. ewro, Quid, Buck
symbol - e.g. £, $
highlight - ? |
def _replace_with_specific_page(page, menu_item):
"""
If ``page`` is a vanilla ``Page` object, replace it with a 'specific'
version of itself. Also update ``menu_item``, depending on whether it's
a ``MenuItem`` object or a ``Page`` object.
"""
if type(page) is Page:
... | If ``page`` is a vanilla ``Page` object, replace it with a 'specific'
version of itself. Also update ``menu_item``, depending on whether it's
a ``MenuItem`` object or a ``Page`` object. |
def get_attributes(self, dataset):
"""Get available attritbutes from dataset you've selected"""
attributes = self.attributes(dataset)
attr_ = [ (k, v[0]) for k, v in attributes.items()]
return pd.DataFrame(attr_, columns=["Attribute","Description"]) | Get available attritbutes from dataset you've selected |
def _insert_html(self, cursor, html):
""" Inserts HTML using the specified cursor in such a way that future
formatting is unaffected.
"""
cursor.beginEditBlock()
cursor.insertHtml(html)
# After inserting HTML, the text document "remembers" it's in "html
# mod... | Inserts HTML using the specified cursor in such a way that future
formatting is unaffected. |
def on_startup(self, callback: callable, polling=True, webhook=True):
"""
Register a callback for the startup process
:param callback:
:param polling: use with polling
:param webhook: use with webhook
"""
self._check_frozen()
if not webhook and not pollin... | Register a callback for the startup process
:param callback:
:param polling: use with polling
:param webhook: use with webhook |
def price_unit(self):
"""Return the price unit."""
currency = self.currency
consumption_unit = self.consumption_unit
if not currency or not consumption_unit:
_LOGGER.error("Could not find price_unit.")
return " "
return currency + "/" + consumption_unit | Return the price unit. |
def sample(self, bqm, num_reads=10):
"""Give random samples for a binary quadratic model.
Variable assignments are chosen by coin flip.
Args:
bqm (:obj:`.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
num_reads (int, optional, defaul... | Give random samples for a binary quadratic model.
Variable assignments are chosen by coin flip.
Args:
bqm (:obj:`.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
num_reads (int, optional, default=10):
Number of reads.
... |
def plot_welch_perdiogram(x, fs, nperseg):
'''Plot Welch perdiogram
Args
----
x: ndarray
Signal array
fs: float
Sampling frequency
nperseg: float
Length of each data segment in PSD
'''
import scipy.signal
import numpy
# Generate a test signal, a 2 Vrms s... | Plot Welch perdiogram
Args
----
x: ndarray
Signal array
fs: float
Sampling frequency
nperseg: float
Length of each data segment in PSD |
def _account_table(accounts):
"""
creates a lookup table (emailaddress -> account) for a given list of
accounts
:param accounts: list of accounts
:type accounts: list of `alot.account.Account`
:returns: hashtable
:rvalue: dict (str -> `alot.account.Account`)
... | creates a lookup table (emailaddress -> account) for a given list of
accounts
:param accounts: list of accounts
:type accounts: list of `alot.account.Account`
:returns: hashtable
:rvalue: dict (str -> `alot.account.Account`) |
def fetch_url(url):
"""
Fetch the given url, strip formfeeds and decode
it into the defined encoding
"""
with closing(urllib.urlopen(url)) as f:
if f.code is 200:
response = f.read()
return strip_formfeeds(response).decode(ENCODING) | Fetch the given url, strip formfeeds and decode
it into the defined encoding |
def _readClusterSettings(self):
"""
Reads the cluster settings from the instance metadata, which assumes the instance
is the leader.
"""
instanceMetaData = get_instance_metadata()
region = zoneToRegion(self._zone)
conn = boto.ec2.connect_to_region(region)
... | Reads the cluster settings from the instance metadata, which assumes the instance
is the leader. |
def exec_(self, columns=(), by=(), where=(), **kwds):
"""exec from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.exec_('a', where='b > 10').show()
2 3
"""
return self._seu('exec', columns, by, where, kwds) | exec from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.exec_('a', where='b > 10').show()
2 3 |
def kube_pod_status_phase(self, metric, scraper_config):
""" Phase a pod is in. """
metric_name = scraper_config['namespace'] + '.pod.status_phase'
status_phase_counter = Counter()
for sample in metric.samples:
# Counts aggregated cluster-wide to avoid no-data issues on pod ... | Phase a pod is in. |
def _extract_html_hex(string):
"""Get the first 3 or 6 hex digits in the string"""
try:
hex_string = string and _hex_regexp().search(string).group(0) or ''
except AttributeError:
return None
if len(hex_string) == 3:
hex_string = hex_string[0] * 2 + hex_string[1] * 2 + hex_string[... | Get the first 3 or 6 hex digits in the string |
def is_prime(n, mr_rounds=25):
"""Test whether n is probably prime
See <https://en.wikipedia.org/wiki/Primality_test#Probabilistic_tests>
Arguments:
n (int): the number to be tested
mr_rounds (int, optional): number of Miller-Rabin iterations to run;
defaults to 25 iterations, ... | Test whether n is probably prime
See <https://en.wikipedia.org/wiki/Primality_test#Probabilistic_tests>
Arguments:
n (int): the number to be tested
mr_rounds (int, optional): number of Miller-Rabin iterations to run;
defaults to 25 iterations, which is what the GMP library uses
... |
def cancel_signature_request(self, signature_request_id):
''' Cancels a SignatureRequest
Cancels a SignatureRequest. After canceling, no one will be able to sign
or access the SignatureRequest or its documents. Only the requester can
cancel and only before everyone has signed.
... | Cancels a SignatureRequest
Cancels a SignatureRequest. After canceling, no one will be able to sign
or access the SignatureRequest or its documents. Only the requester can
cancel and only before everyone has signed.
Args:
signing_request_id (str): The id of the signature r... |
def sortby(self, variables, ascending=True):
"""
Sort object by labels or values (along an axis).
Sorts the dataset, either along specified dimensions,
or according to values of 1-D dataarrays that share dimension
with calling object.
If the input variables are dataarra... | Sort object by labels or values (along an axis).
Sorts the dataset, either along specified dimensions,
or according to values of 1-D dataarrays that share dimension
with calling object.
If the input variables are dataarrays, then the dataarrays are aligned
(via left-join) to th... |
def _gdcm_to_numpy(self, image):
""" Converts a GDCM image to a numpy array.
:param image: GDCM.ImageReader.GetImage()
"""
gdcm_typemap = {
gdcm.PixelFormat.INT8: numpy.int8,
gdcm.PixelFormat.UINT8: numpy.uint8,
gdcm.PixelFormat.UINT16: numpy... | Converts a GDCM image to a numpy array.
:param image: GDCM.ImageReader.GetImage() |
def reset(self):
"Initialises all needed variables to default values"
self.metadata = {}
self.items = []
self.spine = []
self.guide = []
self.pages = []
self.toc = []
self.bindings = []
self.IDENTIFIER_ID = 'id'
self.FOLDER_NAME = 'EPUB'
... | Initialises all needed variables to default values |
def ParseFromString(self, text, message):
"""Parses a text representation of a protocol message into a message."""
if not isinstance(text, str):
text = text.decode('utf-8')
return self.ParseLines(text.split('\n'), message) | Parses a text representation of a protocol message into a message. |
def update(self, x, w=1):
"""
Update the t-digest with value x and weight w.
"""
self.n += w
if len(self) == 0:
self._add_centroid(Centroid(x, w))
return
S = self._find_closest_centroids(x)
while len(S) != 0 and w > 0:
j = c... | Update the t-digest with value x and weight w. |
def parseValue(self, value):
"""Parse the given value and return result."""
if self.isVector():
return list(map(self._pythonType, value.split(',')))
if self.typ == 'boolean':
return _parseBool(value)
return self._pythonType(value) | Parse the given value and return result. |
def get_routers(self, context, router_ids=None, hd_ids=None):
"""Make a remote process call to retrieve the sync data for routers.
:param context: session context
:param router_ids: list of routers to fetch
:param hd_ids: hosting device ids, only routers assigned to these
... | Make a remote process call to retrieve the sync data for routers.
:param context: session context
:param router_ids: list of routers to fetch
:param hd_ids: hosting device ids, only routers assigned to these
hosting devices will be returned. |
def key(self, key, strictkey=None):
"""
Return a chunk referencing a key in a mapping with the name 'key'.
"""
return self._select(self._pointer.key(key, strictkey)) | Return a chunk referencing a key in a mapping with the name 'key'. |
def set_attributes(self, doc, fields, # pylint: disable=arguments-differ
parent_type=None, catch_all_field=None):
"""
:param UnionField catch_all_field: The field designated as the
catch-all. This field should be a member of the list of fields.
See :meth:`Composite.set_... | :param UnionField catch_all_field: The field designated as the
catch-all. This field should be a member of the list of fields.
See :meth:`Composite.set_attributes` for parameter definitions. |
def insert_many(self, rows, chunk_size=1000, ensure=None, types=None):
"""Add many rows at a time.
This is significantly faster than adding them one by one. Per default
the rows are processed in chunks of 1000 per commit, unless you specify
a different ``chunk_size``.
See :py:m... | Add many rows at a time.
This is significantly faster than adding them one by one. Per default
the rows are processed in chunks of 1000 per commit, unless you specify
a different ``chunk_size``.
See :py:meth:`insert() <dataset.Table.insert>` for details on
the other parameters.... |
def route(self, fn, **kwargs):
""" Route helper : apply fn function but keep the calling object, *ie* kwargs, for other functions
:param fn: Function to run the route with
:type fn: function
:param kwargs: Parsed url arguments
:type kwargs: dict
:return: HTTP Response wi... | Route helper : apply fn function but keep the calling object, *ie* kwargs, for other functions
:param fn: Function to run the route with
:type fn: function
:param kwargs: Parsed url arguments
:type kwargs: dict
:return: HTTP Response with rendered template
:rtype: flask.... |
def _formatFilepaths(self):
"""
Join dirnames and filenames from config.
"""
likedir=self['output']['likedir']
self.likefile = join(likedir,self['output']['likefile'])
self.mergefile = join(likedir,self['output']['mergefile'])
self.roifile = join(likedir,self['... | Join dirnames and filenames from config. |
def platform_detect():
"""Detect if running on the Raspberry Pi or Beaglebone Black and return the
platform type. Will return RASPBERRY_PI, BEAGLEBONE_BLACK, or UNKNOWN."""
# Handle Raspberry Pi
pi = pi_version()
if pi is not None:
return RASPBERRY_PI
# Handle Beaglebone Black
# TO... | Detect if running on the Raspberry Pi or Beaglebone Black and return the
platform type. Will return RASPBERRY_PI, BEAGLEBONE_BLACK, or UNKNOWN. |
def leave_scope(self):
""" Ends a function body and pops current scope out of the symbol table.
"""
def entry_size(entry):
""" For local variables and params, returns the real variable or
local array size in bytes
"""
if entry.scope == SCOPE.global... | Ends a function body and pops current scope out of the symbol table. |
def buscar_healthchecks(self, id_ambiente_vip):
"""Search healthcheck by environmentvip_id
:return: Dictionary with the following structure:
::
{'healthcheck_opt': [{'name': <name>, 'id': <id>},...]}
:raise InvalidParameterError: Environment VIP identifier is null and in... | Search healthcheck by environmentvip_id
:return: Dictionary with the following structure:
::
{'healthcheck_opt': [{'name': <name>, 'id': <id>},...]}
:raise InvalidParameterError: Environment VIP identifier is null and invalid.
:raise EnvironmentVipNotFoundError: Environm... |
def update_mode(arg_namespace):
"""Check command line arguments and run update function."""
try:
updater.update(custom_sources=arg_namespace.custom)
except (PermissionError, FileNotFoundError) as exception:
if isinstance(exception, PermissionError):
print('No write permission for... | Check command line arguments and run update function. |
def to_netjson(self, remove_block=True):
"""
        Converts the intermediate data structure (``self.intermediate_data``)
to a NetJSON configuration dictionary (``self.config``)
"""
result = OrderedDict()
# copy list
        intermediate_data = list(self.intermediate_data[self.... | Converts the intermediate data structure (``self.intermediate_data``)
to a NetJSON configuration dictionary (``self.config``) |
def update(self, num_iid, session, **kwargs):
'''taobao.item.update 更新商品信息
根据传入的num_iid更新对应的商品的数据 传入的num_iid所对应的商品必须属于当前会话的用户 商品的属性和sku的属性有包含的关系,商品的价格要位于sku的价格区间之中(例如,sku价格有5元、10元两种,那么商品的价格就需要大于等于5元,小于等于10元,否则更新商品会失败) 商品的类目和商品的价格、sku的价格都有一定的相关性(具体的关系要通过类目属性查询接口获得) 当关键属性值更新为“其他”的时候,需要输入input_pid... | taobao.item.update 更新商品信息
根据传入的num_iid更新对应的商品的数据 传入的num_iid所对应的商品必须属于当前会话的用户 商品的属性和sku的属性有包含的关系,商品的价格要位于sku的价格区间之中(例如,sku价格有5元、10元两种,那么商品的价格就需要大于等于5元,小于等于10元,否则更新商品会失败) 商品的类目和商品的价格、sku的价格都有一定的相关性(具体的关系要通过类目属性查询接口获得) 当关键属性值更新为“其他”的时候,需要输入input_pids和input_str商品才能更新成功。 |
def add_auth(self, req, **kwargs):
"""
Add AWS3 authentication to a request.
:type req: :class`boto.connection.HTTPRequest`
:param req: The HTTPRequest object.
"""
# This could be a retry. Make sure the previous
# authorization header is removed first.
i... | Add AWS3 authentication to a request.
:type req: :class`boto.connection.HTTPRequest`
:param req: The HTTPRequest object. |
def GetBalance(self, asset_id, watch_only=0):
"""
Get the balance of a specific token by its asset id.
Args:
asset_id (NEP5Token|TransactionOutput): an instance of type neo.Wallets.NEP5Token or neo.Core.TX.Transaction.TransactionOutput to get the balance from.
watch_only... | Get the balance of a specific token by its asset id.
Args:
asset_id (NEP5Token|TransactionOutput): an instance of type neo.Wallets.NEP5Token or neo.Core.TX.Transaction.TransactionOutput to get the balance from.
watch_only (bool): True, to limit to watch only wallets.
Returns:
... |
def _build(self, src, path, dest, mtime):
"""Calls `build` after testing that at least one output file (as
        returned by `_outputs()`) does not exist or is older than `mtime`. If
the build fails, the build time is recorded and no other builds will be
attempted on `input` until this method i... | Calls `build` after testing that at least one output file (as
        returned by `_outputs()`) does not exist or is older than `mtime`. If
the build fails, the build time is recorded and no other builds will be
attempted on `input` until this method is called with a larger mtime. |
def pkg_blacklist(self):
"""Manage blacklist packages
"""
blacklist = BlackList()
options = [
"-b",
"--blacklist"
]
flag = [
"--add",
"--remove"
]
command = ["list"]
if (len(self.args) == 2 and self.a... | Manage blacklist packages |
def get_ntlm_response(self, flags, challenge, target_info=None, channel_binding=None):
"""
Computes the 24 byte NTLM challenge response given the 8 byte server challenge, along with the session key.
If NTLMv2 is used, the TargetInfo structure must be supplied, the updated TargetInfo structure wi... | Computes the 24 byte NTLM challenge response given the 8 byte server challenge, along with the session key.
If NTLMv2 is used, the TargetInfo structure must be supplied, the updated TargetInfo structure will be returned
:param challenge: The 8-byte challenge message generated by the server
:retu... |
def _VarintBytes(value):
"""Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast."""
pieces = []
_EncodeVarint(pieces.append, value)
return b"".join(pieces) | Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast. |
def find_one(self, cls, id):
"""Required functionality."""
try:
db_result = self.get_class_table(cls).lookup(id)
except ItemNotFound:
# according to docs, this shouldn't be required, but it IS
db_result = None
if not db_result:
return None... | Required functionality. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.