code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def _add(self, xer, primary, type):
"""
Private method for adding a descriptor from the event loop.
It takes care of adding it if new or modifying it if already added
for another state (read -> read/write for example).
"""
if xer not in primary:
primary[xer]... | Private method for adding a descriptor from the event loop.
It takes care of adding it if new or modifying it if already added
for another state (read -> read/write for example). |
def _logged_in_successful(data):
"""
Test the login status from the returned communication of the
server.
:param data: bytes received from server during login
:type data: list of bytes
:return boolean, True when you are logged in.
"""
if re.match(r'^:(te... | Test the login status from the returned communication of the
server.
:param data: bytes received from server during login
:type data: list of bytes
:return boolean, True when you are logged in. |
async def FinishActions(self, results):
'''
results : typing.Sequence[~ActionExecutionResult]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='FinishActions',
... | results : typing.Sequence[~ActionExecutionResult]
Returns -> typing.Sequence[~ErrorResult] |
def fmap(order, aij, bij, x, y):
"""Evaluate the 2D polynomial transformation.
u = sum[i=0:order]( sum[j=0:j]( a_ij * x**(i - j) * y**j ))
v = sum[i=0:order]( sum[j=0:j]( b_ij * x**(i - j) * y**j ))
Parameters
----------
order : int
Order of the polynomial transformation.
aij : num... | Evaluate the 2D polynomial transformation.
u = sum[i=0:order]( sum[j=0:j]( a_ij * x**(i - j) * y**j ))
v = sum[i=0:order]( sum[j=0:j]( b_ij * x**(i - j) * y**j ))
Parameters
----------
order : int
Order of the polynomial transformation.
aij : numpy array
Polynomial coefficents ... |
def register_magics(store_name='_ampl_cells', ampl_object=None):
"""
Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``.
Args:
store_name: Name of the store where ``%%ampl cells`` will be stored.
ampl_object: Object used to evaluate ``%%ampl_eval`` cells.
"""
from IPyth... | Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``.
Args:
store_name: Name of the store where ``%%ampl cells`` will be stored.
ampl_object: Object used to evaluate ``%%ampl_eval`` cells. |
def timelimit(timeout):
"""borrowed from web.py"""
def _1(function):
def _2(*args, **kw):
class Dispatch(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = None
self.error = None
... | borrowed from web.py |
def to_df_CSV(self, tempfile: str=None, tempkeep: bool=False, **kwargs) -> 'pd.DataFrame':
"""
Export this SAS Data Set to a Pandas Data Frame via CSV file
:param tempfile: [optional] an OS path for a file to use for the local CSV file; default it a temporary file that's cleaned up
:par... | Export this SAS Data Set to a Pandas Data Frame via CSV file
:param tempfile: [optional] an OS path for a file to use for the local CSV file; default it a temporary file that's cleaned up
:param tempkeep: if you specify your own file to use with tempfile=, this controls whether it's cleaned up after us... |
def getvalue(x):
"""Return the single value of x or raise TypError if more than one value."""
if isrepeating(x):
raise TypeError(
"Ambiguous call to getvalue for %r which has more than one value."
% x)
for value in getvalues(x):
return value | Return the single value of x or raise TypError if more than one value. |
def get_annotated_lines(self):
"""Helper function that returns lines with extra information."""
lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
# find function definition and mark lines
if hasattr(self.code, 'co_firstlineno'):
lineno = self.code.co_first... | Helper function that returns lines with extra information. |
def tweakback(drzfile, input=None, origwcs = None,
newname = None, wcsname = None,
extname='SCI', force=False, verbose=False):
"""
Apply WCS solution recorded in drizzled file to distorted input images
(``_flt.fits`` files) used to create the drizzled file. This task relies... | Apply WCS solution recorded in drizzled file to distorted input images
(``_flt.fits`` files) used to create the drizzled file. This task relies on
the original WCS and updated WCS to be recorded in the drizzled image's
header as the last 2 alternate WCSs.
Parameters
----------
drzfile : str (D... |
def failed_login_limit_reached(self):
""" A boolean method to check for failed login limit being reached"""
login_limit = 10
if self.failed_logins and self.failed_logins >= login_limit:
return True
else:
return False | A boolean method to check for failed login limit being reached |
def addarchive(self, name):
"""
Add (i.e. copy) the contents of another tarball to this one.
:param name: File path to the tar archive.
:type name: unicode | str
"""
with tarfile.open(name, 'r') as st:
for member in st.getmembers():
self.tarfi... | Add (i.e. copy) the contents of another tarball to this one.
:param name: File path to the tar archive.
:type name: unicode | str |
def _filter(request, object_, tags=None, more=False, orderby='created'):
"""Filters Piece objects from self based on filters, search, and range
:param tags: List of tag IDs to filter
:type tags: list
:param more -- bool, Returns more of the same filtered set of images based on session range
return... | Filters Piece objects from self based on filters, search, and range
:param tags: List of tag IDs to filter
:type tags: list
:param more -- bool, Returns more of the same filtered set of images based on session range
return list, Objects filtered |
def post(self):
"""Register a new model (models)"""
self.set_header("Content-Type", "application/json")
key = uuid.uuid4().hex
metadata = json.loads(self.request.body.decode())
metadata["uuid"] = key
self.database[key] = metadata
result = json.dumps({"uuid": key})... | Register a new model (models) |
def find_embedding(elt, embedding=None):
"""Try to get elt embedding elements.
:param embedding: embedding element. Must have a module.
:return: a list of [module [,class]*] embedding elements which define elt.
:rtype: list
"""
result = [] # result is empty in the worst case
# start to ... | Try to get elt embedding elements.
:param embedding: embedding element. Must have a module.
:return: a list of [module [,class]*] embedding elements which define elt.
:rtype: list |
def _plot_estimate(
cls,
estimate=None,
confidence_intervals=None,
loc=None,
iloc=None,
show_censors=False,
censor_styles=None,
ci_legend=False,
ci_force_lines=False,
ci_alpha=0.25,
ci_show=True,
at_risk_counts=False,
**kwargs
):
"""
Plots a pretty figure of ... | Plots a pretty figure of {0}.{1}
Matplotlib plot arguments can be passed in inside the kwargs, plus
Parameters
-----------
show_censors: bool
place markers at censorship events. Default: False
censor_styles: bool
If show_censors, this dictionary will be passed into the plot call.
... |
def start_container(self):
"""Add a node to the tree that represents the start of a container.
Until end_container is called, any nodes added through add_scalar_value
or start_container will be children of this new node.
"""
self.__container_lengths.append(self.current_container... | Add a node to the tree that represents the start of a container.
Until end_container is called, any nodes added through add_scalar_value
or start_container will be children of this new node. |
def complete_task_from_id(self, task_id):
"""
Runs the task with the given id.
:type task_id: integer
:param task_id: The id of the Task object.
"""
if task_id is None:
raise WorkflowException(self.spec, 'task_id is None')
for task in self.task_tree:... | Runs the task with the given id.
:type task_id: integer
:param task_id: The id of the Task object. |
def get_previous_next_published(self, date):
"""
Returns a dict of the next and previous date periods
with published entries.
"""
previous_next = getattr(self, 'previous_next', None)
if previous_next is None:
date_year = datetime(date.year, 1, 1)
... | Returns a dict of the next and previous date periods
with published entries. |
def initialize(self):
'''
Initialize the handler before requests are called
'''
if not hasattr(self.application, 'event_listener'):
log.debug('init a listener')
self.application.event_listener = EventListener(
self.application.mod_opts,
... | Initialize the handler before requests are called |
def change_quantiles(x, ql, qh, isabs, f_agg):
"""
First fixes a corridor given by the quantiles ql and qh of the distribution of x.
Then calculates the average, absolute value of consecutive changes of the series x inside this corridor.
Think about selecting a corridor on the
y-Axis and only calcu... | First fixes a corridor given by the quantiles ql and qh of the distribution of x.
Then calculates the average, absolute value of consecutive changes of the series x inside this corridor.
Think about selecting a corridor on the
y-Axis and only calculating the mean of the absolute change of the time series i... |
def revokeSystemPermission(self, login, user, perm):
"""
Parameters:
- login
- user
- perm
"""
self.send_revokeSystemPermission(login, user, perm)
self.recv_revokeSystemPermission() | Parameters:
- login
- user
- perm |
def T6(word, rules):
'''If a VVV-sequence contains a long vowel, insert a syllable boundary
between it and the third vowel. E.g. [kor.ke.aa], [yh.ti.öön], [ruu.an],
[mää.yt.te].'''
offset = 0
try:
WORD, rest = tuple(word.split('.', 1))
for vvv in long_vowel_sequences(rest):
... | If a VVV-sequence contains a long vowel, insert a syllable boundary
between it and the third vowel. E.g. [kor.ke.aa], [yh.ti.öön], [ruu.an],
[mää.yt.te]. |
def sky_fraction(self):
"""
Sky fraction covered by the MOC
"""
pix_id = self._best_res_pixels()
nb_pix_filled = pix_id.size
return nb_pix_filled / float(3 << (2*(self.max_order + 1))) | Sky fraction covered by the MOC |
def models(self):
'''
generator to return the tuple of model and its schema to create on aws.
'''
model_dict = self._build_all_dependencies()
while True:
model = self._get_model_without_dependencies(model_dict)
if not model:
break
... | generator to return the tuple of model and its schema to create on aws. |
def tokenize(self, string):
'''
Maps a string to an iterator over tokens. In other words: [char] -> [token]
'''
new_lexer = ply.lex.lex(module=self, debug=self.debug, errorlog=logger)
new_lexer.latest_newline = 0
new_lexer.string_value = None
new_lexer.input(stri... | Maps a string to an iterator over tokens. In other words: [char] -> [token] |
def fit_points_in_bounding_box_params(df_points, bounding_box,
padding_fraction=0):
'''
Return offset and scale factor to scale ``x``, ``y`` columns of
:data:`df_points` to fill :data:`bounding_box` while maintaining aspect
ratio.
Arguments
---------
df... | Return offset and scale factor to scale ``x``, ``y`` columns of
:data:`df_points` to fill :data:`bounding_box` while maintaining aspect
ratio.
Arguments
---------
df_points : pandas.DataFrame
A frame with at least the columns ``x`` and ``y``, containing one row
per point.
boundi... |
def organize(dirs, config, run_info_yaml, sample_names=None, is_cwl=False,
integrations=None):
"""Organize run information from a passed YAML file or the Galaxy API.
Creates the high level structure used for subsequent processing.
sample_names is a list of samples to include from the overall ... | Organize run information from a passed YAML file or the Galaxy API.
Creates the high level structure used for subsequent processing.
sample_names is a list of samples to include from the overall file, for cases
where we are running multiple pipelines from the same configuration file. |
def normalize_full_name_true(decl):
"""
Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name
"""
if decl.cache.normalized_full_name_true is None:
decl.cache.normalized_full_name_true = normalize(
d... | Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name |
def _set_default_resource_names(self):
"""
Generate names for resources based on the running_instance_id.
"""
self.ip_config_name = ''.join([
self.running_instance_id, '-ip-config'
])
self.nic_name = ''.join([self.running_instance_id, '-nic'])
self.pub... | Generate names for resources based on the running_instance_id. |
def _match_filenames_w_dfs(filenames, lo_dfs):
"""
Match a list of filenames to their data frame counterparts. Return data frames
:param list filenames: Filenames of data frames to retrieve
:param dict lo_dfs: All data frames
:return dict: Filenames and data frames (filtered)
"""
logger_data... | Match a list of filenames to their data frame counterparts. Return data frames
:param list filenames: Filenames of data frames to retrieve
:param dict lo_dfs: All data frames
:return dict: Filenames and data frames (filtered) |
def click(self):
"""
click extension
Returns:
ClickExtension
"""
if self._click_extension is None:
from .click_ext import ClickExtension
self._click_extension = ClickExtension(
config=self
)
return self._cli... | click extension
Returns:
ClickExtension |
def register(self, resource, endpoint):
'''
This methods registers a resource with the router and connects all receivers to their respective signals
:param resource:
The resource class to register
:type resource:
A subclass of ``Resource`` class
:param en... | This methods registers a resource with the router and connects all receivers to their respective signals
:param resource:
The resource class to register
:type resource:
A subclass of ``Resource`` class
:param endpoint:
the name of the resource's endpoint as i... |
def maybe_show_asm(showasm, tokens):
"""
Show the asm based on the showasm flag (or file object), writing to the
appropriate stream depending on the type of the flag.
:param showasm: Flag which determines whether the ingested code is
written to sys.stdout or not. (It is also to pass... | Show the asm based on the showasm flag (or file object), writing to the
appropriate stream depending on the type of the flag.
:param showasm: Flag which determines whether the ingested code is
written to sys.stdout or not. (It is also to pass a file
like object, into whi... |
def _cutoff(self, coeffs, vscale):
"""
Compute cutoff index after which the coefficients are deemed negligible.
"""
bnd = self._threshold(vscale)
inds = np.nonzero(abs(coeffs) >= bnd)
if len(inds[0]):
N = inds[0][-1]
else:
N = 0
re... | Compute cutoff index after which the coefficients are deemed negligible. |
def size_as_bytes(size_, prefix):
"""
>>> size_as_bytes(7.5, 'T')
8246337208320
"""
prefix = prefix.upper()
assert prefix in si_prefixes
exponent = si_prefixes.index(prefix) + 1
return int(size_ * (1024.0 ** exponent)) | >>> size_as_bytes(7.5, 'T')
8246337208320 |
def remove_from_group(self, group, user):
"""
Remove a user from a group
:type user: str
:param user: User's email
:type group: str
:param group: Group name
:rtype: dict
:return: an empty dictionary
"""
data = {'group': group, 'user': us... | Remove a user from a group
:type user: str
:param user: User's email
:type group: str
:param group: Group name
:rtype: dict
:return: an empty dictionary |
def QA_util_date_stamp(date):
"""
字符串 '2018-01-01' 转变成 float 类型时间 类似 time.time() 返回的类型
:param date: 字符串str -- 格式必须是 2018-01-01 ,长度10
:return: 类型float
"""
datestr = str(date)[0:10]
date = time.mktime(time.strptime(datestr, '%Y-%m-%d'))
return date | 字符串 '2018-01-01' 转变成 float 类型时间 类似 time.time() 返回的类型
:param date: 字符串str -- 格式必须是 2018-01-01 ,长度10
:return: 类型float |
def installed(name, env=None, saltenv='base', user=None):
"""
Installs a single package, list of packages (comma separated) or packages in a requirements.txt
Checks if the package is already in the environment.
Check ocurres here so is only needed to `conda list` and `pip freeze` once
name
... | Installs a single package, list of packages (comma separated) or packages in a requirements.txt
Checks if the package is already in the environment.
Check ocurres here so is only needed to `conda list` and `pip freeze` once
name
name of the package(s) or path to the requirements.txt
env : None... |
def set_chain_info(self, chain_id, chain_name, num_groups):
"""Set the chain information.
:param chain_id: the asym chain id from mmCIF
:param chain_name: the auth chain id from mmCIF
:param num_groups: the number of groups this chain has
"""
self.chain_id_list.append(cha... | Set the chain information.
:param chain_id: the asym chain id from mmCIF
:param chain_name: the auth chain id from mmCIF
:param num_groups: the number of groups this chain has |
def count_flag_reads(self, file_name, flag, paired_end):
"""
Counts the number of reads with the specified flag.
:param str file_name: name of reads file
:param str flag: sam flag value to be read
:param bool paired_end: This parameter is ignored; samtools automatically correctl... | Counts the number of reads with the specified flag.
:param str file_name: name of reads file
:param str flag: sam flag value to be read
:param bool paired_end: This parameter is ignored; samtools automatically correctly responds depending
on the data in the bamfile. We leave the opt... |
def generate_random_id(size=6, chars=string.ascii_uppercase + string.digits):
"""Generate random id numbers."""
return "".join(random.choice(chars) for x in range(size)) | Generate random id numbers. |
def class_get_trait_help(cls, trait, inst=None):
"""Get the help string for a single trait.
If `inst` is given, it's current trait values will be used in place of
the class default.
"""
assert inst is None or isinstance(inst, cls)
lines = []
header = "--%... | Get the help string for a single trait.
If `inst` is given, it's current trait values will be used in place of
the class default. |
def fold_string(input_string, max_width):
"""
Fold a string within a maximum width.
Parameters:
input_string:
The string of data to go into the cell
max_width:
Maximum width of cell. Data is folded into multiple lines to
fit into this width.
Return:
... | Fold a string within a maximum width.
Parameters:
input_string:
The string of data to go into the cell
max_width:
Maximum width of cell. Data is folded into multiple lines to
fit into this width.
Return:
String representing the folded string |
def get_sun_times(dates, lon, lat, time_zone):
"""Computes the times of sunrise, solar noon, and sunset for each day.
Parameters
----
dates: datetime
lat : latitude in DecDeg
lon : longitude in DecDeg
time_zone : timezone
Returns
----
DataFrame: [sunrise,... | Computes the times of sunrise, solar noon, and sunset for each day.
Parameters
----
dates: datetime
lat : latitude in DecDeg
lon : longitude in DecDeg
time_zone : timezone
Returns
----
DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours |
def open_external_editor(filename=None, sql=None):
"""Open external editor, wait for the user to type in their query, return
the query.
:return: list with one tuple, query as first element.
"""
message = None
filename = filename.strip().split(' ', 1)[0] if filename else None
sql = sql or '... | Open external editor, wait for the user to type in their query, return
the query.
:return: list with one tuple, query as first element. |
def script_post_save(model, os_path, contents_manager, **kwargs):
"""convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script`
"""
from nbconvert.exporters.script import ScriptExporter
if model['type'] != 'notebook':
return
global _script_export... | convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script` |
def _default_arguments(self, obj):
"""Return the list of default arguments of obj if it is callable,
or empty list otherwise."""
if not (inspect.isfunction(obj) or inspect.ismethod(obj)):
# for classes, check for __init__,__new__
if inspect.isclass(obj):
... | Return the list of default arguments of obj if it is callable,
or empty list otherwise. |
def _pairwise_chisq(self):
"""Pairwise comparisons (Chi-Square) along axis, as numpy.ndarray.
Returns a list of square and symmetric matrices of test statistics for the null
hypothesis that each vector along *axis* is equal to each other.
"""
return [
self._chi_squar... | Pairwise comparisons (Chi-Square) along axis, as numpy.ndarray.
Returns a list of square and symmetric matrices of test statistics for the null
hypothesis that each vector along *axis* is equal to each other. |
def get_updated_data(self, old_data: Dict[str, LinkItem]) -> Dict[str, LinkItem]:
"""
Get links who needs to be downloaded by comparing old and the new data.
:param old_data: old data
:type old_data: Dict[str, ~unidown.plugin.link_item.LinkItem]
:return: data which is newer or d... | Get links who needs to be downloaded by comparing old and the new data.
:param old_data: old data
:type old_data: Dict[str, ~unidown.plugin.link_item.LinkItem]
:return: data which is newer or dont exist in the old one
:rtype: Dict[str, ~unidown.plugin.link_item.LinkItem] |
def find_holes(db_module, db, table_name, column_name, _range, filter=None):
"""
FIND HOLES IN A DENSE COLUMN OF INTEGERS
RETURNS A LIST OF {"min"min, "max":max} OBJECTS
"""
if not filter:
filter = {"match_all": {}}
_range = wrap(_range)
params = {
"min": _range.min,
... | FIND HOLES IN A DENSE COLUMN OF INTEGERS
RETURNS A LIST OF {"min"min, "max":max} OBJECTS |
def get_search_results(portal_type=None, uid=None, **kw):
"""Search the catalog and return the results
:returns: Catalog search results
:rtype: iterable
"""
# If we have an UID, return the object immediately
if uid is not None:
logger.info("UID '%s' found, returning the object immediat... | Search the catalog and return the results
:returns: Catalog search results
:rtype: iterable |
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config,
bins=251, annotator_id=0):
"""Computes the results by using the ground truth dataset identified by
the annotator parameter.
Return
------
results : dict
Dictionary of the results (see functio... | Computes the results by using the ground truth dataset identified by
the annotator parameter.
Return
------
results : dict
Dictionary of the results (see function compute_results). |
def validate_twilio_signature(func=None, backend_name='twilio-backend'):
"""View decorator to validate requests from Twilio per http://www.twilio.com/docs/security."""
def _dec(view_func):
@functools.wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwarg... | View decorator to validate requests from Twilio per http://www.twilio.com/docs/security. |
def import_url(self,caseSensitiveNetworkCollectionKeys=None,\
caseSensitiveNetworkKeys=None,dataTypeList=None,\
DataTypeTargetForNetworkCollection=None,DataTypeTargetForNetworkList=None,\
delimiters=None,delimitersForDataList=None,firstRowAsColumnNames=None,\
KeyColumnForMapping=None,Key... | Similar to Import Table this uses a long list of input parameters to
specify the attributes of the table, the mapping keys, and the destination
table for the input.
:param caseSensitiveNetworkCollectionKeys (string, optional): Determines wh
ether capitalization is considered in matc... |
def get_texts_and_labels(sentence_chunk):
"""Given a sentence chunk, extract original texts and labels."""
words = sentence_chunk.split('\n')
texts = []
labels = []
for word in words:
word = word.strip()
if len(word) > 0:
toks = word.split('\t')
texts.append(t... | Given a sentence chunk, extract original texts and labels. |
def format_dateaxis(subplot, freq, index):
"""
Pretty-formats the date axis (x-axis).
Major and minor ticks are automatically set for the frequency of the
current underlying series. As the dynamic mode is activated by
default, changing the limits of the x axis will intelligently change
the pos... | Pretty-formats the date axis (x-axis).
Major and minor ticks are automatically set for the frequency of the
current underlying series. As the dynamic mode is activated by
default, changing the limits of the x axis will intelligently change
the positions of the ticks. |
def getActiveAxes(self):
"""Return a list of the selected axes."""
active = []
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
active.append(i)
return active | Return a list of the selected axes. |
def add_param(self, param_key, param_val):
"""
adds parameters as key value pairs
"""
self.params.append([param_key, param_val])
if param_key == '__success_test':
self.success = param_val | adds parameters as key value pairs |
def find_elements_by_class_name(self, name):
"""
Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
... | Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = driver.find_elements_by_class_name... |
def build_plan(description, graph,
targets=None, reverse=False):
"""Builds a plan from a list of steps.
Args:
description (str): an arbitrary string to
describe the plan.
graph (:class:`Graph`): a list of :class:`Graph` to execute.
targets (list): an optional l... | Builds a plan from a list of steps.
Args:
description (str): an arbitrary string to
describe the plan.
graph (:class:`Graph`): a list of :class:`Graph` to execute.
targets (list): an optional list of step names to filter the graph to.
If provided, only these steps, an... |
async def _set_annotations(entity_tag, annotations, connection):
"""Set annotations on the specified entity.
:param annotations map[string]string: the annotations as key/value
pairs.
"""
# TODO: ensure annotations is dict with only string keys
# and values.
log.debug('Updating annotatio... | Set annotations on the specified entity.
:param annotations map[string]string: the annotations as key/value
pairs. |
def dict(self, **kwargs):
"""
Dictionary representation.
"""
return dict(
time = self.timestamp,
serial_number = self.serial_number,
value = self.value,
battery = self.battery,
... | Dictionary representation. |
def get_desc2nts(self, **kws_usr):
"""Return grouped, sorted namedtuples in either format: flat, sections."""
# desc2nts contains: (sections hdrgo_prt sortobj) or (flat hdrgo_prt sortobj)
# keys_nts: hdrgo_prt section_prt top_n use_sections
kws_nts = {k:v for k, v in kws_usr.items() if k... | Return grouped, sorted namedtuples in either format: flat, sections. |
def cced(self, user, include=None):
"""
Retrieve the tickets this user is cc'd into.
:param include: list of objects to sideload. `Side-loading API Docs
<https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
:param user: User object or id
"""
re... | Retrieve the tickets this user is cc'd into.
:param include: list of objects to sideload. `Side-loading API Docs
<https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
:param user: User object or id |
def _NormalizeTime(self, time):
"""Normalize a time to be an int measured in microseconds."""
if isinstance(time, rdfvalue.RDFDatetime):
return time.AsMicrosecondsSinceEpoch()
if isinstance(time, rdfvalue.Duration):
return time.microseconds
return int(time) | Normalize a time to be an int measured in microseconds. |
def parse_config_file(config_path, verb=3):
"""Parse provided json to get configuration
Empty default json:
{
"testfiles": [],
"breakfailed": true,
"onlyfailed": false,
"verb": 3,
"dump": 0,
"crc": true,
"scapy": "scapy",
"preexec": {},
"global_preexec":... | Parse provided json to get configuration
Empty default json:
{
"testfiles": [],
"breakfailed": true,
"onlyfailed": false,
"verb": 3,
"dump": 0,
"crc": true,
"scapy": "scapy",
"preexec": {},
"global_preexec": "",
"outputfile": null,
"local": true,... |
def set_attribute(self, name, value):
""" Default handler for those not explicitly defined """
if value is True:
self.widget.set(name, name)
elif value is False:
del self.widget.attrib[name]
else:
self.widget.set(name, str(value)) | Default handler for those not explicitly defined |
def run_powerflow(self, session, method='onthefly', export_pypsa=False, debug=False):
""" Performs power flow calculation for all MV grids
Args:
session : sqlalchemy.orm.session.Session
Database session
method: str
Specify export method
... | Performs power flow calculation for all MV grids
Args:
session : sqlalchemy.orm.session.Session
Database session
method: str
Specify export method
If method='db' grid data will be exported to database
If me... |
def scroll_event(self, widget, event):
"""
Called when a mouse is turned in the widget (and maybe for
finger scrolling in the trackpad).
Adjust method signature as appropriate for callback.
"""
x, y = event.x, event.y
num_degrees = 0
direction = 0
... | Called when a mouse is turned in the widget (and maybe for
finger scrolling in the trackpad).
Adjust method signature as appropriate for callback. |
def _delete_file(configurator, path):
""" remove file and remove it's directories if empty """
path = os.path.join(configurator.target_directory, path)
os.remove(path)
try:
os.removedirs(os.path.dirname(path))
except OSError:
pass | remove file and remove it's directories if empty |
def statexml2pdb(topology, state, output=None):
"""
Given an OpenMM xml file containing the state of the simulation,
generate a PDB snapshot for easy visualization.
"""
state = Restart.from_xml(state)
system = SystemHandler.load(topology, positions=state.positions)
if output is None:
... | Given an OpenMM xml file containing the state of the simulation,
generate a PDB snapshot for easy visualization. |
def release(self, connection: Connection):
'''Put a connection back in the pool.
Coroutine.
'''
assert not self._closed
key = connection.key
host_pool = self._host_pools[key]
_logger.debug('Check in %s', key)
yield from host_pool.release(connection)
... | Put a connection back in the pool.
Coroutine. |
def define_simulation_graph(batch_env, algo_cls, config):
"""Define the algorithm and environment interaction.
Args:
batch_env: In-graph environments object.
algo_cls: Constructor of a batch algorithm.
config: Configuration object for the algorithm.
Returns:
Object providing graph elements via a... | Define the algorithm and environment interaction.
Args:
batch_env: In-graph environments object.
algo_cls: Constructor of a batch algorithm.
config: Configuration object for the algorithm.
Returns:
Object providing graph elements via attributes. |
def filter(self, *args, **kwargs):
"""
Works just like the default Manager's :func:`filter` method, but
you can pass an additional keyword argument named ``path`` specifying
the full **path of the folder whose immediate child objects** you
want to retrieve, e.g. ``"path/to/folder... | Works just like the default Manager's :func:`filter` method, but
you can pass an additional keyword argument named ``path`` specifying
the full **path of the folder whose immediate child objects** you
want to retrieve, e.g. ``"path/to/folder"``. |
def getheaderAnim(self, im):
""" getheaderAnim(im)
Get animation header. To replace PILs getheader()[0]
"""
bb = b'GIF89a'
bb += intToBin(im.size[0])
bb += intToBin(im.size[1])
bb += b'\x87\x00\x00'
return bb | getheaderAnim(im)
Get animation header. To replace PILs getheader()[0] |
def publish_minions(self):
'''
Publishes minions as a list of dicts.
'''
log.debug('in publish minions')
minions = {}
log.debug('starting loop')
for minion, minion_info in six.iteritems(self.minions):
log.debug(minion)
# log.debug(minion_i... | Publishes minions as a list of dicts. |
def make_tx_signatures(txs_to_sign, privkey_list, pubkey_list):
"""
Loops through txs_to_sign and makes signatures using privkey_list and pubkey_list
Not sure what privkeys and pubkeys to supply?
Use get_input_addresses() to return a list of addresses.
Matching those addresses to keys is up to you ... | Loops through txs_to_sign and makes signatures using privkey_list and pubkey_list
Not sure what privkeys and pubkeys to supply?
Use get_input_addresses() to return a list of addresses.
Matching those addresses to keys is up to you and how you store your private keys.
A future version of this library ma... |
def recursively_save_dict_contents_to_group(h5file, path, dic):
"""
Parameters
----------
h5file:
h5py file to be written to
path:
path within h5py file to saved dictionary
dic:
python dictionary to be converted to hdf5 format
"""
for key, item in dic.items():
... | Parameters
----------
h5file:
h5py file to be written to
path:
path within h5py file to saved dictionary
dic:
python dictionary to be converted to hdf5 format |
def clean(ctx):
"""Clean Sphinx build products.
Use this command to clean out build products after a failed build, or
in preparation for running a build from a clean state.
This command removes the following directories from the
``pipelines_lsst_io`` directory:
- ``_build`` (the Sphinx build ... | Clean Sphinx build products.
Use this command to clean out build products after a failed build, or
in preparation for running a build from a clean state.
This command removes the following directories from the
``pipelines_lsst_io`` directory:
- ``_build`` (the Sphinx build itself)
- ``modules... |
def directives(entrystream, type=None):
"""
Pull directives out of the specified entry stream.
:param entrystream: a stream of entries
:param type: retrieve only directives of the specified type; set to
:code:`None` to retrieve all directives
"""
for directive in entry_type_fil... | Pull directives out of the specified entry stream.
:param entrystream: a stream of entries
:param type: retrieve only directives of the specified type; set to
:code:`None` to retrieve all directives |
def as_unicode(s, encoding='utf-8'):
"""Force conversion of given string to unicode type.
Unicode is ``str`` type for Python 3.x and ``unicode`` for Python 2.x .
If the string is already in unicode, then no conversion is done and the same string is returned.
Parameters
----------
s: str or byt... | Force conversion of given string to unicode type.
Unicode is ``str`` type for Python 3.x and ``unicode`` for Python 2.x .
If the string is already in unicode, then no conversion is done and the same string is returned.
Parameters
----------
s: str or bytes (Python3), str or unicode (Python2)
... |
def block_ip(ip_address):
""" given the ip, block it """
if not ip_address:
# no reason to continue when there is no ip
return
if config.DISABLE_IP_LOCKOUT:
# no need to block, we disabled it.
return
key = get_ip_blocked_cache_key(ip_address)
if config.COOLOFF_TIME:
... | given the ip, block it |
def add_section(self, section):
"""You can add section inside a Element, the section must be a
subclass of SubSection. You can use this class to represent a tree.
"""
if not issubclass(section.__class__, SubSection):
raise TypeError("Argument should be a subclass of SubSecti... | You can add section inside a Element, the section must be a
subclass of SubSection. You can use this class to represent a tree. |
def datetime_utc_to_local(utc):
"""
An ugly hack to convert naive :std:`datetime.datetime` object containing
UTC time to a naive :std:`datetime.datetime` object with local time.
It seems standard Python 2.3 library doesn't provide any better way to
do that.
"""
# pylint: disable-msg=C0103
... | An ugly hack to convert naive :std:`datetime.datetime` object containing
UTC time to a naive :std:`datetime.datetime` object with local time.
It seems standard Python 2.3 library doesn't provide any better way to
do that. |
def find_max(self, predicate, max_=None):
"""
Return the largest item in or under this node that satisfies
*predicate*.
"""
if predicate(self.value):
max_ = self.value
next_node = self._greater
else:
next_node = self._lesser
if ... | Return the largest item in or under this node that satisfies
*predicate*. |
def _parse_doc(doc):
"""Parse a docstring.
Parse a docstring and extract three components; headline, description,
and map of arguments to help texts.
Args:
doc: docstring.
Returns:
a dictionary.
"""
lines = doc.split("\n")
descriptions = list(itertools.takewhile(_checker(_... | Parse a docstring.
Parse a docstring and extract three components; headline, description,
and map of arguments to help texts.
Args:
doc: docstring.
Returns:
a dictionary. |
def process_from_webservice(id_val, id_type='pmcid', source='pmc',
with_grounding=True):
"""Return an output from RLIMS-p for the given PubMed ID or PMC ID.
Parameters
----------
id_val : str
A PMCID, with the prefix PMC, or pmid, with no prefix, of the paper to
... | Return an output from RLIMS-p for the given PubMed ID or PMC ID.
Parameters
----------
id_val : str
A PMCID, with the prefix PMC, or pmid, with no prefix, of the paper to
be "read".
id_type : str
Either 'pmid' or 'pmcid'. The default is 'pmcid'.
source : str
Either '... |
def write_ln(self, *text, sep=' '):
    """Append ``*text`` (rendered via ``markdown.text``) as a new line.

    If the buffer does not already end with a newline, one is inserted
    first; the rendered text is then added with a trailing newline.

    :param text: fragments forwarded to ``markdown.text``
    :param sep: separator placed between the fragments
    :return: ``self``, enabling call chaining
    """
    needs_break = bool(self.text) and not self.text.endswith('\n')
    if needs_break:
        self.text += '\n'
    rendered = markdown.text(*text, sep)
    self.text += rendered + '\n'
    return self
:param text:
:param sep:
:return: |
def format_symbol(symbol):
"""Returns well formatted Hermann-Mauguin symbol as extected by
the database, by correcting the case and adding missing or
removing dublicated spaces."""
fixed = []
s = symbol.strip()
s = s[0].upper() + s[1:].lower()
for c in s:
if c.isalpha():
... | Returns well formatted Hermann-Mauguin symbol as expected by
the database, by correcting the case and adding missing or
removing duplicated spaces.
def get_edges(self):
"""
Returns the edges of the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_edges()
[['family-out', 'light-on'],
['family-out', 'dog-out'],
['bowel-problem', 'dog-out'],
... | Returns the edges of the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_edges()
[['family-out', 'light-on'],
['family-out', 'dog-out'],
['bowel-problem', 'dog-out'],
['dog-out', 'hear-bark']] |
def roll_sparse(x, shift, axis=0):
'''Sparse matrix roll
This operation is equivalent to ``numpy.roll``, but operates on sparse matrices.
Parameters
----------
x : scipy.sparse.spmatrix or np.ndarray
The sparse matrix input
shift : int
The number of positions to roll the speci... | Sparse matrix roll
This operation is equivalent to ``numpy.roll``, but operates on sparse matrices.
Parameters
----------
x : scipy.sparse.spmatrix or np.ndarray
The sparse matrix input
shift : int
The number of positions to roll the specified axis
axis : (0, 1, -1)
T... |
def set(self, field, value):
"""
Sets the value of an app field.
:param str field:
The name of the app field. Trying to set immutable fields
``uuid`` or ``key`` will raise a ValueError.
:param value:
The new value of the app field.
:raises: Va... | Sets the value of an app field.
:param str field:
The name of the app field. Trying to set immutable fields
``uuid`` or ``key`` will raise a ValueError.
:param value:
The new value of the app field.
:raises: ValueError |
def copy_children(self, foreign_id, existing_node):
'''
Initiates copying of tree, with existing_node acting as root
'''
url = "{}/api/v2/pages/{}/".format(self.base_url, foreign_id)
self.log(
ACTION,
"Copying Children",
{"existing node type": ... | Initiates copying of tree, with existing_node acting as root |
def nodes(self):
    """
    Node positions, computed lazily on first access.

    When no explicit node information was supplied, the positions are
    obtained from ``layout_nodes`` the first time they are requested
    and cached in ``self._nodes`` for all subsequent calls.
    """
    cached = self._nodes
    if cached is None:
        cached = layout_nodes(self, only_nodes=True)
        self._nodes = cached
    return cached
if no explicit node information was supplied. |
def add_noise(Y, sigma):
    """Return ``Y`` perturbed elementwise by Gaussian noise.

    Each element receives an independent draw from a normal
    distribution with mean 0 and standard deviation ``sigma``;
    the input array itself is not modified.

    :param Y: numpy array of any shape
    :param sigma: standard deviation of the noise
    :return: new array with the same shape as ``Y``
    """
    noise = np.random.normal(0, sigma, Y.shape)
    return Y + noise
def _cell_to_python(cell):
"""Convert a PyOpenXL's `Cell` object to the corresponding Python object."""
data_type, value = cell.data_type, cell.value
if type(cell) is EmptyCell:
return None
elif data_type == "f" and value == "=TRUE()":
return True
elif data_type == "f" and value == ... | Convert a PyOpenXL's `Cell` object to the corresponding Python object. |
def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Computes median across the DataFrame.
Args:
axis (int): The axis to take the median on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The median ... | Computes median across the DataFrame.
Args:
axis (int): The axis to take the median on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The median of the DataFrame. (Pandas series) |
def config(self, show_row_hdrs=True, show_col_hdrs=True,
show_col_hdr_in_cell=False, auto_resize=True):
"""
Override the in-class params:
@param show_row_hdrs : show row headers
@param show_col_hdrs : show column headers
@param show_col_hdr_in_cell : embed column... | Override the in-class params:
@param show_row_hdrs : show row headers
@param show_col_hdrs : show column headers
@param show_col_hdr_in_cell : embed column header in each cell
@param auto_resize : auto resize according to the size of terminal |
def slice(index, template):
"""Slice a template based on it's positional argument
Arguments:
index (int): Position at which to slice
template (str): Template to slice
Example:
>>> slice(0, "{cwd}/{0}/assets/{1}/{2}")
'{cwd}/{0}'
>>> slice(1, "{cwd}/{0}/assets/{1}/{2... | Slice a template based on it's positional argument
Arguments:
index (int): Position at which to slice
template (str): Template to slice
Example:
>>> slice(0, "{cwd}/{0}/assets/{1}/{2}")
'{cwd}/{0}'
>>> slice(1, "{cwd}/{0}/assets/{1}/{2}")
'{cwd}/{0}/assets/{1}' |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.