code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def load_spacy_rule(file_path: str) -> Dict:
"""
A spacy rule file is a json file.
Args:
file_path (str): path to a text file containing a spacy rule sets.
Returns: Dict as the representation of spacy rules
"""
with open(file_path) as fp:
return ... | A spacy rule file is a json file.
Args:
file_path (str): path to a text file containing a spacy rule sets.
Returns: Dict as the representation of spacy rules |
def _generate_union_cstor_funcs(self, union):
"""Emits standard union constructor."""
for field in union.all_fields:
enum_field_name = fmt_enum_name(field.name, union)
func_args = [] if is_void_type(
field.data_type) else fmt_func_args_from_fields([field])
... | Emits standard union constructor. |
def pick_config_ids(device_type, os, navigator):
"""
Select one random pair (device_type, os_id, navigator_id) from
all possible combinations matching the given os and
navigator filters.
:param os: allowed os(es)
:type os: string or list/tuple or None
:param navigator: allowed browser engin... | Select one random pair (device_type, os_id, navigator_id) from
all possible combinations matching the given os and
navigator filters.
:param os: allowed os(es)
:type os: string or list/tuple or None
:param navigator: allowed browser engine(s)
:type navigator: string or list/tuple or None
:p... |
def gsea_compute(data, gmt, n, weighted_score_type, permutation_type,
method, pheno_pos, pheno_neg, classes, ascending,
processes=1, seed=None, single=False, scale=False):
"""compute enrichment scores and enrichment nulls.
:param data: preprocessed expression dataframe or ... | compute enrichment scores and enrichment nulls.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: r... |
def count(cls, slug):
"""get the number of objects in the cache for a given slug
:param slug: cache key
:return: `int`
"""
from .models import Content
# Gets the count for a tag, hopefully form an in-memory cache.
cnt = cls._cache.get(slug)
if cnt is None... | get the number of objects in the cache for a given slug
:param slug: cache key
:return: `int` |
def find_sanitizer(self, name):
"""
Searches for a sanitizer function with given name. The name should
contain two parts separated from each other with a dot, the first
part being the module name while the second being name of the function
contained in the module, when it's being... | Searches for a sanitizer function with given name. The name should
contain two parts separated from each other with a dot, the first
part being the module name while the second being name of the function
contained in the module, when it's being prefixed with "sanitize_".
The lookup proc... |
def GetList(self):
"""Get Info on Current List
This is run in __init__ so you don't
have to run it again.
Access from self.schema
"""
# Build Request
soap_request = soap('GetList')
soap_request.add_parameter('listName', self.listName)
sel... | Get Info on Current List
This is run in __init__ so you don't
have to run it again.
Access from self.schema |
def com_google_fonts_check_name_postscriptname(ttFont, style, familyname):
""" Check name table: POSTSCRIPT_NAME entries. """
from fontbakery.utils import name_entry_id
failed = False
for name in ttFont['name'].names:
if name.nameID == NameID.POSTSCRIPT_NAME:
expected_value = f"{familyname}-{style}"
... | Check name table: POSTSCRIPT_NAME entries. |
def hydrate(self, values):
""" Convert PackStream values into native values.
"""
def hydrate_(obj):
if isinstance(obj, Structure):
try:
f = self.hydration_functions[obj.tag]
except KeyError:
# If we don't recogn... | Convert PackStream values into native values. |
def get_alias(self,
alias=None,
manifest=None,
verify=True,
sizes=False,
dcd=None):
# pylint: disable=too-many-arguments
"""
Get the blob hashes assigned to an alias.
:param alias: Alias name. You ... | Get the blob hashes assigned to an alias.
:param alias: Alias name. You almost definitely will only need to pass this argument.
:type alias: str
:param manifest: If you previously obtained a manifest, specify it here instead of ``alias``. You almost definitely won't need to do this.
:t... |
def parse_value(self, tup_tree):
"""
Parse a VALUE element and return its text content as a unicode string.
Whitespace is preserved.
The conversion of the text representation of the value to a CIM data
type object requires CIM type information which is not available on the
... | Parse a VALUE element and return its text content as a unicode string.
Whitespace is preserved.
The conversion of the text representation of the value to a CIM data
type object requires CIM type information which is not available on the
VALUE element and therefore will be done when pars... |
def html(self):
""" Create an ``lxml``-based HTML DOM from the response. The tree
will not have a root, so all queries need to be relative
(i.e. start with a dot).
"""
try:
from lxml import html
return html.fromstring(self.content)
except ImportErr... | Create an ``lxml``-based HTML DOM from the response. The tree
will not have a root, so all queries need to be relative
(i.e. start with a dot). |
def _execute_callback(self, status, message, job, res, err, stacktrace):
"""Execute the callback.
:param status: Job status. Possible values are "invalid" (job could not
be deserialized or was malformed), "failure" (job raised an error),
"timeout" (job timed out), or "success" (... | Execute the callback.
:param status: Job status. Possible values are "invalid" (job could not
be deserialized or was malformed), "failure" (job raised an error),
"timeout" (job timed out), or "success" (job finished successfully
and returned a result).
:type status: ... |
def parse(self):
"""
Parses everyting into a datastructure that looks like:
result = [{
'origin_filename': '',
'result_filename': '',
'origin_lines': [], // all lines of the original file
'result_lines': [], // all lines of the... | Parses everyting into a datastructure that looks like:
result = [{
'origin_filename': '',
'result_filename': '',
'origin_lines': [], // all lines of the original file
'result_lines': [], // all lines of the newest file
'added_l... |
def check_tweet(tweet, validation_checking=False):
"""
Ensures a tweet is valid and determines the type of format for the tweet.
Args:
tweet (dict/Tweet): the tweet payload
validation_checking (bool): check for valid key structure in a tweet.
"""
if "id" not in tweet:
raise... | Ensures a tweet is valid and determines the type of format for the tweet.
Args:
tweet (dict/Tweet): the tweet payload
validation_checking (bool): check for valid key structure in a tweet. |
def _marshall_value(value):
"""
Recursively transform `value` into an AttributeValue `dict`
:param mixed value: The value to encode
:rtype: dict
:raises ValueError: for unsupported types
Return the value as dict indicating the data type and transform or
recursively process the value if req... | Recursively transform `value` into an AttributeValue `dict`
:param mixed value: The value to encode
:rtype: dict
:raises ValueError: for unsupported types
Return the value as dict indicating the data type and transform or
recursively process the value if required. |
def resourceprep(string, allow_unassigned=False):
"""
Process the given `string` using the Resourceprep (`RFC 6122`_) profile. In
the error cases defined in `RFC 3454`_ (stringprep), a :class:`ValueError`
is raised.
"""
chars = list(string)
_resourceprep_do_mapping(chars)
do_normalizati... | Process the given `string` using the Resourceprep (`RFC 6122`_) profile. In
the error cases defined in `RFC 3454`_ (stringprep), a :class:`ValueError`
is raised. |
def _client_wrapper(attr, *args, **kwargs):
'''
Common functionality for running low-level API calls
'''
catch_api_errors = kwargs.pop('catch_api_errors', True)
func = getattr(__context__['docker.client'], attr, None)
if func is None or not hasattr(func, '__call__'):
raise SaltInvocation... | Common functionality for running low-level API calls |
def __clear_bp(self, aProcess):
"""
Restores the original permissions of the target pages.
@type aProcess: L{Process}
@param aProcess: Process object.
"""
lpAddress = self.get_address()
flNewProtect = aProcess.mquery(lpAddress).Protect
flNewProtect = ... | Restores the original permissions of the target pages.
@type aProcess: L{Process}
@param aProcess: Process object. |
def cache_url(url, model_dir=None, progress=True):
r"""Loads the Torch serialized object at the given URL.
If the object is already present in `model_dir`, it's deserialized and
returned. The filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the... | r"""Loads the Torch serialized object at the given URL.
If the object is already present in `model_dir`, it's deserialized and
returned. The filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of t... |
def extract(self, member, path="", set_attrs=True):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a differen... | Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mt... |
def is_compatible_assembly_level(self, ncbi_assembly_level):
"""Check if a given ncbi assembly level string matches the configured assembly levels."""
configured_ncbi_strings = [self._LEVELS[level] for level in self.assembly_level]
return ncbi_assembly_level in configured_ncbi_strings | Check if a given ncbi assembly level string matches the configured assembly levels. |
def add(self, game_object: Hashable, tags: Iterable[Hashable]=()) -> None:
"""
Add a game_object to the container.
game_object: Any Hashable object. The item to be added.
tags: An iterable of Hashable objects. Values that can be used to
retrieve a group containing the game... | Add a game_object to the container.
game_object: Any Hashable object. The item to be added.
tags: An iterable of Hashable objects. Values that can be used to
retrieve a group containing the game_object.
Examples:
container.add(MyObject())
container.add(My... |
def spkcov(spk, idcode, cover=None):
"""
Find the coverage window for a specified ephemeris object in a
specified SPK file.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkcov_c.html
:param spk: Name of SPK file.
:type spk: str
:param idcode: ID code of ephemeris object.
:ty... | Find the coverage window for a specified ephemeris object in a
specified SPK file.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkcov_c.html
:param spk: Name of SPK file.
:type spk: str
:param idcode: ID code of ephemeris object.
:type idcode: int
:param cover: Optional SPICE W... |
def preTranslate(self, tx, ty):
"""Calculate pre translation and replace current matrix."""
self.e += tx * self.a + ty * self.c
self.f += tx * self.b + ty * self.d
return self | Calculate pre translation and replace current matrix. |
def _quantile_function(self, alpha=0.5, smallest_count=None):
"""Return a function that returns the quantile values for this
histogram.
"""
total = float(self.total())
smallest_observed_count = min(itervalues(self))
if smallest_count is None:
smallest_count ... | Return a function that returns the quantile values for this
histogram. |
def catch(ignore=[],
was_doing="something important",
helpfull_tips="you should use a debugger",
gbc=None):
"""
Catch, prepare and log error
:param exc_cls: error class
:param exc: exception
:param tb: exception traceback
"""
exc_cls, exc, tb=sys.exc_info()
... | Catch, prepare and log error
:param exc_cls: error class
:param exc: exception
:param tb: exception traceback |
def list_resources_with_long_filters(list_method,
filter_attr, filter_values, **params):
"""List neutron resources with handling RequestURITooLong exception.
If filter parameters are long, list resources API request leads to
414 error (URL is too long). For such case, t... | List neutron resources with handling RequestURITooLong exception.
If filter parameters are long, list resources API request leads to
414 error (URL is too long). For such case, this method split
list parameters specified by a list_field argument into chunks
and call the specified list_method repeatedly... |
def all_resource_urls(query):
''' Get all the URLs for every resource '''
urls = []
next = True
while next:
response = requests.get(query)
json_data = json.loads(response.content)
for resource in json_data['results']:
urls.append(resource['url'])
if bool(json_... | Get all the URLs for every resource |
def _create(cls, repo, path, resolve, reference, force, logmsg=None):
"""internal method used to create a new symbolic reference.
If resolve is False, the reference will be taken as is, creating
a proper symbolic reference. Otherwise it will be resolved to the
corresponding object and a ... | internal method used to create a new symbolic reference.
If resolve is False, the reference will be taken as is, creating
a proper symbolic reference. Otherwise it will be resolved to the
corresponding object and a detached symbolic reference will be created
instead |
def _conf(cls, opts):
"""Setup logging via ini-file from logging_conf_file option."""
logging_conf = cls.config.get('core', 'logging_conf_file', None)
if logging_conf is None:
return False
if not os.path.exists(logging_conf):
# FileNotFoundError added only in Pyt... | Setup logging via ini-file from logging_conf_file option. |
def add_term(self,term_obj):
"""
Adds a term to the term layer
@type term_obj: L{Cterm}
@param term_obj: the term object
"""
if self.term_layer is None:
self.term_layer = Cterms(type=self.type)
self.root.append(self.term_layer.get_node())
s... | Adds a term to the term layer
@type term_obj: L{Cterm}
@param term_obj: the term object |
def get_active(cls, database, conditions=""):
"""
Gets active data from system.parts table
:param database: A database object to fetch data from.
:param conditions: WHERE clause conditions. Database and active conditions are added automatically
:return: A list of SystemPart ... | Gets active data from system.parts table
:param database: A database object to fetch data from.
:param conditions: WHERE clause conditions. Database and active conditions are added automatically
:return: A list of SystemPart objects |
def read_range(self, begin: str, end: str) -> int:
"""
Consume head byte if it is >= begin and <= end else return false
Same as 'a'..'z' in BNF
"""
if self.read_eof():
return False
c = self._stream.peek_char
if begin <= c <= end:
self._stre... | Consume head byte if it is >= begin and <= end else return false
Same as 'a'..'z' in BNF |
def wait_for_close(
raiden: 'RaidenService',
payment_network_id: PaymentNetworkID,
token_address: TokenAddress,
channel_ids: List[ChannelID],
retry_timeout: float,
) -> None:
"""Wait until all channels are closed.
Note:
This does not time out, use gevent.Timeout.... | Wait until all channels are closed.
Note:
This does not time out, use gevent.Timeout. |
def delete_comment(self, project, work_item_id, comment_id):
"""DeleteComment.
[Preview API] Delete a comment on a work item.
:param str project: Project ID or project name
:param int work_item_id: Id of a work item.
:param int comment_id:
"""
route_values = {}
... | DeleteComment.
[Preview API] Delete a comment on a work item.
:param str project: Project ID or project name
:param int work_item_id: Id of a work item.
:param int comment_id: |
def build_def_use(graph, lparams):
"""
Builds the Def-Use and Use-Def (DU/UD) chains of the variables of the
method.
"""
analysis = reach_def_analysis(graph, lparams)
UD = defaultdict(list)
for node in graph.rpo:
for i, ins in node.get_loc_with_ins():
for var in ins.get_... | Builds the Def-Use and Use-Def (DU/UD) chains of the variables of the
method. |
def ConvertValues(default_metadata, values, token=None, options=None):
"""Converts a set of RDFValues into a set of export-friendly RDFValues.
Args:
default_metadata: export.ExportedMetadata instance with basic information
about where the values come from. This metadata will be passed to
exporters.... | Converts a set of RDFValues into a set of export-friendly RDFValues.
Args:
default_metadata: export.ExportedMetadata instance with basic information
about where the values come from. This metadata will be passed to
exporters.
values: Values to convert. They should be of the same type.
token: ... |
def login(self):
"""
View function to log a user in. Supports html and json requests.
"""
form = self._get_form('SECURITY_LOGIN_FORM')
if form.validate_on_submit():
try:
self.security_service.login_user(form.user, form.remember.data)
except... | View function to log a user in. Supports html and json requests. |
def split(*items):
"""Split samples into all possible genomes for alignment.
"""
out = []
for data in [x[0] for x in items]:
dis_orgs = data["config"]["algorithm"].get("disambiguate")
if dis_orgs:
if not data.get("disambiguate", None):
data["disambiguate"] = {... | Split samples into all possible genomes for alignment. |
def refresh_rooms(self):
"""Calls GET /joined_rooms to refresh rooms list."""
for room_id in self.user_api.get_joined_rooms()["joined_rooms"]:
self._rooms[room_id] = MatrixRoom(room_id, self.user_api) | Calls GET /joined_rooms to refresh rooms list. |
def __get_average_inter_cluster_distance(self, entry):
"""!
@brief Calculates average inter cluster distance between current and specified clusters.
@param[in] entry (cfentry): Clustering feature to which distance should be obtained.
@return (double) Average inter... | !
@brief Calculates average inter cluster distance between current and specified clusters.
@param[in] entry (cfentry): Clustering feature to which distance should be obtained.
@return (double) Average inter cluster distance. |
def close(self):
""" Close the policy instance. """
self._logger.info("Closing")
if self._opened:
self._opened = False
else:
self._logger.warning(
"close() called, but connection policy was alredy closed")
return | Close the policy instance. |
def setex(self, key, seconds, value):
"""Set the value and expiration of a key.
If seconds is float it will be multiplied by 1000
coerced to int and passed to `psetex` method.
:raises TypeError: if seconds is neither int nor float
"""
if isinstance(seconds, float):
... | Set the value and expiration of a key.
If seconds is float it will be multiplied by 1000
coerced to int and passed to `psetex` method.
:raises TypeError: if seconds is neither int nor float |
def process_xlsx(content):
"""
Turn Excel file contents into Tarbell worksheet data
"""
data = {}
workbook = xlrd.open_workbook(file_contents=content)
worksheets = [w for w in workbook.sheet_names() if not w.startswith('_')]
for worksheet_name in worksheets:
if worksheet_name.startsw... | Turn Excel file contents into Tarbell worksheet data |
def _get_ignore_from_manifest_lines(lines):
"""Gather the various ignore patterns from a MANIFEST.in.
'lines' should be a list of strings with comments removed
and continuation lines joined.
Returns a list of standard ignore patterns and a list of regular
expressions to ignore.
"""
ignore ... | Gather the various ignore patterns from a MANIFEST.in.
'lines' should be a list of strings with comments removed
and continuation lines joined.
Returns a list of standard ignore patterns and a list of regular
expressions to ignore. |
def save_scenario(self, scenario_file_path=None):
"""Save current scenario to a text file.
You can use the saved scenario with the batch runner.
:param scenario_file_path: A path to the scenario file.
:type scenario_file_path: str
"""
# Validate Input
warning_ti... | Save current scenario to a text file.
You can use the saved scenario with the batch runner.
:param scenario_file_path: A path to the scenario file.
:type scenario_file_path: str |
def __create_core_and_model_object_copies(self, selection, smart_selection_adaption):
"""Copy all elements of a selection.
The method copies all objects and modifies the selection before copying the elements if the smart flag is true.
The smart selection adaption is by default enabled. In any... | Copy all elements of a selection.
The method copies all objects and modifies the selection before copying the elements if the smart flag is true.
The smart selection adaption is by default enabled. In any case the selection is reduced to have one parent
state that is used as the root of copy... |
def validate(collection, onerror: Callable[[str, List], None] = None):
"""Validate BioC data structure."""
BioCValidator(onerror).validate(collection) | Validate BioC data structure. |
def fromPy(cls, val, typeObj, vldMask=None):
"""
:param val: value of python type bool or None
:param typeObj: instance of HdlType
:param vldMask: None vldMask is resolved from val,
if is 0 value is invalidated
if is 1 value has to be valid
"""
vld... | :param val: value of python type bool or None
:param typeObj: instance of HdlType
:param vldMask: None vldMask is resolved from val,
if is 0 value is invalidated
if is 1 value has to be valid |
def bind_env(self, action, env):
""" Bind an environment variable to an argument action. The env
value will traditionally be something uppercase like `MYAPP_FOO_ARG`.
Note that the ENV value is assigned using `set_defaults()` and as such
it will be overridden if the argument is set via... | Bind an environment variable to an argument action. The env
value will traditionally be something uppercase like `MYAPP_FOO_ARG`.
Note that the ENV value is assigned using `set_defaults()` and as such
it will be overridden if the argument is set via `parse_args()` |
def str_dict_keys(a_dict):
"""return a modified dict where all the keys that are anything but str get
converted to str.
E.g.
>>> result = str_dict_keys({u'name': u'Peter', u'age': 99, 1: 2})
>>> # can't compare whole dicts in doctests
>>> result['name']
u'Peter'
>>> result['ag... | return a modified dict where all the keys that are anything but str get
converted to str.
E.g.
>>> result = str_dict_keys({u'name': u'Peter', u'age': 99, 1: 2})
>>> # can't compare whole dicts in doctests
>>> result['name']
u'Peter'
>>> result['age']
99
>>> result[1]
... |
def handle_timeouts(self):
"""Handle timeouts. Raise timeouted operations with a OperationTimeout
in the associated coroutine (if they are still alive and the operation
hasn't actualy sucessfuly completed) or, if the operation has a
weak_timeout flag, update the timeout point and add... | Handle timeouts. Raise timeouted operations with a OperationTimeout
in the associated coroutine (if they are still alive and the operation
hasn't actualy sucessfuly completed) or, if the operation has a
weak_timeout flag, update the timeout point and add it back in the
heapq.
... |
def _chk_type(recdef, rec):
"""Checks if type of `rec` matches `recdef`
:param recdef: instance of RecordDef
:param rec: instance of Record
:raises: `TypeError`
"""
if len(recdef) != len(rec):
raise TypeError("Number of columns (%d) is different from ... | Checks if type of `rec` matches `recdef`
:param recdef: instance of RecordDef
:param rec: instance of Record
:raises: `TypeError` |
def result(self, wait=0):
"""
return the full list of results.
:param int wait: how many milliseconds to wait for a result
:return: an unsorted list of results
"""
if self.started:
return result(self.id, wait=wait, cached=self.cached) | return the full list of results.
:param int wait: how many milliseconds to wait for a result
:return: an unsorted list of results |
def _checkResponseWriteData(payload, writedata):
"""Check that the write data as given in the response is correct.
The bytes 2 and 3 (zero based counting) in the payload holds the write data.
Args:
* payload (string): The payload
* writedata (string): The data to write, length should be 2 ... | Check that the write data as given in the response is correct.
The bytes 2 and 3 (zero based counting) in the payload holds the write data.
Args:
* payload (string): The payload
* writedata (string): The data to write, length should be 2 bytes.
Raises:
TypeError, ValueError |
def stop_recording(self):
"""Stop recording from the audio source."""
self._stop_recording.set()
with self._source_lock:
self._source.stop()
self._recording = False | Stop recording from the audio source. |
def add_episode(self, text, text_format, title, author, summary=None,
publish_date=None, synthesizer='watson', synth_args=None, sentence_break='. '):
"""
Add a new episode to the podcast.
:param text:
See :meth:`Episode`.
:param text_format:
S... | Add a new episode to the podcast.
:param text:
See :meth:`Episode`.
:param text_format:
See :meth:`Episode`.
:param title:
See :meth:`Episode`.
:param author:
See :meth:`Episode`.
:param summary:
See :meth:`Episode`.
... |
def ls(ctx, name):
"""List EMR instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
client = session.client('emr')
results = client.list_clusters(
ClusterStates=['RUNNING', 'STARTING', 'BOOTSTRAPPING', 'WAITING']
)
for cluster in results['Clusters']:
click.echo("{0... | List EMR instances |
def apply_vcc(self,vcc):
"""
Applies velocity contrast curve constraint to each population
See :func:`vespa.stars.StarPopulation.apply_vcc`;
all arguments passed to that function for each population.
"""
if 'secondary spectrum' not in self.constraints:
self.... | Applies velocity contrast curve constraint to each population
See :func:`vespa.stars.StarPopulation.apply_vcc`;
all arguments passed to that function for each population. |
def _validate_importers(importers):
"""Validates the importers and decorates the callables with our output
formatter.
"""
# They could have no importers, that's chill
if importers is None:
return None
def _to_importer(priority, func):
assert isinstance(priority, int), priority
... | Validates the importers and decorates the callables with our output
formatter. |
def GetEntries(
self, parser_mediator, cookie_data=None, url=None, **kwargs):
"""Extracts event objects from the cookie.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (bytes): cookie data.
url (str): URL or path where the cookie got set.
"""
fields = cooki... | Extracts event objects from the cookie.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (bytes): cookie data.
url (str): URL or path where the cookie got set. |
def hide(self):
"""Overrides Qt Method"""
for widget in self.replace_widgets:
widget.hide()
QWidget.hide(self)
self.visibility_changed.emit(False)
if self.editor is not None:
self.editor.setFocus()
self.clear_matches() | Overrides Qt Method |
def get_groups_by_userid(cls, userid, request):
""" Return group identifiers of user with id :userid:
Used by Ticket-based auth as `callback` kwarg.
"""
try:
cache_request_user(cls, request, userid)
except Exception as ex:
log.error(str(ex))
f... | Return group identifiers of user with id :userid:
Used by Ticket-based auth as `callback` kwarg. |
def get_root_url(url, warn=True):
"""
Get the "root URL" for a URL, as described in the LuminosoClient
documentation.
"""
parsed_url = urlparse(url)
# Make sure it's a complete URL, not a relative one
if not parsed_url.scheme:
raise ValueError('Please supply a full URL, beginning wi... | Get the "root URL" for a URL, as described in the LuminosoClient
documentation. |
def seek(self, offset, whence=os.SEEK_SET):
"""Seek to position in stream, see file.seek"""
pos = None
if whence == os.SEEK_SET:
pos = self.offset + offset
elif whence == os.SEEK_CUR:
pos = self.tell() + offset
elif whence == os.SEEK_END:
pos ... | Seek to position in stream, see file.seek |
def as_string(self, default_from=None):
"""Creates the email"""
encoding = self.charset or 'utf-8'
attachments = self.attachments or []
if len(attachments) == 0 and not self.html:
# No html content and zero attachments means plain text
msg = self._mimetext(self... | Creates the email |
def td_waveform_to_fd_waveform(waveform, out=None, length=None,
buffer_length=100):
""" Convert a time domain into a frequency domain waveform by FFT.
As a waveform is assumed to "wrap" in the time domain one must be
careful to ensure the waveform goes to 0 at both "bo... | Convert a time domain into a frequency domain waveform by FFT.
As a waveform is assumed to "wrap" in the time domain one must be
careful to ensure the waveform goes to 0 at both "boundaries". To
ensure this is done correctly the waveform must have the epoch set such
the merger time is at... |
def approve(self, peer_jid):
"""
(Pre-)approve a subscription request from `peer_jid`.
:param peer_jid: The peer to (pre-)approve.
This sends a ``"subscribed"`` presence to the peer; if the peer has
previously asked for a subscription, this will seal the deal and create
... | (Pre-)approve a subscription request from `peer_jid`.
:param peer_jid: The peer to (pre-)approve.
This sends a ``"subscribed"`` presence to the peer; if the peer has
previously asked for a subscription, this will seal the deal and create
the subscription.
If the peer has not r... |
def save_state(self, fname=None):
"""Saves state to pickle"""
if not fname:
date = datetime.datetime.now().strftime("%Y-%m-%dT%Hh%Mm%Ss")
fname = date + "_energy_" + str(self.energy()) + ".state"
with open(fname, "wb") as fh:
pickle.dump(self.state, fh) | Saves state to pickle |
def _read(self):
"""Get next packet from transport.
:return: parsed packet in a tuple with message type and payload
:rtype: :py:class:`collections.namedtuple`
"""
raw_response = self.transport.receive()
response = Packet.parse(raw_response)
# FIXME
if re... | Get next packet from transport.
:return: parsed packet in a tuple with message type and payload
:rtype: :py:class:`collections.namedtuple` |
def get_paths_from_to(self, goobj_start, goid_end=None, dn0_up1=True):
"""Get a list of paths from goobj_start to either top or goid_end."""
paths = []
# Queue of terms to be examined (and storage for their paths)
working_q = cx.deque([[goobj_start]])
# Loop thru GO terms until w... | Get a list of paths from goobj_start to either top or goid_end. |
def has(self, relation, operator=">=", count=1, boolean="and", extra=None):
"""
Add a relationship count condition to the query.
:param relation: The relation to count
:type relation: str
:param operator: The operator
:type operator: str
:param count: The count... | Add a relationship count condition to the query.
:param relation: The relation to count
:type relation: str
:param operator: The operator
:type operator: str
:param count: The count
:type count: int
:param boolean: The boolean value
:type boolean: str
... |
def _include_exclude(file_path, include=None, exclude=None):
"""Check if file matches one of include filters and not in exclude filter.
:param file_path: Path to the file.
:param include: Tuple containing patterns to which include from result.
:param exclude: Tuple containing patterns to which exclude ... | Check if file matches one of include filters and not in exclude filter.
:param file_path: Path to the file.
:param include: Tuple containing patterns to which include from result.
:param exclude: Tuple containing patterns to which exclude from result. |
def make_links_absolute(self, base_url=None, resolve_base_href=True,
handle_failures=None):
"""
Make all links in the document absolute, given the
``base_url`` for the document (the full URL where the document
came from), or if no ``base_url`` is given, then t... | Make all links in the document absolute, given the
``base_url`` for the document (the full URL where the document
came from), or if no ``base_url`` is given, then the ``.base_url``
of the document.
If ``resolve_base_href`` is true, then any ``<base href>``
tags in the document a... |
def execute_operation(self, method="GET", ops_path="", payload=""):
"""
Executes a Kubernetes operation using the specified method against a path.
This is part of the low-level API.
:Parameters:
- `method`: The HTTP method to use, defaults to `GET`
- `ops_path`: Th... | Executes a Kubernetes operation using the specified method against a path.
This is part of the low-level API.
:Parameters:
- `method`: The HTTP method to use, defaults to `GET`
- `ops_path`: The path of the operation, for example, `/api/v1/events` which would result in an overall:... |
def convertToPDF(self, from_page=0, to_page=-1, rotate=0):
"""Convert document to PDF selecting page range and optional rotation. Output bytes object."""
if self.isClosed or self.isEncrypted:
raise ValueError("operation illegal for closed / encrypted doc")
return _fitz.Document_conv... | Convert document to PDF selecting page range and optional rotation. Output bytes object. |
def get_pages_from_id_list(id_list):
'''
Accepts: list of page ids
Returns: list of specific page objects
'''
page_list = []
for id_ in id_list:
try:
page_list.append(
Page.objects.get(id=id_).specific)
except ObjectDoesNotExist:
logging.er... | Accepts: list of page ids
Returns: list of specific page objects |
def is_error(self):
    """Return ``True`` if the job has errored out, ``False`` otherwise.

    The job is flagged as errored only when *both* sources agree:
    the qstat output (``_grep_qstat``) and the status file
    (``_grep_status``) must each report ``'error'``.
    """
    qstat = self._grep_qstat('error')
    err = self._grep_status('error')
    # bool() normalizes whatever truthy value the grep helpers return
    # (match object, string, ...) into an actual boolean, replacing the
    # original `if ...: return True / return False` anti-pattern.
    return bool(qstat and err)
def connect(self):
"""
Connects to Redis
"""
logger.info("Connecting to Redis on {host}:{port}...".format(
host=self.host, port=self.port))
super(RedisSubscriber, self).connect()
logger.info("Successfully connected to Redis")
# Subscribe to channel
... | Connects to Redis |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'metadata') and self.metadata is not None:
_dict['metadata'] = self.metadata
if ha... | Return a json dictionary representing this model. |
def collapse_spaces(text):
    """Collapse newlines, tabs and repeated spaces into single spaces.

    Non-string inputs are returned unchanged; string inputs are also
    stripped of leading/trailing whitespace.
    """
    # Only strings are normalized; anything else passes through untouched.
    if isinstance(text, six.string_types):
        return COLLAPSE_RE.sub(WS, text).strip(WS)
    return text
def _reverse_index(self):
"""
Move the cursor up one row in the same column. If the cursor is at the
first row, create a new row at the top.
"""
if self.y == 0:
# If the cursor is currently at the first row, then scroll the
# screen up.
self.di... | Move the cursor up one row in the same column. If the cursor is at the
first row, create a new row at the top. |
def thumbnail(self, img_url, size, crop=None, bg=None, quality=85,
storage_type=None, bucket_name=None):
"""
:param img_url: url img - '/assets/media/summer.jpg'
:param size: size return thumb - '100x100'
:param crop: crop return thumb - 'fit' or None
:param bg:... | :param img_url: url img - '/assets/media/summer.jpg'
:param size: size return thumb - '100x100'
:param crop: crop return thumb - 'fit' or None
:param bg: tuple color or None - (255, 255, 255, 0)
:param quality: JPEG quality 1-100
:param storage_type: either 's3' or None
:... |
def _parse_variable(s: str, curr_row: int, curr_col: int) -> Tuple:
'''
$A,$2 <- constant col and row
$row,$2 <- current col, row 2
$A+1,$2 <- col A + 1 = 2, row 2
$row+1,$2 <- current col + 1, row 2
$A,$2-1 <-- col A, row 2 - 1 = 1
'''
def parse_expressi... | $A,$2 <- constant col and row
$row,$2 <- current col, row 2
$A+1,$2 <- col A + 1 = 2, row 2
$row+1,$2 <- current col + 1, row 2
$A,$2-1 <-- col A, row 2 - 1 = 1 |
def edit(self, **kwargs):
""" Edit an object.
Parameters:
kwargs (dict): Dict of settings to edit.
Example:
{'type': 1,
'id': movie.ratingKey,
'collection[0].tag.tag': 'Super',
'collection.locked': 0}
... | Edit an object.
Parameters:
kwargs (dict): Dict of settings to edit.
Example:
{'type': 1,
'id': movie.ratingKey,
'collection[0].tag.tag': 'Super',
'collection.locked': 0} |
def extend_course(course, enterprise_customer, request):
"""
Extend a course with more details needed for the program landing page.
In particular, we add the following:
* `course_image_uri`
* `course_title`
* `course_level_type`
* `course_short_description`
... | Extend a course with more details needed for the program landing page.
In particular, we add the following:
* `course_image_uri`
* `course_title`
* `course_level_type`
* `course_short_description`
* `course_full_description`
* `course_effort`
* `expected... |
def render(self, context, instance, placeholder):
''' Allows this plugin to use templates designed for a list of locations. '''
context = super(LocationListPlugin,self).render(context,instance,placeholder)
context['location_list'] = Location.objects.filter(status=Location.StatusChoices.active... | Allows this plugin to use templates designed for a list of locations. |
def make_backups(self, block_id):
"""
If we're doing backups on a regular basis, then
carry them out here if it is time to do so.
This method does nothing otherwise.
Return None on success
Abort on failure
"""
assert self.setup, "Not set up yet. Call .d... | If we're doing backups on a regular basis, then
carry them out here if it is time to do so.
This method does nothing otherwise.
Return None on success
Abort on failure |
def run(self, host=None, port=None, debug=None, use_reloader=None,
open_browser=False):
"""
Starts a server to render the README.
"""
if host is None:
host = self.config['HOST']
if port is None:
port = self.config['PORT']
if debug is No... | Starts a server to render the README. |
def select_inverse(self, name="default", executor=None):
"""Invert the selection, i.e. what is selected will not be, and vice versa
:param str name:
:param executor:
:return:
"""
def create(current):
return selections.SelectionInvert(current)
self._s... | Invert the selection, i.e. what is selected will not be, and vice versa
:param str name:
:param executor:
:return: |
def do_set_logical_switch_config(self, line):
"""set_logical_switch_config <peer> <logical switch> <key> <value>
eg. set_logical_switch_config sw1 running LogicalSwitch7 \
lost-connection-behavior failStandaloneMode
"""
def f(p, args):
try:
target, lsw, key, ... | set_logical_switch_config <peer> <logical switch> <key> <value>
eg. set_logical_switch_config sw1 running LogicalSwitch7 \
lost-connection-behavior failStandaloneMode |
async def load_message(obj, msg_type, msg=None, field_archiver=None):
"""
Loads message if the given type from the object.
Supports reading directly to existing message.
:param obj:
:param msg_type:
:param msg:
:param field_archiver:
:return:
"""
msg = msg_type() if msg is None ... | Loads message if the given type from the object.
Supports reading directly to existing message.
:param obj:
:param msg_type:
:param msg:
:param field_archiver:
:return: |
def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
retry_message="", cmd_env=None):
"""Run a command and retry until success or max_retries is reached.
:param: cmd: str: The apt command to run.
:param: max_retries: int: The number of retries to attempt on a ... | Run a command and retry until success or max_retries is reached.
:param: cmd: str: The apt command to run.
:param: max_retries: int: The number of retries to attempt on a fatal
command. Defaults to CMD_RETRY_COUNT.
:param: retry_exitcodes: tuple: Optional additional exit codes to retry.
Def... |
def _set_rsvp(self, v, load=False):
"""
Setter method for rsvp, mapped from YANG variable /mpls_state/rsvp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_rsvp is considered as a private
method. Backends looking to populate this variable should
do so ... | Setter method for rsvp, mapped from YANG variable /mpls_state/rsvp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_rsvp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rsvp() directly.
Y... |
def queue(p_queue, host=None):
    '''Construct a path to the queue dir for a queue'''
    # Without a host, the queue dir lives directly under the queue root.
    if host is None:
        return _path(p_queue, _c.FSQ_QUEUE)
    # With a host, anchor the queue dir under that host's hosts() root.
    host_root = _path(host, root=hosts(p_queue))
    return _path(_c.FSQ_QUEUE, root=host_root)
def strace_set_buffer_size(self, size):
"""Sets the STRACE buffer size.
Args:
self (JLink): the ``JLink`` instance.
Returns:
``None``
Raises:
JLinkException: on error.
"""
size = ctypes.c_uint32(size)
res = self._dll.JLINK_STRACE_C... | Sets the STRACE buffer size.
Args:
self (JLink): the ``JLink`` instance.
Returns:
``None``
Raises:
JLinkException: on error. |
def allocate_port():
"""Allocate an unused port.
There is a small race condition here (between the time we allocate the
port, and the time it actually gets used), but for the purposes for which
this function gets used it isn't a problem in practice.
"""
sock = socket.socket()
try:
s... | Allocate an unused port.
There is a small race condition here (between the time we allocate the
port, and the time it actually gets used), but for the purposes for which
this function gets used it isn't a problem in practice. |
def getNetworkSummary(self, suid, verbose=None):
"""
Returns summary of collection containing the specified network.
:param suid: Cytoscape Collection/Subnetwork SUID
:param verbose: print more
:returns: 200: successful operation
"""
surl=self.___url
sv... | Returns summary of collection containing the specified network.
:param suid: Cytoscape Collection/Subnetwork SUID
:param verbose: print more
:returns: 200: successful operation |
def js_click(self, selector, by=By.CSS_SELECTOR):
""" Clicks an element using pure JS. Does not use jQuery. """
selector, by = self.__recalculate_selector(selector, by)
if by == By.LINK_TEXT:
message = (
"Pure JavaScript doesn't support clicking by Link Text. "
... | Clicks an element using pure JS. Does not use jQuery. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.