code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def get_field_def(
schema: GraphQLSchema, parent_type: GraphQLType, field_node: FieldNode
) -> Optional[GraphQLField]:
"""Get field definition.
Not exactly the same as the executor's definition of `get_field_def()`, in this
statically evaluated environment we do not always have an Object type, and need... | Get field definition.
Not exactly the same as the executor's definition of `get_field_def()`, in this
statically evaluated environment we do not always have an Object type, and need
to handle Interface and Union types. |
def update(self, feedforwardInputI, feedforwardInputE, v, recurrent=True,
envelope=False, iSpeedTuning=False, enforceDale=True):
"""
Do one update of the CAN network, of length self.dt.
:param feedforwardInputI: The feedforward input to inhibitory cells.
:param feedforwardInputR: The feedfo... | Do one update of the CAN network, of length self.dt.
:param feedforwardInputI: The feedforward input to inhibitory cells.
:param feedforwardInputE: The feedforward input to excitatory cells.
:param v: The current velocity.
:param recurrent: Whether or not recurrent connections should be used.
:param... |
def cat_top_keywords(self, session, cat, up=True, offset=0, offsets=[]):
'''Get top keywords in a specific category'''
print 'CAT:%s, level:%s'%(str(cat), str(cat.level))
print 'OFFSET: %d'%offset
response = []
if not offsets or offset==0:
url = 'http://top.taobao.c... | Get top keywords in a specific category |
def add_namespaces(spec_dict):
"""Add namespace convenience keys, list, list_{short|long}, to_{short|long}"""
for ns in spec_dict["namespaces"]:
spec_dict["namespaces"][ns]["list"] = []
spec_dict["namespaces"][ns]["list_long"] = []
spec_dict["namespaces"][ns]["list_short"] = []
... | Add namespace convenience keys, list, list_{short|long}, to_{short|long} |
def parse_info_frags(info_frags):
"""Import an info_frags.txt file and return a dictionary where each key
is a newly formed scaffold and each value is the list of bins and their
origin on the initial scaffolding.
"""
new_scaffolds = {}
with open(info_frags, "r") as info_frags_handle:
cu... | Import an info_frags.txt file and return a dictionary where each key
is a newly formed scaffold and each value is the list of bins and their
origin on the initial scaffolding. |
def __connect(self):
        """Open the DBAPI-2 connection for this instance's SQL URI.

        Resolves the method table registered for the URI's scheme,
        caches it, then invokes its connect entry and stores the live
        connection on the instance.
        """
        methods = _get_methods_by_uri(self.sqluri)
        self.__methods = methods
        connect_fn = methods[METHOD_CONNECT]
        self.__dbapi2_conn = connect_fn(self.sqluri)
def assign(self, object_type, object_uuid, overwrite=False):
"""Assign this persistent identifier to a given object.
Note, the persistent identifier must first have been reserved. Also,
if an existing object is already assigned to the pid, it will raise an
exception unless overwrite=Tru... | Assign this persistent identifier to a given object.
Note, the persistent identifier must first have been reserved. Also,
if an existing object is already assigned to the pid, it will raise an
exception unless overwrite=True.
:param object_type: The object type is a string that identif... |
def _placement_points_generator(self, skyline, width):
"""Returns a generator for the x coordinates of all the placement
points on the skyline for a given rectangle.
WARNING: In some cases could be duplicated points, but it is faster
to compute them twice than to remove them.
... | Returns a generator for the x coordinates of all the placement
points on the skyline for a given rectangle.
WARNING: In some cases could be duplicated points, but it is faster
to compute them twice than to remove them.
Arguments:
skyline (list): Skyline HSegment lis... |
def main():
""" Main method of the script """
parser = __build_option_parser()
args = parser.parse_args()
analyze_ws = AnalyzeWS(args)
try:
analyze_ws.set_file(args.file_[0])
except IOError:
print 'IOError raised while reading file. Exiting!'
sys.exit(3)
# Start the ... | Main method of the script |
def add_segy_view_widget(self, ind, widget, name=None):
"""
:param widget: The SegyViewWidget that will be added to the SegyTabWidget
:type widget: SegyViewWidget
"""
if self._context is None:
self._segywidgets.append(widget)
self.initialize()
... | :param widget: The SegyViewWidget that will be added to the SegyTabWidget
:type widget: SegyViewWidget |
def update(self, dict):
"""Set all field values from a dictionary.
For any key in `dict` that is also a field to store tags the
method retrieves the corresponding value from `dict` and updates
the `MediaFile`. If a key has the value `None`, the
corresponding property is deleted ... | Set all field values from a dictionary.
For any key in `dict` that is also a field to store tags the
method retrieves the corresponding value from `dict` and updates
the `MediaFile`. If a key has the value `None`, the
corresponding property is deleted from the `MediaFile`. |
def expand_to_vector(x, tensor_name=None, op_name=None, validate_args=False):
"""Transform a 0-D or 1-D `Tensor` to be 1-D.
For user convenience, many parts of the TensorFlow Probability API accept
inputs of rank 0 or 1 -- i.e., allowing an `event_shape` of `[5]` to be passed
to the API as either `5` or `[5]`.... | Transform a 0-D or 1-D `Tensor` to be 1-D.
For user convenience, many parts of the TensorFlow Probability API accept
inputs of rank 0 or 1 -- i.e., allowing an `event_shape` of `[5]` to be passed
to the API as either `5` or `[5]`. This function can be used to transform
such an argument to always be 1-D.
NO... |
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementat... | Return a string representing the default user agent. |
def _remove_none_values(dictionary):
""" Remove dictionary keys whose value is None """
return list(map(dictionary.pop,
[i for i in dictionary if dictionary[i] is None])) | Remove dictionary keys whose value is None |
def clone(self, fp):
        """Return a new generator over ``fp`` carrying this one's options.

        The max-header-length argument is passed as None so the policy
        setting (already adjusted on this instance) stays in control.
        """
        factory = self.__class__
        return factory(fp, self._mangle_from_, None, policy=self.policy)
def check_type(self, value, attr, data):
"""Customize check_type for handling containers."""
# Check the type in the standard way first, in order to fail quickly
# in case of invalid values.
root_value = super(InstructionParameter, self).check_type(
value, attr, data)
... | Customize check_type for handling containers. |
def extract_archive(archive, verbosity=0, outdir=None, program=None, interactive=True):
"""Extract given archive."""
util.check_existing_filename(archive)
if verbosity >= 0:
util.log_info("Extracting %s ..." % archive)
return _extract_archive(archive, verbosity=verbosity, interactive=interactive... | Extract given archive. |
def add_sibling(self, pos=None, **kwargs):
"""Adds a new node as a sibling to the current node object."""
pos = self._prepare_pos_var_for_add_sibling(pos)
if len(kwargs) == 1 and 'instance' in kwargs:
# adding the passed (unsaved) instance to the tree
newobj = kwargs['i... | Adds a new node as a sibling to the current node object. |
def _get_mean(self, sites, C, ln_y_ref, exp1, exp2, v1):
"""
Add site effects to an intensity.
Implements eq. 5
"""
# we do not support estimating of basin depth and instead
# rely on it being available (since we require it).
z1pt0 = sites.z1pt0
# we con... | Add site effects to an intensity.
Implements eq. 5 |
def create_payload(self):
"""Remove ``smart_class_parameter_id`` or ``smart_variable_id``"""
payload = super(OverrideValue, self).create_payload()
if hasattr(self, 'smart_class_parameter'):
del payload['smart_class_parameter_id']
if hasattr(self, 'smart_variable'):
... | Remove ``smart_class_parameter_id`` or ``smart_variable_id`` |
def get_config_values(config_path, section, default='default'):
"""
Parse ini config file and return a dict of values.
The provided section overrides any values in default section.
"""
values = {}
if not os.path.isfile(config_path):
raise IpaUtilsException(
'Config file not... | Parse ini config file and return a dict of values.
The provided section overrides any values in default section. |
def from_center(self, x=None, y=None, z=None, r=None,
theta=None, h=None, reference=None):
"""
Accepts a set of (:x:, :y:, :z:) ratios for Cartesian or
(:r:, :theta:, :h:) rations/angle for Polar and returns
:Vector: using :reference: as origin
"""
coo... | Accepts a set of (:x:, :y:, :z:) ratios for Cartesian or
(:r:, :theta:, :h:) rations/angle for Polar and returns
:Vector: using :reference: as origin |
def motif4struct_wei(W):
'''
Structural motifs are patterns of local connectivity. Motif frequency
is the frequency of occurrence of motifs around a node. Motif intensity
and coherence are weighted generalizations of motif frequency.
Parameters
----------
W : NxN np.ndarray
weighted... | Structural motifs are patterns of local connectivity. Motif frequency
is the frequency of occurrence of motifs around a node. Motif intensity
and coherence are weighted generalizations of motif frequency.
Parameters
----------
W : NxN np.ndarray
weighted directed connection matrix (all weig... |
def _select_options(self, options, keys, invert=False):
"""Select the provided keys out of an options object.
Selects the provided keys (or everything except the provided keys) out
of an options object.
"""
options = self._merge_options(options)
result = {}
for... | Select the provided keys out of an options object.
Selects the provided keys (or everything except the provided keys) out
of an options object. |
def assert_array(A, shape=None, uniform=None, ndim=None, size=None, dtype=None, kind=None):
r""" Asserts whether the given array or sparse matrix has the given properties
Parameters
----------
A : ndarray, scipy.sparse matrix or array-like
the array under investigation
shape : shape, optio... | r""" Asserts whether the given array or sparse matrix has the given properties
Parameters
----------
A : ndarray, scipy.sparse matrix or array-like
the array under investigation
shape : shape, optional, default=None
asserts if the array has the requested shape. Be careful with vectors
... |
def cluster(list_of_texts, num_clusters=3):
"""
Cluster a list of texts into a predefined number of clusters.
:param list_of_texts: a list of untokenized texts
:param num_clusters: the predefined number of clusters
:return: a list with the cluster id for each text, e.g. [0,1,0,0,2,2,1]
"""
... | Cluster a list of texts into a predefined number of clusters.
:param list_of_texts: a list of untokenized texts
:param num_clusters: the predefined number of clusters
:return: a list with the cluster id for each text, e.g. [0,1,0,0,2,2,1] |
def matches_query(self, key, query):
"""
增加查询条件,限制查询结果对象指定字段的值,与另外一个查询对象的返回结果相同。
:param key: 查询条件字段名
:param query: 查询对象
:type query: Query
:rtype: Query
"""
dumped = query.dump()
dumped['className'] = query._query_class._class_name
self._a... | 增加查询条件,限制查询结果对象指定字段的值,与另外一个查询对象的返回结果相同。
:param key: 查询条件字段名
:param query: 查询对象
:type query: Query
:rtype: Query |
def __extract_modules(self, loader, name, is_pkg):
""" if module found load module and save all attributes in the module found """
mod = loader.find_module(name).load_module(name)
""" find the attribute method on each module """
if hasattr(mod, '__method__'):
""" register ... | if module found load module and save all attributes in the module found |
def get_cached_manylinux_wheel(self, package_name, package_version, disable_progress=False):
"""
Gets the locally stored version of a manylinux wheel. If one does not exist, the function downloads it.
"""
cached_wheels_dir = os.path.join(tempfile.gettempdir(), 'cached_wheels')
if... | Gets the locally stored version of a manylinux wheel. If one does not exist, the function downloads it. |
def get_pose_error(target_pose, current_pose):
"""
Computes the error corresponding to target pose - current pose as a 6-dim vector.
The first 3 components correspond to translational error while the last 3 components
correspond to the rotational error.
Args:
target_pose: a 4x4 homogenous m... | Computes the error corresponding to target pose - current pose as a 6-dim vector.
The first 3 components correspond to translational error while the last 3 components
correspond to the rotational error.
Args:
target_pose: a 4x4 homogenous matrix for the target pose
current_pose: a 4x4 homog... |
def scan(self, restrict):
"""
Should scan another token and add it to the list, self.tokens,
and add the restriction to self.restrictions
"""
# Keep looking for a token, ignoring any in self.ignore
while True:
# Search the patterns for a match, with earlier
... | Should scan another token and add it to the list, self.tokens,
and add the restriction to self.restrictions |
def delete(cls, bucket_id):
        """Soft-delete a bucket.

        The record itself is never removed; only its ``deleted`` flag
        is set.

        :returns: True when the bucket was marked deleted, False when
            it does not exist or was already deleted.
        """
        target = cls.get(bucket_id)
        deletable = bool(target) and not target.deleted
        if deletable:
            target.deleted = True
        return deletable
def default_package(self):
"""
::
GET /:login/packages
:Returns: the default package for this datacenter
:rtype: :py:class:`dict` or ``None``
Requests all the packages in this datacenter, filters for the default,
and returns the cor... | ::
GET /:login/packages
:Returns: the default package for this datacenter
:rtype: :py:class:`dict` or ``None``
Requests all the packages in this datacenter, filters for the default,
and returns the corresponding dict, if a default has been defined. |
def count_above_mean(x):
    """
    Returns the number of values in x that are higher than the mean of x

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    m = np.mean(x)
    # count_nonzero counts directly without materialising the index
    # array that np.where(...)[0] built just to read its .size
    return np.count_nonzero(x > m)
def _set_config_path(self):
"""
Reads config path from environment variable CLOEEPY_CONFIG_PATH
and sets as instance attr
"""
self._path = os.getenv("CLOEEPY_CONFIG_PATH")
if self._path is None:
msg = "CLOEEPY_CONFIG_PATH is not set. Exiting..."
sy... | Reads config path from environment variable CLOEEPY_CONFIG_PATH
and sets as instance attr |
def bbin(obj: Union[str, Element]) -> str:
    """Emphasise built-in type names in bold markdown.

    :param obj: an ``Element`` or a name/id string
    :return: the element's name for ``Element`` inputs, ``**obj**``
        when the string is a builtin name, otherwise ``obj`` unchanged
    """
    if isinstance(obj, Element):
        return obj.name
    if obj in builtin_names:
        return f'**{obj}**'
    return obj
def add_menu(self, menu):
"""
Adds a sub-menu to the editor context menu.
Menu are put at the bottom of the context menu.
.. note:: to add a menu in the middle of the context menu, you can
always add its menuAction().
:param menu: menu to add
"""
se... | Adds a sub-menu to the editor context menu.
Menu are put at the bottom of the context menu.
.. note:: to add a menu in the middle of the context menu, you can
always add its menuAction().
:param menu: menu to add |
def prepare(args):
"""
%prog prepare genomesize *.fastq
Prepare MERACULOUS configuation file. Genome size should be entered in Mb.
"""
p = OptionParser(prepare.__doc__ + FastqNamings)
p.add_option("-K", default=51, type="int", help="K-mer size")
p.set_cpus(cpus=32)
opts, args = p.parse_... | %prog prepare genomesize *.fastq
Prepare MERACULOUS configuration file. Genome size should be entered in Mb. |
def remove(self, module=True, force=False, configuration=True, dry_run=False):
"""Remove this submodule from the repository. This will remove our entry
from the .gitmodules file and the entry in the .git/config file.
:param module: If True, the module checkout we point to will be deleted
... | Remove this submodule from the repository. This will remove our entry
from the .gitmodules file and the entry in the .git/config file.
:param module: If True, the module checkout we point to will be deleted
as well. If the module is currently on a commit which is not part
of any... |
def _isdst(dt):
"""Check if date is in dst.
"""
if type(dt) == datetime.date:
dt = datetime.datetime.combine(dt, datetime.datetime.min.time())
dtc = dt.replace(year=datetime.datetime.now().year)
if time.localtime(dtc.timestamp()).tm_isdst == 1:
return True
return False | Check if date is in dst. |
def _connect(self):
"""Connect to the Kafka Broker
This routine will repeatedly try to connect to the broker (with backoff
according to the retry policy) until it succeeds.
"""
def tryConnect():
self.connector = d = maybeDeferred(connect)
d.addCallback(cb... | Connect to the Kafka Broker
This routine will repeatedly try to connect to the broker (with backoff
according to the retry policy) until it succeeds. |
def send(self, pkt):
"""Send a packet"""
# Use the routing table to find the output interface
iff = pkt.route()[0]
if iff is None:
iff = conf.iface
# Assign the network interface to the BPF handle
if self.assigned_interface != iff:
try:
... | Send a packet |
def _claim(cls, cdata: Any) -> "Tileset":
"""Return a new Tileset that owns the provided TCOD_Tileset* object."""
self = object.__new__(cls) # type: Tileset
if cdata == ffi.NULL:
raise RuntimeError("Tileset initialized with nullptr.")
self._tileset_p = ffi.gc(cdata, lib.TCOD... | Return a new Tileset that owns the provided TCOD_Tileset* object. |
def _parse_triggered_hits(self, file_obj):
"""Parse and store triggered hits."""
for _ in range(self.n_triggered_hits):
dom_id, pmt_id = unpack('<ib', file_obj.read(5))
tdc_time = unpack('>I', file_obj.read(4))[0]
tot = unpack('<b', file_obj.read(1))[0]
tr... | Parse and store triggered hits. |
def __fetch(self, url, payload):
        """Perform a GET request against the groups.io API.

        :param url: endpoint URL
        :param payload: dict of query-string parameters
        :returns: the ``requests`` response object
        :raises requests.exceptions.HTTPError: on a non-2xx status
        """
        r = requests.get(url, params=payload, auth=self.auth, verify=self.verify)
        # raise_for_status already raises HTTPError on failure; the
        # previous try/except that re-raised the same exception
        # unchanged was redundant.
        r.raise_for_status()
        return r
def getMaxStmIdForStm(stm):
"""
Get maximum _instId from all assigments in statement
"""
maxId = 0
if isinstance(stm, Assignment):
return stm._instId
elif isinstance(stm, WaitStm):
return maxId
else:
for _stm in stm._iter_stms():
maxId = max(maxId, getMaxS... | Get maximum _instId from all assigments in statement |
def connect_paragraph(self, paragraph, paragraphs):
""" Create parent/child links to other paragraphs.
The paragraphs parameters is a list of all the paragraphs
parsed up till now.
The parent is the previous paragraph whose depth is less.
The parent's c... | Create parent/child links to other paragraphs.
The paragraphs parameters is a list of all the paragraphs
parsed up till now.
The parent is the previous paragraph whose depth is less.
The parent's children include this paragraph.
Called from parse_paragr... |
def _fetch_cached_output(self, items, result):
"""
First try to fetch all items from the cache.
The items are 'non-polymorphic', so only point to their base class.
If these are found, there is no need to query the derived data from the database.
"""
if not appsettings.FLU... | First try to fetch all items from the cache.
The items are 'non-polymorphic', so only point to their base class.
If these are found, there is no need to query the derived data from the database. |
async def get_config(self):
"""Return the configuration settings for this model.
:returns: A ``dict`` mapping keys to `ConfigValue` instances,
which have `source` and `value` attributes.
"""
config_facade = client.ModelConfigFacade.from_connection(
self.connectio... | Return the configuration settings for this model.
:returns: A ``dict`` mapping keys to `ConfigValue` instances,
which have `source` and `value` attributes. |
def _get_reciprocal(self):
"""Return the :class:`Arrow` that connects my origin and destination
in the opposite direction, if it exists.
"""
orign = self.portal['origin']
destn = self.portal['destination']
if (
destn in self.board.arrow and
... | Return the :class:`Arrow` that connects my origin and destination
in the opposite direction, if it exists. |
def smt_dataset(directory='data/',
train=False,
dev=False,
test=False,
train_filename='train.txt',
dev_filename='dev.txt',
test_filename='test.txt',
extracted_name='trees',
check_files=['trees... | Load the Stanford Sentiment Treebank dataset.
Semantic word spaces have been very useful but cannot express the meaning of longer phrases in
a principled way. Further progress towards understanding compositionality in tasks such as
sentiment detection requires richer supervised training and evaluation reso... |
def fake_keypress(self, key, repeat=1):
"""
Fake a keypress
Usage: C{keyboard.fake_keypress(key, repeat=1)}
Uses XTest to 'fake' a keypress. This is useful to send keypresses to some
applications which won't respond to keyboard.send_key()
@param key: they key to be sen... | Fake a keypress
Usage: C{keyboard.fake_keypress(key, repeat=1)}
Uses XTest to 'fake' a keypress. This is useful to send keypresses to some
applications which won't respond to keyboard.send_key()
@param key: they key to be sent (e.g. "s" or "<enter>")
@param repeat: number of t... |
def format_python2_stmts(python_stmts, show_tokens=False, showast=False,
showgrammar=False, compile_mode='exec'):
"""
formats python2 statements
"""
parser_debug = {'rules': False, 'transition': False,
'reduce': showgrammar,
'errorstack':... | formats python2 statements |
def from_resolver(cls, spec_resolver):
"""Creates a customized Draft4ExtendedValidator.
:param spec_resolver: resolver for the spec
:type resolver: :class:`jsonschema.RefResolver`
"""
spec_validators = cls._get_spec_validators(spec_resolver)
return validators.extend(Draf... | Creates a customized Draft4ExtendedValidator.
:param spec_resolver: resolver for the spec
:type resolver: :class:`jsonschema.RefResolver` |
def matrix2map(data_matrix, map_shape):
r"""Matrix to Map
This method transforms a 2D matrix to a 2D map
Parameters
----------
data_matrix : np.ndarray
Input data matrix, 2D array
map_shape : tuple
2D shape of the output map
Returns
-------
np.ndarray 2D map
R... | r"""Matrix to Map
This method transforms a 2D matrix to a 2D map
Parameters
----------
data_matrix : np.ndarray
Input data matrix, 2D array
map_shape : tuple
2D shape of the output map
Returns
-------
np.ndarray 2D map
Raises
------
ValueError
For ... |
def derationalize_denom(expr):
"""Try to de-rationalize the denominator of the given expression.
The purpose is to allow to reconstruct e.g. ``1/sqrt(2)`` from
``sqrt(2)/2``.
Specifically, this matches `expr` against the following pattern::
Mul(..., Rational(n, d), Pow(d, Rational(1, 2)), ...... | Try to de-rationalize the denominator of the given expression.
The purpose is to allow to reconstruct e.g. ``1/sqrt(2)`` from
``sqrt(2)/2``.
Specifically, this matches `expr` against the following pattern::
Mul(..., Rational(n, d), Pow(d, Rational(1, 2)), ...)
and returns a tuple ``(numerato... |
def kill(self, signal=None):
"""
Kill or send a signal to the container.
Args:
signal (str or int): The signal to send. Defaults to ``SIGKILL``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return... | Kill or send a signal to the container.
Args:
signal (str or int): The signal to send. Defaults to ``SIGKILL``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. |
def updateRules( self ):
        """
        Pushes the latest rule terms, sorted, to every query line widget.
        """
        sorted_terms = sorted(self._rules)
        for widget in self.lineWidgets():
            widget.setTerms(sorted_terms)
def main(argv=None):
"""Parse passed in cooked single HTML."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('collated_html', type=argparse.FileType('r'),
help='Path to the collated html'
' file (use - for stdin)')
parser.a... | Parse passed in cooked single HTML. |
def temperature_effectiveness_TEMA_H(R1, NTU1, Ntp, optimal=True):
r'''Returns temperature effectiveness `P1` of a TEMA H type heat exchanger
with a specified heat capacity ratio, number of transfer units `NTU1`,
and of number of tube passes `Ntp`. For the two tube pass case, there are
two possible or... | r'''Returns temperature effectiveness `P1` of a TEMA H type heat exchanger
with a specified heat capacity ratio, number of transfer units `NTU1`,
and of number of tube passes `Ntp`. For the two tube pass case, there are
two possible orientations, one inefficient and one efficient controlled
by the `op... |
def YiqToRgb(y, i, q):
'''Convert the color from YIQ coordinates to RGB.
Parameters:
:y:
The Y component value [0...1]
:i:
The I component value [0...1]
:q:
The Q component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
... | Convert the color from YIQ coordinates to RGB.
Parameters:
:y:
The Y component value [0...1]
:i:
The I component value [0...1]
:q:
The Q component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]... |
def _iterdump(self, file_name, headers=None):
"""
Function for dumping values from a file.
Should only be used by developers.
Args:
file_name: name of the file
headers: list of headers to pick
default:
["Discharge_Capacity", "Char... | Function for dumping values from a file.
Should only be used by developers.
Args:
file_name: name of the file
headers: list of headers to pick
default:
["Discharge_Capacity", "Charge_Capacity"]
Returns: pandas.DataFrame |
def parse(self, scope):
""" Parse Node
args:
scope (Scope): Scope object
raises:
SyntaxError
returns:
str
"""
assert (len(self.tokens) == 3)
expr = self.process(self.tokens, scope)
A, O, B = [
e[0] if isinsta... | Parse Node
args:
scope (Scope): Scope object
raises:
SyntaxError
returns:
str |
def cli(obj, purge):
"""List alert suppressions."""
client = obj['client']
if obj['output'] == 'json':
r = client.http.get('/blackouts')
click.echo(json.dumps(r['blackouts'], sort_keys=True, indent=4, ensure_ascii=False))
else:
timezone = obj['timezone']
headers = {
... | List alert suppressions. |
def get_swagger_view(title=None, url=None, patterns=None, urlconf=None):
"""
Returns schema view which renders Swagger/OpenAPI.
"""
class SwaggerSchemaView(APIView):
_ignore_model_permissions = True
exclude_from_schema = True
permission_classes = [AllowAny]
renderer_class... | Returns schema view which renders Swagger/OpenAPI. |
def get_base_route(cls):
"""Returns the route base to use for the current class."""
base_route = cls.__name__.lower()
if cls.base_route is not None:
base_route = cls.base_route
base_rule = parse_rule(base_route)
cls.base_args = [r[2] for r in base_rule]
... | Returns the route base to use for the current class. |
def _trace_dispatch(frame, event, arg):
# type: (Any, str, Optional[Any]) -> None
"""
This is the main hook passed to setprofile().
It implement python profiler interface.
Arguments are described in https://docs.python.org/2/library/sys.html#sys.settrace
"""
# Bail if we're not tracing.
... | This is the main hook passed to setprofile().
It implement python profiler interface.
Arguments are described in https://docs.python.org/2/library/sys.html#sys.settrace |
def wait(self, time):
        """Block the calling thread for up to ``time`` seconds.

        Returns True when the timeout elapses normally and False when
        another thread interrupts the wait by setting the event.
        """
        event = Event()
        self._wait = event
        interrupted = event.wait(time)
        return not interrupted
def get_collection(self, url):
""" Pages through an object collection from the bitbucket API.
Returns an iterator that lazily goes through all the 'values'
of all the pages in the collection. """
url = self.BASE_API2 + url
while url is not None:
response = self.get_da... | Pages through an object collection from the bitbucket API.
Returns an iterator that lazily goes through all the 'values'
of all the pages in the collection. |
def postprocess_segments(self):
"""Convert the format of the segment class members."""
# make segs a list of mask arrays, it's easier to store
# as there is a hdf5 equivalent
for iseg, seg in enumerate(self.segs):
mask = np.zeros(self._adata.shape[0], dtype=bool)
... | Convert the format of the segment class members. |
def solve(self, scenario, solver):
"""
Decompose each cluster into separate units and try to optimize them
separately
:param scenario:
:param solver: Solver that may be used to optimize partial networks
"""
clusters = set(self.clustering.busmap.values)
n =... | Decompose each cluster into separate units and try to optimize them
separately
:param scenario:
:param solver: Solver that may be used to optimize partial networks |
def stop(self):
        """Shut down the server.

        A no-op when the server is not running.
        """
        if not self.is_run:
            return
        self._service.shutdown()
        self._service.server_close()
def regressOut(Y, X, return_b=False):
    """Regress the covariates ``X`` out of ``Y``.

    Fits least-squares coefficients ``b`` via the pseudo-inverse of X
    and returns the residual ``Y - X.dot(b)``.

    :param Y: response matrix
    :param X: covariate matrix
    :param return_b: when True, also return the fitted coefficients
    """
    coeffs = la.pinv(X).dot(Y)
    residual = Y - X.dot(coeffs)
    if return_b:
        return residual, coeffs
    return residual
def Transformer(source_vocab_size,
target_vocab_size,
mode='train',
num_layers=6,
feature_depth=512,
feedforward_depth=2048,
num_heads=8,
dropout=0.1,
shared_embedding=True,
ma... | Transformer model.
Args:
source_vocab_size: int: source vocab size
target_vocab_size: int: target vocab size
mode: str: 'train' or 'eval'
num_layers: int: number of encoder/decoder layers
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads... |
def _default_hparams():
"""A set of basic model hyperparameters."""
return hparam.HParams(
# Use this parameter to get comparable perplexity numbers with different
# tokenizations. This value should be set to the ratio of the number of
# tokens in the test set according to the tokenization used t... | A set of basic model hyperparameters. |
def generate_dict_schema(size, valid):
""" Generate a schema dict of size `size` using library `lib`.
In addition, it returns samples generator
:param size: Schema size
:type size: int
:param samples: The number of samples to generate
:type samples: int
:param valid: Generate valid samples... | Generate a schema dict of size `size` using library `lib`.
In addition, it returns samples generator
:param size: Schema size
:type size: int
:param samples: The number of samples to generate
:type samples: int
:param valid: Generate valid samples?
:type valid: bool
:returns |
def before(point):
""" True if point datetime specification is before now.
NOTE: If point is specified it is supposed to be in local time.
Not UTC/GMT !! This is because that is what gmtime() expects.
"""
if not point:
return True
if isinstance(point, six.string_types):
point =... | True if point datetime specification is before now.
NOTE: If point is specified it is supposed to be in local time.
Not UTC/GMT !! This is because that is what gmtime() expects. |
def reminder_validator(input_str):
"""
Allows a string that matches utils.REMINDER_REGEX.
Raises ValidationError otherwise.
"""
match = re.match(REMINDER_REGEX, input_str)
if match or input_str == '.':
return input_str
else:
raise ValidationError('Expected format: <number><w|... | Allows a string that matches utils.REMINDER_REGEX.
Raises ValidationError otherwise. |
def crypto_config_from_table_info(materials_provider, attribute_actions, table_info):
"""Build a crypto config from the provided values and table info.
:returns: crypto config and updated kwargs
:rtype: tuple(CryptoConfig, dict)
"""
ec_kwargs = table_info.encryption_context_values
if table_info... | Build a crypto config from the provided values and table info.
:returns: crypto config and updated kwargs
:rtype: tuple(CryptoConfig, dict) |
def trim_snapshots(self, hourly_backups = 8, daily_backups = 7,
weekly_backups = 4):
"""
Trim excess snapshots, based on when they were taken. More current
snapshots are retained, with the number retained decreasing as you
move back in time.
If ebs volumes... | Trim excess snapshots, based on when they were taken. More current
snapshots are retained, with the number retained decreasing as you
move back in time.
If ebs volumes have a 'Name' tag with a value, their snapshots
will be assigned the same tag when they are created. The values
... |
def extract_pool_attr(cls, req):
""" Extract pool attributes from arbitary dict.
"""
attr = {}
if 'id' in req:
attr['id'] = int(req['id'])
if 'name' in req:
attr['name'] = req['name']
if 'description' in req:
attr['description'] = req[... | Extract pool attributes from arbitary dict. |
def delete_report(report):
"""Delete report(s), supports globbing.
"""
for path in glob.glob(os.path.join(_get_reports_path(), report)):
shutil.rmtree(path) | Delete report(s), supports globbing. |
def cnvlGauss2D(idxPrc, aryBoxCar, aryMdlParamsChnk, tplPngSize, varNumVol,
queOut):
"""Spatially convolve boxcar functions with 2D Gaussian.
Parameters
----------
idxPrc : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
aryBoxCar : float, positive
... | Spatially convolve boxcar functions with 2D Gaussian.
Parameters
----------
idxPrc : 2d numpy array, shape [n_samples, n_measurements]
Description of input 1.
aryBoxCar : float, positive
Description of input 2.
aryMdlParamsChnk : 2d numpy array, shape [n_samples, n_measurements]
... |
def list_metrics():
"""List metrics available."""
for name, operator in ALL_OPERATORS.items():
print(f"{name} operator:")
if len(operator.cls.metrics) > 0:
print(
tabulate.tabulate(
headers=("Name", "Description", "Type"),
tabul... | List metrics available. |
def search_variants(
self, variant_set_id, start=None, end=None, reference_name=None,
call_set_ids=None):
"""
Returns an iterator over the Variants fulfilling the specified
conditions from the specified VariantSet.
:param str variant_set_id: The ID of the
... | Returns an iterator over the Variants fulfilling the specified
conditions from the specified VariantSet.
:param str variant_set_id: The ID of the
:class:`ga4gh.protocol.VariantSet` of interest.
:param int start: Required. The beginning of the window (0-based,
inclusive) ... |
def get_slide_vars(self, slide_src, source=None):
""" Computes a single slide template vars from its html source code.
Also extracts slide informations for the table of contents.
"""
presenter_notes = None
find = re.search(r'<h\d[^>]*>presenter notes</h\d>', slide_src,
... | Computes a single slide template vars from its html source code.
Also extracts slide informations for the table of contents. |
def _correctArtefacts(self, image, threshold):
'''
Apply a thresholded median replacing high gradients
and values beyond the boundaries
'''
image = np.nan_to_num(image)
medianThreshold(image, threshold, copy=False)
return image | Apply a thresholded median replacing high gradients
and values beyond the boundaries |
def autodiscover():
"""
Taken from ``django.contrib.admin.autodiscover`` and used to run
any calls to the ``processor_for`` decorator.
"""
global LOADED
if LOADED:
return
LOADED = True
for app in get_app_name_list():
try:
module = import_module(app)
ex... | Taken from ``django.contrib.admin.autodiscover`` and used to run
any calls to the ``processor_for`` decorator. |
def unique(func, num_args=0, max_attempts=100, cache=None):
"""
wraps a function so that produce unique results
:param func:
:param num_args:
>>> import random
>>> choices = [1,2]
>>> a = unique(random.choice, 1)
>>> a,b = a(choices), a(choices)
>>> a == b
False
"""
if ... | wraps a function so that produce unique results
:param func:
:param num_args:
>>> import random
>>> choices = [1,2]
>>> a = unique(random.choice, 1)
>>> a,b = a(choices), a(choices)
>>> a == b
False |
def drain_events(self, timeout=None):
"""Wait for an event on a channel."""
chanmap = self.channels
chanid, method_sig, args, content = self._wait_multiple(
chanmap, None, timeout=timeout,
)
channel = chanmap[chanid]
if (content and
channel.a... | Wait for an event on a channel. |
def _read_fasta_files(f, args):
""" read fasta files of each sample and generate a seq_obj
with the information of each unique sequence in each sample
:param f: file containing the path for each fasta file and
the name of the sample. Two column format with `tab` as field
separator
:returns: * ... | read fasta files of each sample and generate a seq_obj
with the information of each unique sequence in each sample
:param f: file containing the path for each fasta file and
the name of the sample. Two column format with `tab` as field
separator
:returns: * :code:`seq_l`: is a list of seq_obj obje... |
def parse_boolargs(self, args):
"""Returns an array populated by given values, with the indices of
those values dependent on given boolen tests on self.
The given `args` should be a list of tuples, with the first element the
return value and the second argument a string that evaluates t... | Returns an array populated by given values, with the indices of
those values dependent on given boolen tests on self.
The given `args` should be a list of tuples, with the first element the
return value and the second argument a string that evaluates to either
True or False for each ele... |
def set_nsxcontroller_ip(self, **kwargs):
"""
Set nsx-controller IP
Args:
IP (str): IPV4 address.
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
... | Set nsx-controller IP
Args:
IP (str): IPV4 address.
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
None |
def getAddPerson(self):
"""
Return an L{AddPersonFragment} which is a child of this fragment and
which will add a person to C{self.organizer}.
"""
fragment = AddPersonFragment(self.organizer)
fragment.setFragmentParent(self)
return fragment | Return an L{AddPersonFragment} which is a child of this fragment and
which will add a person to C{self.organizer}. |
def assets(lon=None, lat=None, begin=None, end=None):
'''
HTTP REQUEST
GET https://api.nasa.gov/planetary/earth/assets
QUERY PARAMETERS
Parameter Type Default Description
lat float n/a Latitude
lon float n/a Longitude
begin YYYY-MM-DD n/a beginning of date range
end YYYY-M... | HTTP REQUEST
GET https://api.nasa.gov/planetary/earth/assets
QUERY PARAMETERS
Parameter Type Default Description
lat float n/a Latitude
lon float n/a Longitude
begin YYYY-MM-DD n/a beginning of date range
end YYYY-MM-DD today end of date range
api_key string DEMO_KEY api.nasa.... |
def visit_BinaryOperation(self, node):
"""Visitor for `BinaryOperation` AST node."""
self.visit(node.left)
self.visit(node.right) | Visitor for `BinaryOperation` AST node. |
def make_shift_function(alphabet):
"""Construct a shift function from an alphabet.
Examples:
Shift cases independently
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
<function make_shift_function.<locals>.shift_case_sensitive>
Additionally shift punc... | Construct a shift function from an alphabet.
Examples:
Shift cases independently
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
<function make_shift_function.<locals>.shift_case_sensitive>
Additionally shift punctuation characters
>>> make_shift... |
def new_job(frontier, job_conf):
'''Returns new Job.'''
validate_conf(job_conf)
job = Job(frontier.rr, {
"conf": job_conf, "status": "ACTIVE",
"started": doublethink.utcnow()})
if "id" in job_conf:
job.id = job_conf["id"]
if "max_claimed_sites" in job_conf:
... | Returns new Job. |
def run_unlock(device_type, args):
"""Unlock hardware device (for future interaction)."""
util.setup_logging(verbosity=args.verbose)
with device_type() as d:
log.info('unlocked %s device', d) | Unlock hardware device (for future interaction). |
def get_metric_parsers(metric_packages=tuple(), include_defaults=True):
"""Gets all of the metric parsers.
Args:
metric_packages - Defaults to no extra packages. An iterable of
metric containing packages. A metric inherits DiffParserBase
and does not have __metric__ = False
... | Gets all of the metric parsers.
Args:
metric_packages - Defaults to no extra packages. An iterable of
metric containing packages. A metric inherits DiffParserBase
and does not have __metric__ = False
A metric package must be imported using import a.b.c
include_d... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.