content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def elementwise(op: Callable[..., float], *ds: D) -> NumDict:
"""
Apply op elementwise to a sequence of numdicts.
If any numdict in ds has None default, then default is None, otherwise the
new default is calculated by running op on all defaults.
"""
keys: set = set()
keys.update(*ds)
... | 13,800 |
def test_get_closed_class_vague_meaning_count():
"""Test get_closed_class_vague_meaning_count method."""
result = discourse_markers.get_closed_class_vague_meaning_count(
Doc(
[
Text("En realidad la pandemia no es mala."),
Text("En contra de lo que se cree no e... | 13,801 |
def sigmoid_focal_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
alpha: float = -1,
gamma: float = 2,
reduction: str = "none",
) -> torch.Tensor:
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.... | 13,802 |
def test_fullImport():
    """Smoke-test that the full dataset loads with consistent shapes/types."""
    tensor, matrix, patient_data = form_tensor()
    # Both numeric outputs must be numpy arrays sharing the first dimension.
    for arr in (tensor, matrix):
        assert isinstance(arr, np.ndarray)
    assert tensor.shape[0] == matrix.shape[0]
    assert isinstance(patient_data, pd.DataFrame)
def test_liquidbulk_01():
"""Test to see if object initialisation works properly"""
import opentisim
Smallhydrogen = opentisim.liquidbulk.Vessel(**opentisim.liquidbulk.smallhydrogen_data)
assert Smallhydrogen.call_size == opentisim.liquidbulk.smallhydrogen_data['call_size']
assert Smallhydrogen.LOA == opentisim... | 13,804 |
def arcsin(x):
"""Return the inverse sine or the arcsin.
INPUTS
x (Variable object or real number)
RETURNS
if x is a Variable, then return a Variable with val and der.
if x is a real number, then return the value of arcsin(x).
EXAMPLES
>>> x = Variable(0, name='x')
>>> t = arcsin(x)
>>> print(t.val, t.d... | 13,805 |
def preprocessing(texts, words, label, coef=0.3, all_tasks=False, include_repeat=True, progressbar=True):
"""
the function returns the processed array for the Spacy standard
"""
train = []
enit = {}
assert 0 < coef <= 1, f"The argument must be in the range (0 < coef <= 1) --> {coef}"
if al... | 13,806 |
def rotate(
input,
angle,
axes=(1, 0),
reshape=True,
output=None,
order=3,
mode="constant",
cval=0.0,
prefilter=True,
*,
allow_float32=True,
):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
``axes`` parameter using spl... | 13,807 |
def default():
    """Run all default tasks to test, and build lib and docs.

    Currently a no-op placeholder task.
    """
    return None
def generate_url_fragment(title, blog_post_id):
"""Generates the url fragment for a blog post from the title of the blog
post.
Args:
title: str. The title of the blog post.
blog_post_id: str. The unique blog post ID.
Returns:
str. The url fragment of the blog post.
"""
... | 13,809 |
def update_cluster(cluster, cluster_args, args,
api=None, path=None, session_file=None):
"""Updates cluster properties
"""
if api is None:
api = bigml.api.BigML()
message = dated("Updating cluster. %s\n" %
get_url(cluster))
log_message(message, log_fil... | 13,810 |
def calculate_FLOPs_scale(model, input_size, multiply_adds=False, use_gpu=False):
"""
forked from FishNet @ github
https://www.zhihu.com/question/65305385/answer/256845252
https://blog.csdn.net/u011501388/article/details/81061024
https://blog.csdn.net/xidaoliang/article/details/88191910
no bias... | 13,811 |
def save_Ps_and_Ts(data_filename, Fs = 1000, f_range = (6,12)):
"""
Saves the indices corresponding to oscillatory peaks and trough into a new numpy file
"""
# Load data
x = np.load(data_filename)
# Calculate peaks and troughs
Ps, Ts = nonshape.findpt(x, f_range, Fs = Fs)
# Sa... | 13,812 |
def calcfirst(dfas, first, name):
"""Recursive function that mutates first."""
dfa = dfas[name]
first[name] = None # dummy to detect left recursion
state = dfa[0]
totalset = {}
overlapcheck = {}
for label, _ in state.arcs.items():
if label in dfas:
if label in first:
... | 13,813 |
def get_post_by_user(user_id: int, database: Session) -> "list[Post]":
    """Return all posts authored by *user_id*, newest first.

    Fixes the return annotation: ``Query.all()`` returns a list of
    ``Post`` rows, not a single ``Post``.

    Args:
        user_id: ID of the post author to filter on.
        database: Active SQLAlchemy session used for the query.

    Returns:
        list[Post]: Matching posts ordered by descending ``id``.
    """
    post = database.query(Post).filter(
        Post.user == user_id).order_by(Post.id.desc()).all()
    logger.info("FOI RETORNADO DO BANCO AS SEGUINTES CONTRIBUIÇÕES: %s", post)
    return post
def unformat_bundle(formattedBundle):
"""
Converts a push-ready bundle into a structured object by changing
stringified yaml of 'customResourceDefinitions', 'clusterServiceVersions',
and 'packages' into lists of objects.
Undoing the format helps simplify bundle validation.
:param formattedBundl... | 13,815 |
def supports_box_chars() -> bool:
    """Return True if every Unicode box-drawing character can be encoded."""
    box_chars = "│─└┘┌┐"
    return all(can_encode(ch) for ch in box_chars)
def calculate_intersection_over_union(box_data, prior_boxes):
"""Calculate intersection over union of box_data with respect to
prior_boxes.
Arguments:
ground_truth_data: numpy array with shape (4) indicating x_min, y_min,
x_max and y_max coordinates of the bounding box.
prior_boxes:... | 13,817 |
def get_report(analytics, start_date, end_date = 'today'):
"""Queries the Analytics Reporting API V4.
Args:
analytics: An authorized Analytics Reporting API V4 service object.
Returns: The Analytics Reporting API V4 response.
"""
return analytics.reports().batchGet(
body={
'reportRequests': [
... | 13,818 |
def write_pinout_xml(pinout, out_xml=None):
"""
write the pinout dict to xml format with no attributes. this is verbose
but is the preferred xml format
"""
ar = []
for k in sort_alpha_num(pinout.keys()):
d = pinout[k]
d['number'] = k
# ar.append({'pin': d})
ar.a... | 13,819 |
def get_solution(request, level=1):
"""Returns a render of answers.html"""
context = RequestContext(request)
cheat_message = '\\text{Ulovlig tegn har blitt brukt i svar}'
required_message = '\\text{Svaret ditt har ikke utfylt alle krav}'
render_to = 'game/answer.html'
if request.method == 'POS... | 13,820 |
def build_UNIST_tree():
"""
This function returns a (linked) binary tree that contains (a simplified and fictitious version of)
the organisational structure of schools and departments at UNIST.
In particular, this function should return the following tree:
UNIST
--Engineering
----Managemen... | 13,821 |
def get_work_log_queue():
""" json格式为::
{'func':'transform',
'kw':{ ... # 和前面task_queue相同
},
"runtime":{ # 队列运行相关信息
'created':12323423 #进入原始队列时间
'queue':'q01' # 是在哪个原子原子队列
'start':123213123 #转换开始时间
'end':123213123 #转换结束时间
'worker':'w01... | 13,822 |
def azure_project_train_status_handler(**kwargs):
"""
Listen on azure_training.models.Project change.
If a Project is created, create a Train(Training Status) as well.
"""
logger.info("Azure Project changed.")
logger.info("Checking...")
if 'sender' not in kwargs or kwargs['sender'] != Proje... | 13,823 |
def is_first_buy(ka, ka1, ka2=None, pf=False):
"""确定某一级别一买
注意:如果本级别上一级别的 ka 不存在,无法识别本级别一买,返回 `无操作` !!!
一买识别逻辑:
1)必须:上级别最后一个线段标记和最后一个笔标记重合且为底分型;
2)必须:上级别最后一个向下线段内部笔标记数量大于等于6,且本级别最后一个线段标记为底分型;
3)必须:本级别向下线段背驰 或 本级别向下笔背驰;
4)辅助:下级别向下线段背驰 或 下级别向下笔背驰。
:param ka: KlineAnalyze
本级别
... | 13,824 |
def AddInitialRefugees(e, d, loc):
    """Seed location *loc* with its initial refugee agents.

    The head count is read from data source *d* keyed by the location's
    name; one agent is added to ecosystem *e* per refugee.
    """
    count = int(d.get_field(loc.name, 0, FullInterpolation=True))
    for _ in range(count):
        e.addAgent(location=loc)
def preprocess(path, l_pass=0.7, h_pass=0.01, bandpass=True, short_ch_reg=False, tddr=True, negative_correlation=False, verbose=False, return_all=False):
"""
Load raw data and preprocess
:param str path: path to the raw data
:param float l_pass: low pass frequency
:param float h_pass: high pass... | 13,826 |
def geomapi_To2d(*args):
"""
* To intersect a curve and a surface. This function builds (in the parametric space of the plane P) a 2D curve equivalent to the 3D curve C. The 3D curve C is considered to be located in the plane P. Warning The 3D curve C must be of one of the following types: - a line - a circle - a... | 13,827 |
def validate(epoch, model, criterion, data_loader, tb_writer, args):
"""Routine to validate an epoch.
"""
model.eval()
# dataset loop
pbar = tqdm(enumerate(data_loader))
loss = 0.0
for batch_id, batch_data in pbar:
# retrieve data from loader and copy data to device
images ... | 13,828 |
def play(
q_values, env, num_episodes, grid_cells, state_bounds,
suppress_print=False, episode_length=None
):
"""Renders gym environment under greedy policy.
The gym environment will be rendered and executed according to the greedy
policy induced by the state-action value function. NOTE: After you use an... | 13,829 |
def get_object_list():
"""Returns the object name list for APC2015.
Args:
None.
Returns:
objects (list): List of object name.
"""
pkg_path = rospkg.RosPack().get_path(PKG)
yaml_file = osp.join(pkg_path, 'data/object_list.yml')
with open(yaml_file) as f:
objects = ya... | 13,830 |
def write_spec_to_h5(specfile, h5file, h5path='/',
mode="a", overwrite_data=False,
link_type="hard", create_dataset_args=None):
"""Write content of a SpecFile in a HDF5 file.
:param specfile: Path of input SpecFile or :class:`SpecH5` object
:param h5file: Path of o... | 13,831 |
def gtMakeTAKBlobMsg(callsign, text, aesKey=False):
"""
Assemble an ATAK plugin compatible chat message blob
(suitable for feeding to gtMakeAPIMsg() )
With optional AES encryption, if a key is provided
"""
body = (callsign + b': ' + text)[:230]
# Apply optional encryption (and base64 encod... | 13,832 |
def validate_json_with_extensions(value, rule_obj, path):
""" Performs the above match, but also matches a dict or a list. This it
just because it seems like you can't match a dict OR a list in pykwalify
"""
validate_extensions(value, rule_obj, path)
if not isinstance(value, (list, dict)):
... | 13,833 |
def artists_by_rating(formatter, albums):
"""Returns the artists sorted by decreasing mean album rating.
Only artists with more than 1 reviewed albums are considered.
"""
artist_tags = set([album["artist_tag"] for album in albums])
artists = []
# build the list of artists and compute their ratin... | 13,834 |
def st_max(*args):
"""Max function.
Parameters
----------
x : float, int, MissingValue instance, or None
(2 or more such inputs allowed)
Returns
-------
max(x1, x2, ...) if any x is non-missing (with missing values ignored).
Otherwise, MISSING (".") returned.
"... | 13,835 |
def build_vocab(data_root: str, dataset: str, data_select_ratio: float, vocab_limit_size: int, save_root: str):
"""Build vocab for dataset with random selected client
Args:
data_root (str): string path for data saving root
dataset (str): string of dataset name to build vocab
data_select... | 13,836 |
def is_button_controller(device: Device) -> bool:
    """Return true if the device is a stateless button controller."""
    button_capabilities = (
        CAP_PUSHABLE_BUTTON,
        CAP_HOLDABLE_BUTTON,
        CAP_DOUBLE_TAPABLE_BUTTON,
    )
    return any(cap in device.capabilities for cap in button_capabilities)
def gen_acq_noddi(in_file, epi_params, alt_epi_params, readout, readout_alt):
"""
This is a function to generate the FSL topup acq.txt file
:param in_file:
:param epi_params:
:param alt_epi_params:
:param readout:
:param readout_alt:
:return:
"""
import numpy as np
import os
... | 13,838 |
def list_parts(bucket, key, upload_id):
"""Lists the parts that have been uploaded for a specific multipart upload.
This operation must include the upload ID, which you obtain by
sending the initiate multipart upload request (see
CreateMultipartUpload ). This request returns a maximum of 1,000
uplo... | 13,839 |
def leap_year():
"""
This functions seeks to return a leap year after user input << integer(4).
Rules for a leap year:
As you surely know, due to some astronomical reasons, years may be leap or common.
The former are 366 days long, while the latter are 365 days long.
Since the introduction of t... | 13,840 |
def parse(files, **kwargs):
"""Parse all BAM files."""
parsed = []
if kwargs["meta"].has_field("base_coverage"):
cov_range = kwargs["meta"].field_meta("base_coverage")["range"]
else:
cov_range = [math.inf, -math.inf]
if kwargs["meta"].has_field("read_coverage"):
read_cov_rang... | 13,841 |
def check_latest_version():
""" checks for the latest version of cumulusci from pypi, max once per hour """
check = True
with timestamp_file() as f:
timestamp = float(f.read() or 0)
delta = time.time() - timestamp
check = delta > 3600
if check:
try:
latest_version =... | 13,842 |
def test_username_match(string: str) -> None:
    """Verify that the USERNAME regex fully matches a valid string."""
    match = USERNAME.fullmatch(string)
    assert match
def login(request):
"""Login view for GET requests."""
logged_in = request.authenticated_userid is not None
if logged_in:
return {'logged_in': True,
'form_enabled': False,
'status': u'Already logged in',
'status_type': u'info'}
status = u''
s... | 13,844 |
def of_type(_type, value_1, *args) -> bool:
"""
Check if a collection of values are of the same type.
Parameters:
_type (any): The type to check for.
value_1 (any): The first value to check.
*args (any): Rest of values to check against given type.
Return... | 13,845 |
def configuration(parent_package='', top_path=None):
"""[Placeholder].
Parameters
----------
parent_package :
top_path :
Returns
-------
configuration :
"""
build_path = build_mlpack()
config = Configuration('mlpack', parent_package, top_path)
libraries = ['mlpack', 'b... | 13,846 |
def read_config(path):
"""
Reads the Kong config file (YAML).
"""
if path is None:
raise Exception(
"empty path provided. please provide a path using `--config=<config.yml>`"
)
with open(path, "r") as stream:
try:
return yaml.safe_load(stream)
... | 13,847 |
def generate_grid_world(grid, prob, pos_rew, neg_rew, gamma=.9, horizon=100):
"""
This Grid World generator requires a .txt file to specify the
shape of the grid world and the cells. There are five types of cells: 'S' is
the starting position where the agent is; 'G' is the goal state; '.' is a
norma... | 13,848 |
def sqrt_quadrature_scheme(N_poly, N_poly_log):
    """Return a quadrature rule exact on [0, 1] for p(x) + q(x)*sqrt(x)
    with deg(p) <= N_poly and deg(q) <= N_poly_log.

    Fixes the docstring, which previously referred to a nonexistent
    parameter ``N_poly_sqrt`` instead of ``N_poly_log``.

    Args:
        N_poly: Maximum degree of the plain polynomial part p(x).
        N_poly_log: Maximum degree of the weighted part q(x).

    Returns:
        QuadScheme1D wrapping the nodes and weights produced by
        ``sqrt_quadrature_rule``.
    """
    nodes, weights = sqrt_quadrature_rule(N_poly, N_poly_log)
    return QuadScheme1D(nodes, weights)
def check_dataset_update(args, dataset):
"""Checks if the dataset information must be updated.
"""
return (args.dataset_attributes or
args.import_fields or
(args.shared_flag and r.shared_changed(args.shared, dataset)) or
(((hasattr(args, 'max_categories') and args.max_ca... | 13,850 |
def get_package_extras(provider_package_id: str) -> Dict[str, List[str]]:
"""
Finds extras for the package specified.
:param provider_package_id: id of the package
"""
if provider_package_id == 'providers':
return {}
with open(DEPENDENCIES_JSON_FILE) as dependencies_file:
cross_... | 13,851 |
def e(a: float, b: float) -> float:
    """Compute e = sqrt(1 + (b * b) / (a * a)).

    NOTE(review): with a as semi-major and b as semi-minor axis this is
    the *hyperbola* eccentricity formula; an ellipse would use
    sqrt(1 - (b * b) / (a * a)). Confirm the intended conic section.

    :param a: semi-major axis
    :type a: float
    :param b: semi-minor axis
    :type b: float
    :return: eccentricity
    :rtype: float
    """
    numerator = b * b
    denominator = a * a
    return np.sqrt(1 + numerator / denominator)
def remove_extra_citation_metadata(graph) -> None:
"""Remove superfluous metadata associated with a citation (that isn't the db/id).
Best practice is to add this information programmatically.
"""
for u, v, k in graph.edges(keys=True):
if CITATION not in graph[u][v][k]:
continue
... | 13,853 |
def parse_note(path: Path) -> dict:
""" convert note in plain text to a dictionary.
Line #1 ~ #5 are meta data of the note.
Line #9 to end is the body.
"""
header_line_number = 5
body_start_line = 9
res = {}
with open(path) as f:
for x in range(header_line_number):
... | 13,854 |
def get_previous_sle_for_warehouse(last_sle, exclude_current_voucher=False):
"""get stock ledger entries filtered by specific posting datetime conditions"""
last_sle['time_format'] = '%H:%i:%s'
if not last_sle.get("posting_date"):
last_sle["posting_date"] = "1900-01-01"
if not last_sle.ge... | 13,855 |
def write_visfile(discr, io_fields, visualizer, vizname,
step=0, t=0, overwrite=False, vis_timer=None):
"""Write VTK output for the fields specified in *io_fields*.
Parameters
----------
visualizer:
A :class:`meshmode.discretization.visualization.Visualizer`
VTK output... | 13,856 |
def forest_str(graph, with_labels=True, sources=None, write=None, ascii_only=False):
"""
Creates a nice utf8 representation of a directed forest
Parameters
----------
graph : nx.DiGraph | nx.Graph
Graph to represent (must be a tree, forest, or the empty graph)
with_labels : bool
... | 13,857 |
def rotated_shower(shower, alt, az):
"""
Return a rotated shower object from a shower object and a direction (alt, az)
Parameters
----------
shower: shower class object
Returns
-------
copy of the given shower but rotated
"""
rot_shower = copy(shower)
rot_shower.particles = ... | 13,858 |
def write_output_report(dataframe):
"""Report workspace set-up statuses and create output tsv file from provided dataframe."""
# create timestamp and use to label output file
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
output_filename = f"{timestamp}_workspaces_published_status.ts... | 13,859 |
def cv_project(c):
    """Create a CV project from the cookiecutter template, then apply
    the local fake-update step to the generated project directory."""
    command = f"cookiecutter cvbpcc --no-input type=cv project_name={cc_project_name}"
    with c.cd(dir_path):
        c.run(command, pty=True)
    _fake_update(os.path.join(dir_path, cc_project_name))
def summary(clf, X, y, xlabels=None):
"""
Output summary statistics for a fitted regression model.
Parameters
----------
clf : sklearn.linear_model
A scikit-learn linear model classifier with a `predict()` method.
X : numpy.ndarray
Training data used to fit the classifier.
y... | 13,861 |
def angle2circle(angles):
    """Convert angle(s) in degrees to radians, doubled after a +7.5 deg shift."""
    shifted = np.array(angles) + 7.5
    return np.deg2rad(shifted * 2)
def test_string():
""" Test incomplete string """
inp = load('js/incomplete_string.js')
exp = load('js/incomplete_string.min.js')
# save('js/incomplete_string.min.js', py_jsmin(inp))
assert py_jsmin(inp) == exp
assert py_jsmin2(inp) == exp
assert c_jsmin(inp) == exp
inp = inp.decode('la... | 13,863 |
def _make_source(cls_source: str, cls_name: str, instance_method: str):
"""Converts a class source to a string including necessary imports.
Args:
cls_source (str): A string representing the source code of a user-written class.
cls_name (str): The name of the class cls_source represents.
... | 13,864 |
def prepend_pass_statement(line: str) -> str:
    """Prepend pass at indent level and comment out the line."""
    indent_width = num_indented(line)
    commented_out = line[indent_width:]
    return " " * indent_width + "pass  # " + commented_out
def main():
"""Start execution of the script"""
MiscUtil.PrintInfo("\n%s (PyMOL v%s; %s) Starting...\n" % (ScriptName, pymol.cmd.get_version()[1], time.asctime()))
(WallClockTime, ProcessorTime) = MiscUtil.GetWallClockAndProcessorTime()
# Retrieve command line arguments and options...
... | 13,866 |
def init_session_values():
"""
Start with some reasonable defaults for date and time ranges.
Note this must be run in app context ... can't call from main.
"""
# Default date span = tomorrow to 1 week from now
now = arrow.now('local') # We really should be using tz from browser
tomorrow ... | 13,867 |
def clear_cache(user=None, doctype=None):
"""clear cache"""
import frappe.sessions
if doctype:
import frappe.model.meta
frappe.model.meta.clear_cache(doctype)
reset_metadata_version()
elif user:
frappe.sessions.clear_cache(user)
else: # everything
import translate
frappe.sessions.clear_cache()
transl... | 13,868 |
def download_spot_by_dates(start=datetime(2011, 1, 1)):
"""
下载数据,存储为csv文件
:param start: 2011-01-01 最早数据
:return: True 下载文件 False 没有下载文件
"""
file_index = get_download_file_index(SPREAD_DIR, start=start)
if file_index.empty:
return False
for date in file_index:
date_str =... | 13,869 |
def run_dataset(prob_label):
"""Run the experiment"""
sample_source, n = get_sample_source(prob_label)
# /////// submit jobs //////////
# create folder name string
home = os.path.expanduser("~")
foldername = os.path.join(home, "freqopttest_slurm", 'e%d'%ex)
logger.info("Setting engine fold... | 13,870 |
def truncate_range(data, percMin=0.25, percMax=99.75, discard_zeros=True):
"""Truncate too low and too high values.
Parameters
----------
data : np.ndarray
Image to be truncated.
percMin : float
Percentile minimum.
percMax : float
Percentile maximum.
discard_zeros : ... | 13,871 |
def update_dashboards(modules, horizon_config, installed_apps):
"""Imports dashboard and panel configuration from modules and applies it.
The submodules from specified modules are imported, and the configuration
for the specific dashboards is merged, with the later modules overriding
settings from the ... | 13,872 |
def test_reset_threshold():
"""
Test the model threshold can be reset.
Performance metric should be recalculated and also predictions should be changed based on the new threshold.
"""
# import data
airlines = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/modified_airlines.csv")... | 13,873 |
async def payment_list(request):
"""
---
description: Show outgoing payments, regarding {bolt11} or {payment_hash} if set Can only specify one of {bolt11} or {payment_hash}
tags:
- payments
produces:
- application/json
parameters:
- in: body
name: body
required: false
... | 13,874 |
def write_opened(dir, file_dict, data_dict, verbose=True):
"""
read in dictionary with open files as values
and write data to files
"""
for game_id, vals in data_dict.items():
f = file_dict.get(game_id)
if not f:
fn = dir + str(game_id) + ".csv"
f =... | 13,875 |
def get_accessible_cases(item, user):
    """Return all cases of cohort *item* accessible to *user*.

    Calls the cohort's ``get_accessible_cases_for_user`` method directly;
    the previous ``getattr`` indirection with a constant attribute name
    was equivalent but obscured the call.
    """
    return item.get_accessible_cases_for_user(user)
def run_main():
    """Entry point: run YOLO k-means anchor clustering over the dataset."""
    n_clusters = args.cluster_number
    dataset_file = os.path.abspath(args.dataset_path)
    anchors_file = os.path.abspath(args.yolo_anchors_path)
    clusterer = YOLO_KMeans(n_clusters, dataset_file)
    clusterer.txt2clusters(anchors_file)
def aux_conv5(A, B, n, idx):
"""
Performs the convolution of A and B where B = A* (enumerate-for-loop)
:param A: Coefficients matrix 1 (orders, buses)
:param B: Coefficients matrix 2 (orders, buses)
:param c: last order of the coefficients in while loop
:param indices: bus indices array
:ret... | 13,878 |
def plot_phaseogram(phaseogram, phase_bins, time_bins, unit_str='s', ax=None,
**plot_kwargs):
"""Plot a phaseogram.
Parameters
----------
phaseogram : NxM array
The phaseogram to be plotted
phase_bins : array of M + 1 elements
The bins on the x-axis
time_bi... | 13,879 |
def load_rapidSTORM_track_header(path):
"""
Load xml header from a rapidSTORM (track) single-molecule localization file and identify column names.
Parameters
----------
path : str, bytes, os.PathLike, file-like
File path for a rapidSTORM file to load.
Returns
-------
list of st... | 13,880 |
def hex_to_byte(hexStr):
    """Convert a hex string (optionally space-separated) to a string of chars.

    Each pair of hex digits becomes one chr() in the result, e.g.
    "48 65" -> "He".

    Fixes shadowing of the builtin ``bytes`` and replaces the manual
    slicing loop with ``bytes.fromhex``.
    """
    cleaned = ''.join(hexStr.split(" "))
    # chr(b) for each byte value equals latin-1 decoding of the raw bytes.
    return bytes.fromhex(cleaned).decode("latin-1")
def _vital_config_update(cfg, cfg_in):
"""
Treat a vital Config object like a python dictionary
Args:
cfg (kwiver.vital.config.config.Config): config to update
cfg_in (dict | kwiver.vital.config.config.Config): new values
"""
# vital cfg.merge_config doesnt support dictionary input
... | 13,882 |
def SubscriberReceivedStartEncKeyVector(builder, numElems):
    """This method is deprecated. Please switch to Start."""
    # Thin forwarding shim kept for backward compatibility with older
    # generated-code callers; delegates directly to StartEncKeyVector.
    return StartEncKeyVector(builder, numElems)
def _read_extended_field_value(value, rawdata):
"""Used to decode large values of option delta and option length
from raw binary form."""
if value >= 0 and value < 13:
return (value, rawdata)
elif value == 13:
return (rawdata[0] + 13, rawdata[1:])
elif value == 14:
return ... | 13,884 |
def add_uint(a, b):
    """Returns the sum of two uint256-ish tuples."""
    total = from_uint(a) + from_uint(b)
    return to_uint(total)
def get_dcgan_args(parser, args=[]):
"""
parameters determing the DCGAN parameters
"""
# DCGAN:
# ------------------------------------------------------------------------
parser.add_argument(
"--lam", type=float, default=10, help="Factor for scaling gradient penalty"
)
parser.add... | 13,886 |
def test_file_upload_with_users(svc_client, identity_headers):
"""Check successful file upload and listing based on user auth header."""
headers_user1 = copy.deepcopy(identity_headers)
headers_user1.pop("Content-Type")
filename = uuid.uuid4().hex
jwt_data = {
"aud": ["renku"],
"ema... | 13,887 |
def _gen_span_id() -> str:
"""Return 16 random hexadecimal digits.
The id is used for distributed tracing.
"""
return os.urandom(8).hex() | 13,888 |
def store_tabular_data(filepath: Path, use_stem: bool = True) -> None:
"""Reads the tabular data from filepath and stores it in-memory to be plotted asychronously.
Args:
filepath (Path): The tabular data file to be read and stored.
use_stem (bool, optional): Only store the filename (without ext... | 13,889 |
def load_uci_credit_card(return_X_y=False, as_frame=False):
"""Loads the UCI Credit Card Dataset.
This dataset contains a sample of [Default of Credit Card Clients Dataset](https://www.kaggle.com/uciml/default-of-credit-card-clients-dataset).
Example:
```python
from skorecard import datasets
... | 13,890 |
def sanitize_yaml_and_save_datasource(
context: DataContext, datasource_yaml: str, overwrite_existing: bool = False
) -> None:
"""A convenience function used in notebooks to help users save secrets."""
if not datasource_yaml:
raise ValueError("Please verify the yaml and try again.")
if not isins... | 13,891 |
def quad_lsq(x, y, verbose=False, itmax=200, iparams=[]):
"""
Fits a parabola to the data, more handy as it fits for
parabola parameters in the form y = B_0 * (x - B_1)**2 + B_2.
This is computationally slower than poly_lsq, so beware of its usage
for time consuming operations. Uses scipy odrpack, b... | 13,892 |
def find_file(directory_name, cyclone_id_string, prefer_zipped=True,
allow_other_format=True, raise_error_if_missing=True):
"""Finds NetCDF file with SHIPS data.
:param directory_name: Name of directory with SHIPS data.
:param cyclone_id_string: Cyclone ID (must be accepted by
`satell... | 13,893 |
def check_sentence_ending(sentence):
    """Check the ending of the sentence to verify that a period is present.

    Implements the previously-stubbed body per its own contract.

    :param sentence: str - a sentence to check.
    :return: bool - return True if punctuated correctly with period, False otherwise.
    """
    return sentence.endswith(".")
def _parse_cli_variable(mapping_str: str) -> Tuple[str, str]:
"""Checks that the input is of shape `name:value` and then splits it into a tuple"""
match = re.match(r"(?P<name>.+?):(?P<value>.+)", mapping_str)
if match is None:
raise ValueError(f'CLI variable input {mapping_str} is not of form `"name... | 13,895 |
def list_privileges_by_role(request, role):
"""
List sentry privilegs by role
:param request:
:param role: role name
:return: A Json array of SentryPrivileges: [p1, p2, p3...]
"""
sentry_privileges = _get_sentry_api(request.user).list_sentry_privileges_by_role("cdap", role)
sentry_privileges = [{"action... | 13,896 |
def to_torch_as(x: Any, y: torch.Tensor) -> Union[Batch, torch.Tensor]:
"""Return an object without np.ndarray.
Same as ``to_torch(x, dtype=y.dtype, device=y.device)``.
"""
assert isinstance(y, torch.Tensor)
return to_torch(x, dtype=y.dtype, device=y.device) | 13,897 |
async def test_closest_function_home_vs_group_state(hass):
"""Test closest function home vs group state."""
hass.states.async_set(
"test_domain.object",
"happy",
{
"latitude": hass.config.latitude + 0.1,
"longitude": hass.config.longitude + 0.1,
},
)
... | 13,898 |
def cleanup_exports():
    """
    Cleanup export directories
    """
    # Fabric task: requires the 'map' env key (set by the `map` task) so the
    # shell commands below can interpolate the map path.
    require('map', provided_by=[map])
    # Remove all generated tiles and export artifacts for the current map.
    local('rm -rf %(map)s/tiles/*' % env)
    local('rm -rf %(map)s/exports/*' % env)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.