content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def sgd(lr, tparams, grads, inp, cost, opt_ret=None):
"""
Stochastic gradient descent (SGD) optimizer
:param lr:
:param tparams:
:param grads:
:param inp:
:param cost:
:param opt_ret:
:return f_grad_shared, f_update:
"""
gshared = [theano.shared(p.get_value() * 0.,
... | 15,400 |
def build_empty_indexes(ngram_len):
"""
Build and return the nested indexes structure.
The resulting index structure can be visualized this way::
1. The unigrams index is in indexes[1] with this structure:
{1:
{
u1: {index_docid1: [posting_list1], index_docid2: [posting_list2]},
u... | 15,401 |
def test_if_in_for_tensor():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_for():
x = Tensor(7)
y = Tensor(0)
for _ in range(3):
if y < Tensor(10):
y += ... | 15,402 |
def file_export(args_opt):
"""Export to file"""
if config.model == "ssd320":
net = SSD320(ssd_mobilenet_v2(), config, is_training=False)
else:
net = ssd_mobilenet_v2(config=config)
net = SsdInferWithDecoder(net, Tensor(default_boxes), config)
save_ckpt_path = './ckpt_' + str(device... | 15,403 |
def as_dicts(results):
    """Convert execution results into plain dict form for easier comparison."""
    converted = []
    for result in results:
        converted.append(result.to_dict(dict_class=dict))
    return converted
def merge_dicts(dict_to_merge, merged_dict):
"""Recursively merge the contents of dict_to_merge into merged_dict.
Values that are already present in merged_dict will be overwritten
if they are also present in dict_to_merge"""
for key, value in iteritems(dict_to_merge):
if isinstance(merged_dict.... | 15,405 |
def _backend_name_to_class(backend_str: str):
"""
Convert a backend string to the test configuration class for the backend.
"""
known_backends = _get_all_backends()
if backend_str not in known_backends:
raise ValueError(
f'Unknown backend {backend_str}. '
f'Known back... | 15,406 |
def concat_allocator_cmd(allocator, cmd):
"""add env variable for different allocator modes."""
new_cmd = cmd
if allocator == "direct":
new_cmd = "DIRECT_BUFFER=1 " + cmd
elif allocator == "unified":
new_cmd = "UNIFIED_BUFFER=1 " + cmd
elif allocator == "je_direct":
new_cmd =... | 15,407 |
def generate_property_comment(
description: intermediate.PropertyDescription,
) -> Tuple[Optional[Stripped], Optional[List[Error]]]:
"""Generate the documentation comment for the given property."""
return _generate_summary_remarks_constraints(description) | 15,408 |
def task_eeg_get_flicker_frequencies() -> Dict:
"""
Get the flicker frequency of each trial. Save as .mat files.
"""
Path(fname.eeg_flicker_frequencies_dir).mkdir(exist_ok=True, parents=True)
for subject in SUBJECTS:
# Get sources.
sources = dict(
dat=fname.bids... | 15,409 |
def create_set(X, y, inds):
"""
X list and y nparray
:return:
"""
new_X = []
for i in inds:
new_X.append(X[i])
new_y = y[inds]
return SignalAndTarget(new_X, new_y) | 15,410 |
def evaluate_model(h5_file, pred_file):
"""
evaluate the trained model. Plot ROC curve and calculate AUC.
inputs:
model json file path, model weights file.
outputs:
filename of the plotting.
"""
try:
batch_size = 32
model = load_model(h5_file)
file_path = os.pat... | 15,411 |
def find_dates():
"""
FInd valid dates
"""
text = read_file()
valid = []
for i, c in enumerate(text):
# Find "-" which we use identifier for possible dates
if c == "-":
try:
date = validate_date_string(i, text)
if date:
... | 15,412 |
def launch_app():
"""
Activates the application if it is not running or is running in the background.
"""
session = get_mobile_driver_session().driver
logger.info("Launch app from desired capabilities on device")
if get_platform() == 'iOS':
session.execute_script('mobile: launchApp', {... | 15,413 |
def handle_400_error(_error):
    """Return an HTTP 400 (Bad Request) response to the client."""
    payload = jsonify({'error': 'Misunderstood'})
    return make_response(payload, 400)
def control_norm_backward(grad_out, ustream, vstream, abkw, cache):
"""
Implements the forward pass of the control norm
For each incoming sample it does:
grad = grad_out - (1 - abkw) * vstream * out
vstream = vstream + mu()
y = (x - mstream) / sqrt(varstream)
varstream ... | 15,415 |
def get_random_asset_id_of_dataset(
db: Session = Depends(deps.get_db),
dataset_id: int = Path(..., example="12"),
viz_client: VizClient = Depends(deps.get_viz_client),
current_user: models.User = Depends(deps.get_current_active_user),
current_workspace: models.Workspace = Depends(deps.get_current_w... | 15,416 |
def seconds_to_hours(s):
    """Convert a duration in seconds to a duration in hours.

    :param s: Number of seconds
    :type s: Float
    :return: Number of hours
    :rtype: Float
    """
    seconds_per_hour = 3600.0
    return float(s) / seconds_per_hour
def assign_change_priority(zone: dict, change_operations: list) -> None:
"""
Given a list of change operations derived from the difference of two zones
files, assign a priority integer to each change operation.
The priority integer serves two purposes:
1. Identify the relative order the changes. T... | 15,418 |
def contigs_n_bases(contigs):
    """Returns the sum of all n_bases of contigs."""
    total = 0
    for contig in contigs:
        total += contig.n_bases
    return total
def parse_input_fn_result(result):
"""Gets features, labels, and hooks from the result of an Estimator input_fn.
Args:
result: output of an input_fn to an estimator, which should be one of:
* A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a tuple
(features, labels) with same cons... | 15,420 |
def remove(self):
"""Deprecated. Remove a node path from the scene graph"""
print("Warning: NodePath.remove() is deprecated. Use remove_node() instead.")
# Send message in case anyone needs to do something
# before node is deleted
messenger.send('preRemoveNodePath', [self])
... | 15,421 |
def demosaic(cfa, pattern='RGGB'):
"""
Returns the demosaiced *RGB* colourspace array from given *Bayer* CFA using
bilinear interpolation.
Parameters
----------
CFA : array_like
*Bayer* color filter array (CFA).
pattern : unicode, optional
**{'RGGB', 'BGGR', 'GRBG', 'GBRG'}*... | 15,422 |
def auditable_event(message, user_id, subject_id, context="other"):
"""Record auditable event
message: The message to record, i.e. "log in via facebook"
user_id: The authenticated user id performing the action
subject_id: The user id upon which the action was performed
"""
text = "performed by... | 15,423 |
def overwrite_core_fields(new_metadata, old_metadata):
"""For fields like dc and project_metadata, if overwrite the items in
old_metadata with the fields in new_metadata"""
old_metadata = copy.deepcopy(old_metadata)
for cat in ['dc', 'project_metadata']:
if cat not in new_metadata:
c... | 15,424 |
def compare_skill(embedding, idx=None):
"""Display a skill its most similar skills in the embedding.
Args:
embedding (array): skills embedding
idx (int): index to select skill,
defaults to None (if None, a random index is chosen)
Returns:
df: dataframe of a skill and th... | 15,425 |
def translate_df(df: DataFrame) -> DataFrame:
"""
Función para traducir directamente un DataFrame
:param df: DataFrame a traducir
:return: DataFrame
"""
regs = df.Country.count() #Contamos la cantidad de registros en la columna 'Country' para servir como delimitador del for
# Usamos un for pa... | 15,426 |
def recreate_city_table(log_file_path, city_table_path):
"""Create a table with city and its respected cases count."""
df = pd.read_csv(log_file_path)
# Group the table by city and infections
grouped_city = df.groupby("City")
grouped_infection = df.groupby(["City", "Infection"])
# Initiate new ... | 15,427 |
def compare_data(data1, data2, ignore=None, expected=True):
"""
todo: Update Documentation
:param data1:
:type data1:
:param data2:
:type data2:
:param ignore:
:type ignore:
:param expected:
:type expected:
:return:
:rtype:
"""
print(data1)
print(data2)
re... | 15,428 |
def test_irr3():
    """
    Test irr on unique distribution.
    """
    dist = bivariates['cat']
    result = i_rr(dist, ((0,), (1,)), (2,))
    assert result == pytest.approx(0)
def _process_caption_jieba(caption):
    """Processes a Chinese caption string into a list of tokenized words.

    Args:
        caption: A string caption.
    Returns:
        A list of strings; the tokenized caption.
    """
    # jieba's accurate mode (cut_all=False) yields one segment per word.
    return list(jieba.cut(caption, cut_all=False))
def create_connection(graph, node1, node2, linktype, propertydict=None, allow_dup=False):
"""
:param graph:
:param node1:
:param node2:
:param linktype:
:param propertydict:
:return:
"""
data = {}
data["graph"] = graph
data["node1"] = node1
data["node2"] = node2
dat... | 15,431 |
def get_nn(config):
"""
Args:
config: Path to the confi file generated during training
Returns:
Model instance
"""
# Loads the model configurations from config file
# generated during training
with open(config, 'r') as f:
C = json.load(f)
C = Struct(**C)
... | 15,432 |
def mandlebrot(ssize, screen, clock, Xs,Xe,Ys,Ye):
"""
would using numpy improve performance and/or precision ?
"""
screen_x, screen_y = ssize
print("start mandlebrot")
for Py in range(screen_y):
for Px in range(screen_x):
x0 = scaled_x(Px, screen_x, Xs, Xe)
... | 15,433 |
def test_breadth_traversal_single(single_tree):
    """test breadth-first traversal"""
    visited = []
    single_tree.breadth_first_traversal_op(lambda node: visited.append(node.val))
    assert visited == [1]
def get_functor(value: Any) -> Union[Functor, FunctorIter, FunctorDict]:
"""
Returns a base functor instance with a value property set to 'value'
of the class for either dictionary, other iterable or uniterable type,
and, where passed, a const property set to the constructor of 'value'.
>>> f = get... | 15,435 |
def save_hdf_dataset(ds, fname, verbose=True):
"""
Save VoigtFit.dataset to a HDF5 file.
The function maps the internal data to a HDF5 data model.
"""
if splitext(fname)[1] == '.hdf5':
pass
else:
fname += '.hdf5'
with h5py.File(fname, 'w') as hdf:
# set main attrib... | 15,436 |
def main():
"""
This code, which must run on a LAPTOP:
1. Constructs a GUI for my part of the Capstone Project.
2. Communicates via MQTT with the code that runs on the EV3 robot.
"""
# -------------------------------------------------------------------------
# Construct and connect the M... | 15,437 |
def split_bibtexs_by_bib_style(bibtexs):
"""
Args:
bibtexs (list of Queryset of Bibtex):
Returns:
list of tuple: (Style Key, Display Name, Bibtex List)
"""
# Get STYLE KYES
bibtex_backet = dict()
choices = expand_book_style_tuple(Book.STYLE_CHOICES) + list(
Bibtex... | 15,438 |
def apply_gates(date, plate, gates_df, subpopulations=False, correlation=None):
""" Constructs dataframe with channels relevant to receptor quantification. """
if date == "5-16":
receptors = ['CD127']
channels = ['BL1-H']
else:
receptors = ['CD25', 'CD122', 'CD132']
channels ... | 15,439 |
def _check_config(config):
"""Check the configuration is robust.
Args:
config (dict): the configuration in dict.
Raises:
AssertionError: if the configuration violates some of rules.
"""
# Check Dataset definition
if "pretrain data setting" in config:
data_loader = config["pretrain data ... | 15,440 |
def action_interaction_exponential_reward_function(
context: np.ndarray,
action_context: np.ndarray,
action: np.ndarray,
base_reward_function: Callable[[np.ndarray, np.ndarray], np.ndarray],
action_interaction_weight_matrix: np.ndarray,
reward_type: str,
random_state: Optional[int] = None,
... | 15,441 |
def deserialize_once_dates(dates):
"""
Deserializes the dates as expected within a once dates object.
:param dates: The dates object
:return: A 2-tuple containing all the deserialized date parameters
"""
return (
du_parser.parse(dates[RULE_ONCE_S_TIME]),
du_parser.parse(dates[RUL... | 15,442 |
def gen_CDR_MitEx(
device_backend: Backend,
simulator_backend: Backend,
n_non_cliffords: int,
n_pairs: int,
total_state_circuits: int,
**kwargs
) -> MitEx:
"""
Produces a MitEx object for applying Clifford Circuit Learning & Clifford Data Regression
mitigation methods when calculatin... | 15,443 |
def lambda_sum_largest_canon(expr, args):
"""
S_k(X) denotes lambda_sum_largest(X, k)
t >= k S_k(X - Z) + trace(Z), Z is PSD
implies
t >= ks + trace(Z)
Z is PSD
sI >= X - Z (PSD sense)
which implies
t >= ks + trace(Z) >= S_k(sI + Z) >= S_k(X)
We use the fact that
S_k(X) = sup... | 15,444 |
def test_create_deployment(name, simple_box):
    """
    Test that we can create a deployment with the right name
    """
    created = Deployment(name, simple_box)
    assert created.name == name
def print_g(msg, term=True, destination=default_log):
"""
Write msg to stdout and to file.
Parameters
----------
msg: str
The text.
term: bool
If true, write to stdout.
destination: str
Path of destination file.
Returns
-------
No return value.
"... | 15,446 |
def compare_bib_dict(item1, item2):
""" compare bibtex item1 and item 2 in dictionary form """
# unique id check
col_list = ["doi", "pmid", "pmcid", "title", "local-url"]
for c in col_list:
if (item1.get(c, "1") != '') and (item1.get(c, "1") == item2.get(c, "2")):
return 1.0
s... | 15,447 |
def find_residues_lsfd(poles, H, fs):
"""Find residues from poles and FRF estimates
Estimate the (in band) residue matrices from poles and FRF's by
the Least Squares Frequency Domain Algorithm (LSFD).
A residue matrix is the outer product of the mode vector
and the modal participation factor. The ... | 15,448 |
def create_regularly_sampled_time_points(interval: pendulum.Duration, start_time_point: pendulum.DateTime, count: int):
"""
Create a sequence of `count` time points starting at `start_time_point`, `interval` apart.
Args:
interval: The time interval between each point.
start_time_point: The ... | 15,449 |
def create_model(bert_config, is_training, input_ids_list, input_mask_list,
segment_ids_list, use_one_hot_embeddings):
"""Creates a classification model."""
all_logits = []
input_ids_shape = modeling.get_shape_list(input_ids_list, expected_rank=2)
batch_size = input_ids_shape[0]
seq_length = ... | 15,450 |
def _remove_redundant_quantize_ops_per_subgraph(model, subgraph_index,
signature_index):
"""Remove redundant quantize ops per subgraph."""
subgraph = model.subgraphs[subgraph_index]
tensors = subgraph.tensors
operators = subgraph.operators
# Find all quantize o... | 15,451 |
def hmmsearch(genome_id, species_id, marker_genes_hmm, num_threads=1):
""" Performance HMM search using prokka annotated protein sequences """
input_annotations = destpath(get_uhgg_layout(species_id, "faa", genome_id)["annotation_file"])
annotated_genes = download_reference(input_annotations)
hmmsearc... | 15,452 |
def where(condition: numpy.typing.ArrayLike, *args: PolyLike) -> ndpoly:
"""
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. Using `nonzero` directly should be... | 15,453 |
def download_google_file(google_file, folder = "./"):
"""
Do a Google image search limited to pixabay.com and get the download file
using these instructions:
https://github.com/fastai/course-v3/blob/master/nbs/dl1/lesson2-download.ipynb
Then use this script to grab the higher res photos.
"""
f = open(google... | 15,454 |
async def async_attach_trigger(
hass, config, action, automation_info, *, platform_type="event"
):
"""Listen for events based on configuration."""
event_types = config.get(CONF_EVENT_TYPE)
removes = []
event_data_schema = None
if config.get(CONF_EVENT_DATA):
event_data_schema = vol.Sche... | 15,455 |
def histogram_filter(x, lb=0, ub=1):
"""Truncates the tail of samples for better visualisation.
Parameters
----------
x : array-like
One-dimensional numeric arrays.
lb : float in [0, 1], optional
Defines the lower bound quantile
ub : float in [0, 1], optional
... | 15,456 |
def create_tweet(food_name):
"""Create the text of the tweet you want to send."""
r = requests.get(food2fork_url, params={"q": food_name, "key": F2F_KEY})
try:
r_json = r.json()
except Exception as e:
return "No recipe found. #sadpanda"
# fetch top-ranked recipe
recipe = r_json["... | 15,457 |
def sublist(lst1: List[T1], lst2: List[T1]) -> bool:
"""
Check `lst1` is sublist of `lst2`.
Parameters
----------
lst1 : List[T1]
List 1.
lst2 : List[T1]
List 2.
Returns
-------
bool
`True` if `lst1` is sublist of `lst2`.
Examples
--------
>>> s... | 15,458 |
async def _sync_friends():
"""Get the actual set of friends to match the expected set of friends"""
global _SYNCED, _WATCHES
expected = set([str(k, "utf8").lower() for k in _WATCHES.keys()])
while True:
actual = await _friends()
if actual is None:
raise eqcmd.CommandError("Fa... | 15,459 |
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradien... | 15,460 |
def settings_check(settings: Settings):
"""Checks if all of the necessary keys are present in a settings dictionary.
:param settings:
:raise ValueError: if one of the settings is not present
"""
if settings['h'] is None:
raise ValueError("The settings file doesn't specify the height of the ... | 15,461 |
def save_masks(model, it, config):
""" For self-training。 将训练得到的模型再应用于训练集,得到新一轮的训练标签(masks),并保存下来。 """
for subset in ['train', 'val']:
if it == 0:
dataset = VertebralDataset()
dataset.load_vertebral(args.dataset[:-4], subset)
dataset.prepare()
else:
... | 15,462 |
def combine_predictions(indices1: NpArray, confs1: NpArray, indices2: NpArray,
confs2: NpArray) -> Tuple[NpArray, NpArray]:
""" Joins two predictions, returns sorted top-3 results in every row """
dprint(indices1.shape)
dprint(indices2.shape)
assert indices1.shape == indices2.sha... | 15,463 |
def get_speed_limit(center, rad, speed_limit):
"""
Retrieves the speed limit of the intersection circle
:param center: center coordinate point of the intersection circle
:param rad: radius of the intersection circle
:param speed_limit: speed limit of the intersection
:type center: Coordinates
... | 15,464 |
def process(utim, data):
"""
Run process
"""
res = None
try:
crypto = CryptoLayer(utim.get_session_key())
logging.debug('Signing message {0} with key {1}'
.format(data[SubprocessorIndex.body.value], utim.get_session_key()))
res = crypto.sign(CryptoLayer... | 15,465 |
def tika_content(string, file_name,
serverEndPoint=u'http://' + params.TIKA_HOST + ':9998'):
"""
converts the binary file to string via Apache Tika interface
return converted string
"""
def convert_to_pdf(file_path):
"""
convert the file_path file to pdf via libreoff... | 15,466 |
def db_eval(techniques,sequences,inputdir=cfg.PATH.SEGMENTATION_DIR,metrics=None):
""" Perform per-frame sequence evaluation.
Arguments:
techniques (string,list): name(s) of the method to be evaluated.
sequences (string,list): name(s) of the sequence to be evaluated.
inputdir (string): path to the technique... | 15,467 |
def scale_constraint(source_obj, target_obj, maintain_offset=True):
    """
    Create a scale constraint driving target_obj from source_obj.

    :param source_obj: object whose scale drives the constraint
    :param target_obj: object to be constrained
    :param maintain_offset: keep the current scale offset when constraining
    :return: name of the created scale constraint node
    """
    constraint_nodes = cmds.scaleConstraint(source_obj, target_obj, mo=maintain_offset)
    return constraint_nodes[0]
def freq2note(freq):
"""Convert frequency in Hz to nearest note name.
Parameters
----------
freq : float [0, 23000[
input frequency, in Hz
Returns
-------
str
name of the nearest note
Example
-------
>>> aubio.freq2note(440)
'A4'
>>> aubio.freq2note(220... | 15,469 |
def read_partpositions(filename, nspec, ctable=True, clevel=5, cname="lz4", quantize=None):
"""Read the particle positions in `filename`.
This function strives to use as less memory as possible; for this, a
bcolz ctable container is used for holding the data. Besides to be compressed
in-memory, its ch... | 15,470 |
def _run_cli(*args):
"""Run the jinjafy cli command from the tests/ directory, passing in the provided arguments"""
return subprocess.run(
['jinjafy', *args],
# Execute in the same directory as this test file
cwd=path.dirname(__file__),
# stdout as text
encoding='utf-8',
... | 15,471 |
def SQLHistPlotDivGrp(gGroups,cPlotVar,cPlotVarY,divElement='Canal_Number',pName='',pXlabel='',pYlabel='',bins=20,sqlAdd='',drawGraph=True,myalph=1,xnorm=0,ynorm=0,stdVal='',imgDPI=72,useEbar=False,plotEbar=False,showLegend=True,fixVals=None,logx=False,logy=False,isInt=False):
"""SQLHistPlotGrp(gGroups,cPlotVar,cPl... | 15,472 |
def test40_subdict_simplified_err():
"""
Check the function parsing a PII_TASKS list of simplified tasks with errors
"""
# Not a tuple
PII_TASKS = [r"\d16"]
with pytest.raises(mod.InvPiiTask):
mod.build_subdict(PII_TASKS, "fr")
# A tuple plus not a tuple
PII_TASKS = [(PiiEnum.CR... | 15,473 |
def license_list(ctx):
"""Show all license within the VSD"""
from datetime import datetime
result = ctx.obj['nc'].get("licenses")
table = PrettyTable(["License id",
"is Cluster",
"Compagny",
"Max NICs",
"... | 15,474 |
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7... | 15,475 |
def configure_l3(conf, tunnel_mode):
"""
This function creates a temporary test bridge and adds an L3 tunnel.
"""
s = util.start_local_server(conf[1][1])
server = util.rpc_client("127.0.0.1", conf[1][1])
server.create_bridge(DEFAULT_TEST_BRIDGE)
server.add_port_to_bridge(DEFAULT_TEST_BRIDGE,... | 15,476 |
def gradient_descent(y, tx, initial_w, gamma, max_iters):
"""Gradient descent algorithm."""
threshold = 1e-3 # determines convergence. To be tuned
# Define parameters to store w and loss
ws = [initial_w]
losses = []
w = initial_w
method = 'mse'
for n_iter in range(max_iters):
c... | 15,477 |
def farthest_point_sample(xyz, npoint):
"""
Input:
xyz: pointcloud data, [B, N, 3]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B, N, C = xyz.shape
# 初始化一个centroids矩阵,用于存储npoint个采样点的索引位置,大小为B×npoint
# ... | 15,478 |
def eval_lstm_crf():
""" eval lstm """
print('\neval.py config: \n', config)
context.set_context(
mode=context.GRAPH_MODE,
save_graphs=False,
device_id=config.device_id,
device_target=config.device_target
)
embeddings_size = config.embed_size
parser = ImdbPa... | 15,479 |
def loadData(fname='Unstra.out2.00008.athdf'):
"""load 3d bfield and calc the current density"""
#data=ath.athdf(fname,quantities=['B1','B2','B3'])
time,data=ath.athdf(fname,quantities=['Bcc1'])
bx = data['Bcc1']
time,data=ath.athdf(fname,quantities=['Bcc2'])
by = data['Bcc2']
time,data=ath.athdf(fname,qu... | 15,480 |
def sparse_chain_crf_loss(y, x, U, b_start=None, b_end=None, mask=None):
"""Given the true sparsely encoded tag sequence y, input x (with mask),
transition energies U, boundary energies b_start and b_end, it computes
the loss function of a Linear Chain Conditional Random Field:
loss(y, x) = NLL(P(y|x)),... | 15,481 |
def _get_de43_fields(de43_field):
"""
get pds 43 field breakdown
:param de43_field: data of pds 43
:return: dictionary of pds 43 sub elements
"""
LOGGER.debug("de43_field=%s", de43_field)
de43_regex = (
r"(?P<DE43_NAME>.+?) *\\(?P<DE43_ADDRESS>.+?) *\\(?P<DE43_SUBURB>.+?) *\\"
... | 15,482 |
async def query(database: Database, payload: PostionQueryIn):
""" Find whether a point is within a country """
query = select([countries.c.name, countries.c.iso2, countries.c.iso3])
# Convert a GeoPoint into a format that can be used in postgis queries
point = f"POINT({payload.location.longitude} {paylo... | 15,483 |
def calc_header_zeropoint(im, ext=0):
"""
Determine AB zeropoint from image header
Parameters
----------
im : `~astropy.io.fits.HDUList` or
Image object or header.
Returns
-------
ZP : float
AB zeropoint
"""
from . import model
scale_exptime = 1.
if i... | 15,484 |
def _convert_min_sec_to_sec(val):
"""
:param val: val is a string in format 'XmYsZ' like '0m5s3' meaning at secong 5,3
:return:
>>> _convert_min_sec_to_sec('10m11s2')
611.2
"""
_min = val.split('m')[0]
_sec = val.split('m')[1].split('s')[0]
_dsec = val.split('s')[1]
if len(_dse... | 15,485 |
def sitetester_home():
"""
Home screen for Tester:
A Tester can:
a. Change their testing site
b. View apspointments for the site they work at
c. Create an appointment for their testing site
d. View aggregate test results
e. View daily test resu... | 15,486 |
def test_sarcasm():
    """Jokes should crash.<sarcasm/>"""
    raw = u'Yeah right <sarcasm/>'
    expected = u'Yeah right <sarcasm/>'
    eq_cleaning_for_frag_and_doc(expected, raw)
def datetime_to_fractional_year(input: datetime) -> float:
    """Convert a Python datetime to a fractional year (e.g. 2020-07-01 -> ~2020.497).

    The fraction is the number of days elapsed since January 1st divided by
    the length of that calendar year, so leap years are handled correctly.

    :param input: the datetime to convert
    :return: year plus the elapsed fraction of that year
    """
    start = date(input.year, 1, 1).toordinal()
    # Length of the year in days (365 or 366 for leap years).
    year_length = date(input.year + 1, 1, 1).toordinal() - start
    # Divide elapsed days by the year length; without this division the
    # result would be year + day-count, not a fractional year.
    return input.year + (input.toordinal() - start) / year_length
def _optical_flow_to_rgb(
    flow: tf.Tensor,
    saturate_magnitude: float = -1.0,
    name: Optional[str] = None,
) -> tf.Tensor:
    """Visualize an optical flow field in RGB colorspace."""
    scope_name = name if name else 'OpticalFlowToRGB'
    hsv_image = _optical_flow_to_hsv(flow, saturate_magnitude, scope_name)
    return tf.image.hsv_to_rgb(hsv_image)
def shuffle_dict(dict_1, dict_2, num_shuffles=10):
"""
Shuffles num_shuffles times
for two dictionaries that you want to compare against each other, shuffles them.
returns two di
"""
shuffled_dict_1 = {}
shuffled_dict_2 = []
for x in range(num_shuffles):
for dataset_na... | 15,490 |
def clear_dd2_selection(val, n_clicks):
"""Clear Dropdown selections for Dropdown #2 (dd2)
( Dropdown to clear #2 of 2 )
Args:
val (str): cascading response via `clear_dd2_selection()` callback
n_clicks: int
Returns:
str: Resets selections to default, blank states.
"""
... | 15,491 |
def set_lframe(pdict):
"""
Defines reference frame per residue using backbone atoms.
z is normalized vector between Cb and Ca.
x is perpendicular to that and Ca-N vector.
y is perpendicular to the z-x plane.
"""
# local frame
z = pdict['Cb'] - pdict['Ca']
z /= np.linalg.norm(z, axis... | 15,492 |
def test_interpretation_02(reqid, expected_result):
"""
Action : Test mocking interpretation.
Expected Results : No difference from normal application usage.
Returns: N/A.
"""
json_parser = LoadAndParse()
json_parser.data = {"services": [{"title": "ECU Reset", "id": "11"},
... | 15,493 |
def test_ggn_implementation(problem):
"""Compare diagonal of full GGN with diagonal of block GGN."""
problem.set_up()
diag_ggn_from_full = AutogradExtensions(problem).diag_ggn_via_ggn()
diag_ggn_from_block = AutogradExtensions(problem).diag_ggn()
check_sizes_and_values(diag_ggn_from_full, diag_ggn... | 15,494 |
def get_SHF_L_min_C():
    """:return: minimum sensible heat ratio of the cooling load (-)"""
    return 0.4
def make_workflow_from_user_options():
"""Parser/validator for the cmd line args."""
parser = get_parser()
if len(sys.argv) < 2:
print('Too few arguments!')
parser.print_help()
parser.exit(1)
# parsing
try:
user_args = parser.parse_args()
except:
parser... | 15,496 |
def get_path(obj: Union[str, pathlib.Path]) -> pathlib.Path:
    """Convert a str into a fully resolved & expanded Path object.

    Args:
        obj: obj to convert into expanded and resolved absolute Path obj
    """
    path = pathlib.Path(obj)
    return path.expanduser().resolve()
def verify_policy_type_id(policy_type_id):
"""
:type policy_type_id: str
:param policy_type_id: policy type id - e.g. storage-policy-00000001
:rtype: int
:return: Fixed policy type ID
:raises: ValueError: policy type id
"""
if not re.match("storage-policy-\d+", policy_type_id):
... | 15,498 |
def f_fg_iou(results):
"""Calculates foreground IOU score.
Args:
a: list of [T, H, W] or [H, W], binary mask
b: list of [T, H, W] or [H, W], binary mask
Returns:
fg_iou: [B]
"""
y_out = results['y_out']
y_gt = results['y_gt']
num_ex = len(y_gt)
fg_iou = np.zeros... | 15,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.