content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def round_time(t, to=timedelta(seconds=1)):
""" cftime will introduces noise when decoding values into date objects.
This rounds time in the date object to the nearest second, assuming the init time
is at most 1 sec away from a round minute. This is used when merging datasets so
their time dims match up... | dcc7d0caa4e4787f710a386968d8967661e662ca | 3,647,765 |
def decide_end(match_list, return_whole_match_object = False):
"""
Among all the match objects, return the march string the closest to the end of the text
Return : a string. If return_whole_match_object is True, return a match object
"""
if len(match_list) == 0:
return pd.NA
ends = ... | 72e9a4f63c9c7b95e5728b798bc1cd508d1911e6 | 3,647,766 |
def get_level_refactorings_count(level: int, dataset: str = "") -> str:
"""
Get the count of all refactorings for the given level
Parameter:
level (int): get the refactoring instances for this level
dataset (str) (optional): filter for these specific projects
"""
retu... | 8150537a35161541d7eb4b483d06ef8096611d37 | 3,647,767 |
def repeat_batch(t, K, dim=0):
"""Repeat a tensor while keeping the concept of a batch.
:param t: `torch.Tensor`: The tensor to repeat.
:param K: `int`: The number of times to repeat the tensor.
:param dim: `int`: The dimension to repeat in. This should be the
batch dimension.
:returns: `t... | 31ae6e02bd23c56049a4f8e5ea9f36e5b6186678 | 3,647,768 |
def ifte(s, g_cond, g_true, g_false):
"""goal that succeeds if g_cond and g_true succeed or g_cond fails and g_false succeeds"""
def loop(s_inf=g_cond(s)):
try:
first_cond = next(s_inf)
except StopIteration:
yield from g_false(s)
return
except Suspend... | 899ed78b53e056804e9515e2f01125831ae0dfba | 3,647,769 |
def filter_by_continue_threshold_variance_threshold(peak_info, acc, cont_win_size=3, cont_thres=4, var_thres=0.001):
"""
Calculate the continuity by a given window length, then calculate the variance and filter the data by
a given threshold
:param peak_info: a 5D matrix
:param cont_win_size: continu... | 7cdbe81b8c0931d315a9d928b6a32105e6da56fb | 3,647,770 |
from datetime import datetime
def send_update(*args: str) -> bool:
    """Write the current UTC timestamp under the last segment of the given path.

    The final element of ``args`` names the endpoint key; the preceding
    elements form the parent path handed to ``send_message``.
    """
    assert args, "Firebase path cannot be empty"
    *parents, leaf = args
    payload = {leaf: datetime.utcnow().isoformat()}
    return send_message(payload, *parents)
def execute_custom(datatype, runtype, driver, data_repository, step_list):
"""
Execute a custom testcase
"""
print_info("{0} {1}".format(datatype, runtype))
tc_status = False
if data_repository.has_key("suite_exectype") and \
data_repository["suite_exectype"].upper() == "ITERATIVE":
... | 884ab4ff7f66f1ad969b03ec406513b301739169 | 3,647,772 |
def parseSolFile(filename):
"""Parses SOL file and extract soil profiles."""
data = {}
profile = None
lat = None
lon = None
with open(filename) as fin:
for line in fin:
if line.startswith("*"):
if profile is not None:
data[(lat, lon)] = "{0... | 7c3876f1e4899eff5b0036045df4348903a11306 | 3,647,773 |
import collections
def get_deps_info(projects, configs):
"""Calculates dependency information (forward and backwards) given configs."""
deps = {p: configs[p].get('deps', {}) for p in projects}
# Figure out the backwards version of the deps graph. This allows us to figure
# out which projects we need to test ... | 10215dfb623b8ebaaabdb2d1bcffd876d37f9f66 | 3,647,774 |
def write_cflags():
    """Return the Makefile CFLAGS/LDFLAGS section, followed by the CUDA flags.

    The C++ standard comes from the module-level ``CPPVERSION`` constant.
    """
    base = (
        f"CFLAGS = ${{TF_CFLAGS}} ${{OMP_CFLAGS}} -fPIC -O2 -std={CPPVERSION}\n"
        "LDFLAGS = -shared ${TF_LFLAGS}\n"
    )
    return base + write_cflags_cuda()
def get_coverage(inputs):
"""Get edge coverage.
Returns:
A dictionary of inputs and corresponding coverage
"""
cov_dict = dict()
for test_input in inputs:
"Get coverage by running the program"
cov = coverage(input)
"Update coverage dictionary of test input"
cov_dict[test_input] = cov
ret... | 5a80399b7877d968654e8c6fc069ff0f70d10a62 | 3,647,777 |
from operator import add
def average(arr, mode = "mixed"):
"""
average(arr, mode) takes the average of a given array
Once again, the modes of add() can be used here to denote what the type of the array is
The function below, determine_mode(arr) can be used to determine the correct mode for your array
... | 74d0b836e6877d1f7d23b69a191e653bcffd6f00 | 3,647,779 |
def non_halting(p):
    """Find a non-halting fragment of parser `p`; give back `None` if none exists."""
    culprit = left_recursive(p)
    if culprit:
        return culprit
    return non_halting_many(p)
def _isValidWord(word):
    """Check that `word` is a valid English non-stop word.

    Stop words are rejected outright; otherwise the word must appear in the
    English word list or have at least one WordNet synset.
    """
    if word in _englishStopWords:
        return False
    return word in _englishWords or bool(wordnet.synsets(word))
def compare_floats(value1: float, value2: float) -> bool:
    """Return True when the two floats differ by at most 1e-6 (absolute tolerance).

    Note this is a fixed absolute tolerance, not a relative one, so it is
    unsuitable for comparing very large magnitudes.
    """
    # The comparison already yields a bool; no ternary needed.
    return abs(value1 - value2) <= 1e-6
from cuml.utils.import_utils import has_treelite, has_xgboost
import treelite
import treelite.runtime
import xgboost as xgb
def _build_treelite_classifier(m, data, arg={}):
"""Setup function for treelite classification benchmarking"""
if has_treelite():
else:
raise ImportError("No treelite package... | 095d9748988d55d2b578c0cb74fc4a662aa660c3 | 3,647,784 |
def _pkq(pk):
"""
Returns a query based on pk.
Note that these are designed to integrate with cells and how they are saved in the database
:Parameters:
----------------
pk : list
list of primary keys
:Returns:
-------
dict
mongo query filtering fo... | d17527132c26c7e3504471f8456baccea295c71e | 3,647,785 |
import torch
def inspect_decode_labels(pred, num_images=1, num_classes=NUM_CLASSES,
inspect_split=[0.9, 0.8, 0.7, 0.5, 0.0], inspect_ratio=[1.0, 0.8, 0.6, 0.3]):
"""Decode batch of segmentation masks accroding to the prediction probability.
Args:
pred: result of inference.
num_images... | d8ee386e2088428b7bfe5579cc5558cf4d6890f1 | 3,647,786 |
from typing import Dict
from typing import Union
def set_default_values(
**attributes: Dict[str, Union[float, int, str]],
) -> Dict[str, Union[float, int, str]]:
"""Set the default value of various parameters.
:param attributes: the attribute dict for the electronic filter being calculated.
:return: ... | 3c8871706446b2bd0aec1879b06e443a57898a96 | 3,647,787 |
import inspect
def validate_function(fn: FunctionType, config: Configuration, module_type: ModuleType) -> FunctionValidationResult:
"""Validates the docstring of a function against its signature.
Args:
fn (FunctionType): The function to validate.
config (Configuration): The configuration to u... | cc9c858f8ade844b89d944dc149c0233ed5741e7 | 3,647,788 |
def say(l, b, i):
    """
    !d Repeat a word or phrase
    !a <message...>
    !r moderator
    """
    # Bot command handler (Python 2 — note the print statement). Echoes the
    # invoking command's arguments back through the bot. `l` appears unused
    # here; `b` is the bot object, `i` the incoming command carrying `args`.
    # NOTE(review): the docstring lines (!d/!a/!r) look like command-framework
    # metadata parsed at runtime — do not reword them without confirming.
    try:
        # ' '.join raises TypeError when i.args is None or holds non-strings.
        print 'Saying the phrase:', ' '.join(i.args)
        b.l_say(' '.join(i.args), i, 1)  # relay the phrase; third arg presumably a flag — TODO confirm
        return True
    except TypeError:
        # Malformed arguments are reported as command failure, not a crash.
        return False
import re
def rSanderSelect(dbItem,index=0,interactive=False):
"""
rSanderSelect(dbItem,index=0,interactive=False)
select which rSander henry data to use in dbItem
Parameters:
dbItem, db[key] dictionary object with keys = ['hbpSIP','hbpSIPL',
'hbpSI_index']
... | 54e6a79a2095810e10032c2da59972e89ca186eb | 3,647,791 |
def dataset_w_pedigree_field():
"""
:return: Return model Dataset example with `pedigree_field` defined.
"""
search_pattern = SearchPattern(left="*/*/*_R1.fastq.gz", right="*/*/*_R2.fastq.gz")
dataset = DataSet(
sheet_file="sheet.tsv",
sheet_type="germline_variants",
search_p... | 2fce0d1391e234a7bb4f2a0bcab5ba24fc27abe0 | 3,647,792 |
import requests
def get_new_access_token(client_id, client_secret, refresh_token):
"""Use long-lived refresh token to get short-lived access token."""
response = requests.post(
'https://www.googleapis.com/oauth2/v4/token',
data={
'client_id': client_id,
'client_secret'... | a8f79511f8f0078121cf291752c2b315023df6de | 3,647,793 |
def prettify_seconds(seconds):
"""
Prettifies seconds.
Takes number of seconds (int) as input and returns a prettified string.
Example:
>>> prettify_seconds(342543)
'3 days, 23 hours, 9 minutes and 3 seconds'
"""
if seconds < 0:
raise ValueError("negative input not allowed")
... | 4b77f9ed3d2085895ef15c6be30b7bfe83d1f49d | 3,647,794 |
import re
def get_regions_prodigal(fn):
"""Parse prodigal output"""
regions = {}
with open(fn, 'r') as f:
for line in f:
if line[:12] == '# Model Data':
continue
if line[:15] == '# Sequence Data':
m = re.search('seqhdr="(\S+)"', line)
... | d69f7b6d9dfc6802ad4dab3472f90a2d68b95bdd | 3,647,795 |
from typing import Optional
def get_transform(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
transform_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTransformResult:
"""
Use this dat... | 533ff2c95303c25b0a9741c36b34a755e18948e5 | 3,647,796 |
def default_preprocessing(df):
"""Perform the same preprocessing as the original analysis:
https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
"""
return df[(df.days_b_screening_arrest <= 30)
& (df.days_b_screening_arrest >= -30)
& (df.is_recid != -1... | e6f4d8ceaa09fe71657e7936db886c3eabfb7aa0 | 3,647,797 |
def get_step_type_udfs(
step_type: str,
workflow: str,
adapter: ArnoldAdapter = Depends(get_arnold_adapter),
):
"""Get available artifact udfs for a step type"""
artifact_udfs = find_step_type_artifact_udfs(
adapter=adapter, step_type=step_type, workflow=workflow
)
process_udfs = fi... | f3ad3ad96d3f33e343afbb2ffcfa176fd4c6e654 | 3,647,798 |
def decode_base58(s: str) -> bytes:
"""
Decode base58.
:param s: base58 encoded string
:return: decoded data
"""
num = 0
for c in s:
if c not in BASE58_ALPHABET:
raise ValueError(
"character {} is not valid base58 character".format(c)
)
... | ee56c73e4fd22f25cd0caf63651abc13a4ba147d | 3,647,799 |
import random
def ports_info(ptfadapter, duthost, setup, tx_dut_ports):
"""
Return:
dut_iface - DUT interface name expected to receive packtes from PTF
ptf_tx_port_id - Port ID used by PTF for sending packets from expected PTF interface
dst_mac - DUT interface destination MAC address
... | 14aef7e68386872a1d960329f2f8bee452aa9e29 | 3,647,800 |
def test_text_single_line_of_text(region, projection):
    """
    Check that a single line of text can be placed at an x/y position.
    """
    figure = Figure()
    text_kwargs = dict(
        region=region,
        projection=projection,
        x=1.2,
        y=2.4,
        text="This is a line of text",
    )
    figure.text(**text_kwargs)
    return figure
def safely_get_form(request, domain, instance_id):
    """Fetch the form for `instance_id`, verifying the user may access its location."""
    form = get_form_or_404(domain, instance_id)
    if can_edit_form_location(domain, request.couch_user, form):
        return form
    raise location_restricted_exception(request)
def generate_hazard_rates(n, d, timelines, constant=False, independent=0, n_binary=0, model="aalen"):
"""
n: the number of instances
d: the number of covariates
lifelines: the observational times
constant: make the coeffients constant (not time dependent)
n_binary: the number of binary... | 9c0da64f5796f57d474822121e1af5ca8ebb25e2 | 3,647,803 |
def load_graph(graph_url):
"""
Function that loads a graph given the URL
for a text representation of the graph
Returns a dictionary that models a graph
"""
graph_file = urllib2.urlopen(graph_url)
graph_text = graph_file.read()
graph_lines = graph_text.split('\n')
graph_lines = ... | d346fb75f5ff872147a166948af65bb52bab739c | 3,647,804 |
import torch
def calculate_regularization_term(means, n_objects, norm):
"""means: bs, n_instances, n_filters"""
bs, n_instances, n_filters = means.size()
reg_term = 0.0
for i in range(bs):
if n_objects[i]:
_mean_sample = means[i, : n_objects[i], :] # n_objects, n_filters
... | b6eb43a8915449c7e86d01a08b3ea2e77ae51064 | 3,647,805 |
def mode(x):
    """Return the most frequent element of `x`.

    Ties are broken by `np.unique`'s sorted order (the smallest winner wins).

    Args:
        x (List or Array): values to tally.
    Returns:
        The element with the highest count.
    """
    values, frequencies = np.unique(x, return_counts=True)
    winner = int(np.argmax(frequencies))
    return values[winner]
def has_path(matrix, path: str) -> bool:
"""
Given a matrix, make sure there is a path for a given string or not.
Parameters
----------
path: str
A given path, like "abcd"
Returns
-------
out: bool
Whether the given path can be found in the matrix
"""
if not pa... | bbde72992b762dd73c44c60da675da829255000d | 3,647,808 |
def gensim_processing(data):
"""
Here we use gensim to define bi-grams and tri-grams which enable us to create a create a dictonary and corpus
We then process the data by calling the process_words function from our utils folder
"""
#build the models first
bigram = gensim.models.Phrases(data, mi... | 67a4d9a90c8ea9809980d9871b769288915fe3cc | 3,647,809 |
def _distances(value_domain, distance_metric, n_v):
"""Distances of the different possible values.
Parameters
----------
value_domain : array_like, with shape (V,)
Possible values V the units can take.
If the level of measurement is not nominal, it must be ordered.
distance_metric : ... | 90c362db28497569a50475d7f6040755b1cfffea | 3,647,812 |
import torch
import math
def log_mvn_likelihood(mean: torch.FloatTensor, covariance: torch.FloatTensor, observation: torch.FloatTensor) -> torch.FloatTensor:
"""
all torch primitives
all non-diagonal elements of covariance matrix are assumed to be zero
"""
k = mean.shape[0]
variances = covaria... | 6333ea91ddff9ac685f18954c5b7344846810ec3 | 3,647,813 |
def M_Mobs(H0, M_obs):
    """Shift an observed absolute magnitude to the value implied by Hubble constant H0.

    Applies the distance-modulus correction 5*log10(H0/100).
    """
    modulus_shift = 5. * np.log10(H0 / 100.)
    return M_obs + modulus_shift
def generate_proctoring_requirements_email_context(user, course_id):
"""
Constructs a dictionary for use in proctoring requirements email context
Arguments:
user: Currently logged-in user
course_id: ID of the proctoring-enabled course the user is enrolled in
"""
course_module = modu... | fc594882b68b7f1f554fa1681943d49b722ae229 | 3,647,815 |
import random
def mutate_strings(s):
    """Apply one randomly chosen character-level mutation to `s` and return the result."""
    candidates = [
        delete_random_character,
        insert_random_character,
        flip_random_character,
    ]
    chosen = random.choice(candidates)
    return chosen(s)
def get_one_hot(inputs, num_classes):
"""Get one hot tensor.
Parameters
----------
inputs: 3d numpy array (a x b x 1)
Input array.
num_classes: integer
Number of classes.
Returns
-------
One hot tensor.
3d numpy array (a x b x n).
"""
onehots = ... | 2f4a8b3a60a90a8f81579dd5938a1bab91cb5537 | 3,647,817 |
def one_hot_encoder(batch_inds, num_categories):
    """One-hot encode `batch_inds` into `num_categories` classes using jax.nn."""
    return jax.nn.one_hot(batch_inds, num_classes=num_categories)
def part1(entries: defaultdict) -> int:
    """Solve part 1: simulate `entries` for 80 steps and return the result."""
    # Delegates to the shared `calculate` helper with the part-1 step count.
    return calculate(entries, 80)
def ceki_filter(data, bound):
    """Return a boolean mask marking rows whose |ceki| convergence check is below `bound`."""
    return data["ceki"].abs() < bound
def get_middleware(folder, request_name, middlewares=None):
""" Gets the middleware for the given folder + request """
middlewares = middlewares or MW
if folder:
middleware = middlewares[folder.META.folder_name + "_" + request_name]
else:
middleware = middlewares[request_name]
if mi... | 720aafa5a3d0ef265eeaa8fe40a68c7024b0adc3 | 3,647,821 |
def tf_repeat_2d(a, repeats):
    """Tile a 2-D tensor `repeats` times along a new leading axis (np.repeat analogue)."""
    assert len(a.get_shape()) == 2
    expanded = tf.expand_dims(a, 0)
    return tf.tile(expanded, [repeats, 1, 1])
import torch
def accuracy(output, target, topk=(1,), output_has_class_ids=False):
"""Computes the accuracy over the k top predictions for the specified values of k"""
if not output_has_class_ids:
output = torch.Tensor(output)
else:
output = torch.LongTensor(output)
target = torch.LongT... | f702000a64db1bb6f53b7686f1143656f9864e8d | 3,647,824 |
def masked_residual_block(c, k, nonlinearity, init, scope):
"""
Residual Block for PixelCNN. See https://arxiv.org/abs/1601.06759
"""
with tf.variable_scope(scope):
n_ch = c.get_shape()[3].value
half_ch = n_ch // 2
c1 = nonlinearity(c)
c1 = conv(c1, k=1, out_ch=half_ch, ... | ffd4bb042affc0250472d50b6b824be66f808878 | 3,647,825 |
def calculate_lookup(src_cdf: np.ndarray, ref_cdf: np.ndarray) -> np.ndarray:
"""
This method creates the lookup table
:param array src_cdf: The cdf for the source image
:param array ref_cdf: The cdf for the reference image
:return: lookup_table: The lookup table
:rtype: array
"""
lookup... | f1433e6af001ddcda44c740dabfb1ee643cd2260 | 3,647,826 |
def measureInTransitAndDiffCentroidForOneImg(prfObj, ccdMod, ccdOut, cube, rin, bbox, rollPhase, flags, hdr=None, plot=False):
"""Measure image centroid of in-transit and difference images
Inputs:
-----------
prfObj
An object of the class prf.KeplerPrf()
ccdMod, ccdOut
(int) CCD m... | 655477460e5841736f07106d5e6afd666d95f450 | 3,647,827 |
def readGlobalFileWithoutCache(fileStore, jobStoreID):
    """Materialize `jobStoreID` into a local temp file and return its path,
    bypassing the cache entirely.

    Works around toil issue #1532.
    """
    local_path = fileStore.getLocalTempFile()
    fileStore.jobStore.readFile(jobStoreID, local_path)
    return local_path
def get_user_granted_assets_direct(user):
"""Return assets granted of the user directly
:param user: Instance of :class: ``User``
:return: {asset1: {system_user1, system_user2}, asset2: {...}}
"""
assets = {}
asset_permissions_direct = user.asset_permissions.all()
for asset_permission in... | 602bd104835cc85dcf59339c8b4b2e2e2b5f747b | 3,647,829 |
def nullColumns(fileHeaders, allKeys):
    """
    Return the set of expected column names that are absent from the file.
    """
    return set(allKeys) - set(fileHeaders)
def listable_attachment_tags(obj, joiner=" "):
"""
Return an html string containing links for each of the attachments for
input object. Images will be shown as hover images and other attachments will be
shown as paperclip icons.
"""
items = []
attachments = obj.attachment_set.all()
labe... | b2fa3fd249469334e42616f0e4392ce16d4076d1 | 3,647,831 |
import math
def distance_km(lat1, lon1, lat2, lon2):
""" return distance between two points in km using haversine
http://en.wikipedia.org/wiki/Haversine_formula
http://www.platoscave.net/blog/2009/oct/5/calculate-distance-latitude-longitude-python/
Author: Wayne Dyck
"""
ret_val = ... | f50d444b5769b1d00045429e3d577ec22f922774 | 3,647,832 |
def _flip(r, u):
"""Negate `r` if `u` is negated, else identity."""
return ~ r if u.negated else r | 18ddcf5132867f5646c729bdadcb2c5077df8c03 | 3,647,833 |
def get_arguments():
"""Defines command-line arguments, and parses them."""
parser = ArgumentParser()
# Execution mode
parser.add_argument(
"--mode",
"-m",
choices=['train', 'test', 'full'],
default='train',
help=(
"train: performs training and valida... | 5385c75524460ed4968def0ab98fc29112d72434 | 3,647,834 |
def twoThreeMove(tri, angle, face_num, perform = True, return_edge = False):
"""Apply a 2-3 move to a taut triangulation, if possible.
If perform = False, returns if the move is possible.
If perform = True, modifies tri, returns (tri, angle) for the performed move"""
face = tri.triangle(face_num)
... | 18abe14b2b8446d39e285f1facda82568b808b60 | 3,647,835 |
import csv
def obterUFEstadoPorNome(estado):
"""
Retorna o codigo UF do estado a partir do nome do estado
:param estado: Nome do estado
:return codigoDoEstado: Código UF do estado
"""
try:
with open("./recursos/estados.csv", newline="") as csvfile:
reader = csv.DictReader(c... | 9b136fe8c557e5f75bca235cf66168f92244a4e6 | 3,647,836 |
import random
def get_random_byte_string(byte_length):
""" Use this function to generate random byte string
"""
byte_list = []
i = 0
while i < byte_length:
byte_list.append(chr(random.getrandbits(8)))
i = i + 1
# Make into a string
byte_string = ''.join(byte_list)
retur... | 0ea923a045beb476501dc3d8983f3fe89efef008 | 3,647,837 |
def find_all_indexes(text, pattern):
"""Return a list of starting indexes of all occurrences of pattern in text,
or an empty list if not found.
Complexity Analysis:
Best case: O(t)
Worst Case: O(t)
In the best case the pattern is the empty string(''). In that scenario
thi... | 0101efe77570b5d027928495dc25cb4e02d5c2f5 | 3,647,838 |
def is_igb(request):
    """
    Return True when the request carries the EVE in-game-browser trust header.
    """
    # Membership test already yields the desired bool; no if/return needed.
    return 'HTTP_EVE_TRUSTED' in request.META
from typing import OrderedDict
from re import T
def compile_ADAM_train_function(model, gparams, learning_rate=0.001, b1=0.9, b2=0.999, e=1e-8,
gamma=1 - 1e-8):
"""
ADAM update rules
Default values are taken from [Kingma2014]
References:
[Kingma2014] Kingma, Diederi... | a60f27c3b314d3adc2ec2f7bb0f8c92875d7625b | 3,647,840 |
def linear_svr_pred(X_train, Y_train):
    """
    Fit a linear Support Vector Regression model and return its in-sample predictions.
    """
    model = LinearSVR(random_state=RANDOM_STATE)
    model.fit(X_train, Y_train)
    return model.predict(X_train)
import math
def area(rad: float = 1.0) -> float:
    """
    return area of a circle
    >>> area(2.0)
    3.141592653589793
    >>> area(3.0)
    7.0685834705770345
    >>> area(4.0)
    12.566370614359172
    """
    # NOTE(review): pi*rad*rad/4 is the circle area only when `rad` is the
    # DIAMETER (pi*(d/2)**2 == pi*d*d/4); for a radius it would be pi*rad**2.
    # The doctests above pin the current diameter-based behavior — confirm
    # whether the parameter name or the formula is wrong before changing.
    return rad * rad * math.pi / 4
import requests
import json
def check_coverage_running(url, coverage_name):
"""
Check if Navitia coverage is up and running
:param url: Navitia server coverage url
:param coverage_name: the name of the coverage to check
:return: Whether a Navitia coverage is up and running
"""
_log.info("c... | 3d3d9b1403c541aa0cdb8867845b21bf387431fb | 3,647,845 |
import random
def make_random_board(row_count, col_count, density=0.5):
"""create a random chess board with given size and density"""
board = {}
for row_num in range(row_count):
for col_num in range(col_count):
factor = random.random() / density
if factor >= 1:
... | ea40883989675c99aa70af0b180957aa677233a5 | 3,647,846 |
def create_roots(batch_data):
"""
Create root nodes for use in MCTS simulation. Takes as a parameter a list of tuples,
containing data for each game. This data consist of: gametype, state, type of player 1
and type of player 2
"""
root_nodes = []
for data in batch_data:
game = data[0... | d07b0781605b01d08c9ef78f30dad9254ade9907 | 3,647,847 |
def _parse_crs(crs):
"""Parse a coordinate reference system from a variety of representations.
Parameters
----------
crs : {str, dict, int, CRS}
Must be either a rasterio CRS object, a proj-string, rasterio supported
dictionary, WKT string, or EPSG integer.
Returns
-------
... | 559692b146ec99a9fe5407c8bca340c72dddf0a5 | 3,647,848 |
def hs_instance_get_all(context):
    """Get a list of hyperstash instances."""
    # Thin pass-through to the backing implementation module (IMPL), which
    # performs the actual lookup for the given request `context`.
    return IMPL.hs_instance_get_all(context)
import importlib
def import_from_file(module_name: str, filepath: str):
"""
Imports a module from file.
Args:
module_name (str): Assigned to the module's __name__ parameter (does not
influence how the module is named outside of this function)
filepath (str): Path to the .py fil... | 89ac082cbc7d3dd5d9158a8cc8eb5ef061c444e6 | 3,647,850 |
def plot_chirp(stim_inten, spike_bins, smooth=True, ax=None):
"""
Plot the response to a chirp stimulus (but could be any repeated stimulus, non-shuffled).
The response is plotted with seaborn's lineplot.
params:
- stim_inten: The whole stimulus intensity
- spike_bins: The cell's respon... | 75fe6defcb23a2c59e2241c9a68bf753dc6828b7 | 3,647,851 |
def init_context_processor(app):
"""定义html模板方法"""
@app.context_processor
def pjax_processor():
"""
pjax处理器
"""
def get_template(base, pjax=None):
pjax = pjax or 'pjax.html'
if 'X-PJAX' in request.headers:
return pjax
else:... | 6b5cf03ec48a1b1324a158388098da5e4884286f | 3,647,854 |
def tiered(backup_tier, R):
"""Returns a tier aware checker.
The returned checker ensures that it's possible to construct a set
(of length R) including given set s that will contain exactly one
node from the backup tier.
`backup_tier` is a list of node ids that count as backups.
A typical inv... | ecde647738fad88ea806948a0df7bee22a73abfa | 3,647,855 |
def ls_chebyshev( A, b, s_max, s_min, tol = 1e-8, iter_lim = None ):
"""
Chebyshev iteration for linear least squares problems
"""
A = aslinearoperator(A)
m, n = A.shape
d = (s_max*s_max+s_min*s_min)/2.0
c = (s_max*s_max-s_min*s_min)/2.0
theta = (1.0-s_min/s_max)/(1... | 05e50ac0167d1ed03ae3e9fa6876c94a50db7893 | 3,647,856 |
def compute_confusion_matrix(args, df_inference, strata):
"""From a list of prediction summary (as produced by get_cloud_prediction_summary), compute a confusion matrix."""
y_true = df_inference["vt_" + strata].values
y_predicted = df_inference["pred_" + strata].values
y_true = np.vectorize(get_closes... | 0662638c4db5ee9e1d94b1e582d9b0824eefd3ff | 3,647,857 |
def get_metadata(**kwargs):
"""Metadata
Get account metadata
Reference: https://iexcloud.io/docs/api/#metadata
Data Weighting: ``Free``
.. warning:: This endpoint is only available using IEX Cloud. See
:ref:`Migrating` for more information.
"""
return Metadata(**kwargs).... | 9f4b506bdf978f525e26d7f976a0fdc2f483ae0f | 3,647,858 |
def load_data():
"""
Carrega os dados do dataset iris
:return: dados carregados em uma matriz
"""
data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data", header=None)
# utiliza somente as duas primeiras classes
data = data[:100]
# transforma as... | fe2a1a999406f23676e58f75f1d5999e9f0697e8 | 3,647,863 |
import select
from datetime import datetime
async def activate_clients(
*,
client_id: int,
session: Session = Depends(get_session),
):
"""
Activate a client using its id as a key.
Parameters
----------
client_id : int
ID of the client to be activated.
session : Session
... | bdd679d94fc68d4c4c75f410d1ed3eec193f868b | 3,647,864 |
def simulate_until_target_substate_or_max_t(
_simulate_until_attractor_or_target_substate_or_max_t, initial_state, perturbed_nodes_by_t,
predecessor_node_lists, truth_tables):
"""
Perform simulation to figure whether it reaches target substate.
Does not return states of simulations that don... | 526ef8085dcbe4bcbc112c3bd4626ec5247e2f97 | 3,647,866 |
import requests
from bs4 import BeautifulSoup
def query_snpedia_online(rsid):
"""
@param soup:
@param rsid:
"""
rsid = rsid.capitalize()
url = "https://bots.snpedia.com/index.php"
rsid_url = f"{url}/{rsid}"
page = requests.get(rsid_url)
soup = BeautifulSoup(page.content, "html.... | 138b252917b027564826212cfe96abafef3071b3 | 3,647,867 |
def lower(value: str):
    """Return a copy of `value` with every character lowercased (single-argument filter)."""
    return value.lower()
import pprint
def validate_oidc():
    """Demonstrate validating the OIDC bearer token from the Authorization header."""
    bearer_token = request.headers['Authorization'].split(' ')[1]
    result = check_oidc_token(bearer_token)
    pprint.pprint(result)
    return jsonify({
        'success': result['success']
    })
def merge(a, b, path=None):
"""From https://stackoverflow.com/a/7205107"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
... | 8f7990f28168fe0e3eaca790baddc0088baedf65 | 3,647,871 |
def norm_sq(f, alpha, n, L_mat_long, step):
    """Negative log-likelihood with a squared-L2 penalty on the estimate.

    Computes -mean(log(L @ f)) + alpha * step**2 * sum(f**2), where entries of
    `f` that are <= 0 are clamped to 1e-6 before both the log and the penalty.

    :param f: density values (length-m array).
    :param alpha: regularization strength.
    :param n: number of observations (rows of the likelihood matrix).
    :param L_mat_long: flattened (n * m) likelihood matrix.
    :param step: grid step size used in the penalty weight.
    :return: scalar objective value.
    """
    L_mat = L_mat_long.reshape(n, len(f))
    # Clamp on a fresh array so the caller's `f` is not mutated in place
    # (the previous version wrote f[f <= 0] = 1e-6 into the argument).
    f = np.where(f <= 0, 1e-6, f)
    val = np.log(np.dot(L_mat, f))
    return -sum(val) / n + alpha * step ** 2 * sum(f ** 2)
def get_applications(device_id: str = None, rpc_channel: InstrumentServer = None):
"""
获取手机应用列表
:param device_id:
:param rpc_channel:
:return:
"""
if not rpc_channel:
_rpc_channel = init(device_id)
else:
_rpc_channel = rpc_channel
application_list = _rpc_channel.call(... | 150884e18349003e33011477603e2a6462bd8492 | 3,647,873 |
def open_mfdataset(files, use_cftime=True, parallel=True, data_vars='minimal', chunks={'time':1},
coords='minimal', compat='override', drop=None, **kwargs):
"""optimized function for opening large cf datasets.
based on https://github.com/pydata/xarray/issues/1385#issuecomment-561920115
... | ef31c732919f6b3cda0c6e5d9114fac7c39f40f7 | 3,647,874 |
def wls_sparse(X, y, w=1., calc_cov=False, verbose=False, **kwargs):
"""
Parameters
----------
X
y
w
calc_cov
verbose
kwargs
Returns
-------
"""
# The var returned by ln.lsqr is normalized by the variance of the error. To
# obtain the correct variance, it needs... | ff0bec6d6cdcee85506514348e8a812926427dee | 3,647,875 |
from typing import Tuple
def sobel_gradients(source: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Computes partial derivations to detect angle gradients.
"""
grad_x = generic_filter(source, np.matrix([
[1, 0, -1],
[2, 0, -2],
[1, 0, -1]]
))
grad_y = generic_filter... | 19c3e3eec46bee738b1e80dd73c5477f72dcf73c | 3,647,877 |
from typing import Mapping
def flat_dict(d, prefix=""):
"""
Loop through dictionary d
Append any key, val pairs to the return list ret
Add the prefix to any key param
Recurse if encountered value is a nested dictionary.
"""
if not isinstance(d, Mapping):
return d
ret = {}
... | f0c1f519126dea89c25ee38a9b0dd788c40d2088 | 3,647,878 |
import logging
def _get_filehandler_with_formatter(logname, formatter=None):
""" Return a logging FileHandler for given logname using a given
logging formatter
:param logname: Name of the file where logs will be stored, ".log"
extension will be added
:param formatter: An instance of logging.Format... | 1cc6f83480e691c4c54c359deabd6364da65f320 | 3,647,879 |
import torch
def gen_data_tensors(
df: pd.DataFrame,
lag: int = 6,
batch_size: int = 32,
validation_ratio: float = 0.2
) -> (DataLoader, DataLoader, TensorDataset, TensorDataset):
"""
Primary goal: create dataloader object.
"""
x_train, y_train = generate_supervised(df, lag=lag)
# ... | 1451d38bd695163d84784f5a6b9b791c3987d56b | 3,647,880 |
import json
def staff_for_site():
"""
Used by the Req/Req/Create page
- note that this returns Person IDs
"""
try:
site_id = request.args[0]
except:
result = current.xml.json_message(False, 400, "No Site provided!")
else:
table = s3db.hrm_human_resource
... | d8890f31ae67abf72cdfbd14dd2af08762131e90 | 3,647,881 |
def element_z(sym_or_name):
"""Convert element symbol or name into a valid element atomic number Z.
Args:
sym_or_name: string type representing an element symbol or name.
Returns:
Integer z that is a valid atomic number matching the symbol or name.
Raises:
ElementZError: if the symb... | b79fec9062539f98ad8c96cdc41a52f7e9c67fd9 | 3,647,882 |
from typing import Tuple
def to_int(s: str) -> Tuple[bool, int]:
    """Try to parse `s` as an int.

    :return: ``(True, n)`` on success, ``(False, 0)`` when `s` is not a
        valid integer literal (or not a string-like value at all).
    """
    try:
        return True, int(s)
    except (ValueError, TypeError):
        # Only parse failures are expected; any other exception is a real
        # bug and should propagate rather than be silently swallowed.
        return False, 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.