| content | id |
|---|---|
def decode_hint(hint: int) -> str:
"""Decodes integer hint as a string.
The format is:
⬜ (GRAY) -> .
🟨 (YELLOW) -> ?
🟩 (GREEN) -> *
Args:
hint: An integer representing the hint.
Returns:
A string representing the hint.
"""
hint_str = []
for _ ... | 13,200 |
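The loop body is truncated in this row. Below is a minimal sketch of a decoder consistent with the docstring, assuming the hint packs one tile per base-3 digit (0 = gray, 1 = yellow, 2 = green) and a five-letter word; both the encoding and the word length are assumptions, not confirmed by the snippet.

```python
# Hypothetical completion: base-3 digit encoding and default length are assumed.
def decode_hint(hint: int, length: int = 5) -> str:
    symbols = {0: '.', 1: '?', 2: '*'}  # gray, yellow, green
    hint_str = []
    for _ in range(length):
        hint_str.append(symbols[hint % 3])
        hint //= 3
    return ''.join(hint_str)
```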
def validate_notebook(nb_path, timeout=60):
""" Executes the notebook via nbconvert and collects the output
Args:
nb_path (string): path to the notebook of interest
timeout (int): max allowed time (in seconds)
Returns:
(parsed nbformat.NotebookNode object, list of execution errors)... | 13,201 |
def is_symmetric(arr, i_sym=True, j_sym=True):
"""
Takes in an array of shape (n, m) and checks whether it is symmetric
Parameters
----------
arr : 1D or 2D array
i_sym : bool
symmetric with respect to the 1st axis
j_sym : bool
symmetric with respect to the 2nd axis
Returns... | 13,202 |
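The body is truncated. One way to implement the documented check is to compare the array against its axis-wise mirror with NumPy; a sketch under the assumption that `i_sym`/`j_sym` are boolean flags selecting which axes to test:

```python
import numpy as np

def is_symmetric(arr, i_sym=True, j_sym=True):
    # Hypothetical sketch: flip along each requested axis and compare.
    arr = np.atleast_2d(arr)
    if i_sym and not np.allclose(arr, arr[::-1, :]):
        return False
    if j_sym and not np.allclose(arr, arr[:, ::-1]):
        return False
    return True
```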
def linear_powspec(k, a):
"""linear power spectrum P(k) - linear_powspec(k in h/Mpc, scale factor)"""
return _cosmocalc.linear_powspec(k, a) | 13,203 |
def main(request, response):
"""
Simple handler that sets a response header based on which client hint
request headers were received.
"""
response.headers.append(b"Access-Control-Allow-Origin", b"*")
response.headers.append(b"Access-Control-Allow-Headers", b"*")
response.headers.append(b"Ac... | 13,204 |
def light_eff(Pmax, Iz, I0, Ik):
"""
Photosynthetic efficiency based on the light conditions. By definition, the
efficiency has a value between 0 and 1.
Parameters
----------
Pmax : numeric
Maximum photosynthetic rate [-].
Iz : numeric
Coral biomass-averaged light-intensity ... | 13,205 |
def radial_kernel_evaluate(rmax, kernel, pos, wts, log=null_log, sort_data=False,
many_ngb_approx=None):
"""
Perform evaluation of radial kernel over neighbours.
Note you must set up the linear-interpolation kernel before calling this
function.
rmax - radius to ev... | 13,206 |
def get_referents(source, exclude=None):
"""
:return: dict storing lists of objects referring to source keyed by type.
"""
res = {}
for obj_cls, ref_cls in [
(models.Language, models.LanguageSource),
(models.ValueSet, models.ValueSetReference),
(models.Sentence, models.Senten... | 13,207 |
def test_smplify():
"""Test adaptive batch size."""
smplify_config = dict(mmcv.Config.fromfile('configs/smplify/smplify.py'))
device = torch.device(
'cuda') if torch.cuda.is_available() else torch.device('cpu')
smplify_config['body_model'] = dict(
type='SMPL',
gender='neutral'... | 13,208 |
def asarray(buffer=None, itemsize=None, shape=None, byteoffset=0,
bytestride=None, padc=" ", kind=CharArray):
"""massages a sequence into a chararray.
If buffer is *already* a chararray of the appropriate kind, it is
returned unaltered.
"""
if isinstance(buffer, kind) and buffer.__class... | 13,209 |
def _flake():
"""Test flake8"""
orig_dir = os.getcwd()
import_dir, dev = _get_import_dir()
os.chdir(op.join(import_dir, '..'))
if dev:
sys.argv[1:] = ['vispy', 'examples', 'make']
else:
sys.argv[1:] = [op.basename(import_dir)]
sys.argv.append('--ignore=E226,E241,E265,E266,W29... | 13,210 |
def _extract_symlink(zipinfo: zipfile.ZipInfo,
pathto: str,
zipfile: zipfile.ZipFile,
nofixlinks: bool=False) -> str:
"""
Extract: read the link path string, and make a new symlink.
'zipinfo' is the link file's ZipInfo object stored in zipfile.... | 13,211 |
def read_file(fname, ObsClass, verbose=False):
"""This method is used to read the file.
"""
if verbose:
print('reading menyanthes file {}'.format(fname))
if ObsClass == observation.GroundwaterObs:
_rename_dic = {'xcoord': 'x',
'ycoord': 'y',
... | 13,212 |
def cumulative_gain_curve(df: pd.DataFrame,
treatment: str,
outcome: str,
prediction: str,
min_rows: int = 30,
steps: int = 100,
effect_fn: EffectFnType = linear_ef... | 13,213 |
def last(*args):
"""Return last value from any object type - list,tuple,int,string"""
if len(args) == 1:
return int(''.join(map(str,args))) if isinstance(args[0],int) else args[0][-1]
return args[-1] | 13,214 |
def load_ann_kwargs():
"""emboss text"""
from matplotlib.patheffects import withStroke
myeffect = withStroke(foreground="w", linewidth=3)
ann_kwargs = dict(path_effects=[myeffect])
return ann_kwargs | 13,215 |
def color_conversion(img_name, color_type="bgr2rgb"):
"""
Color-space conversion.
Parameters
----------
img_name : numpy.ndarray
Input image
color_type : str
Conversion type:
bgr2rgb, bgr2hsv, bgr2gray, rgb2bgr,
rgb2hsv, rgb2gray, hsv2bgr, hsv2rgb
Return
-------
conver... | 13,216 |
def predict(self, celldata):
"""
This method performs prediction based on a model.
For now it just returns dummy data.
:return:
"""
ai_model = load_model_parameter()
ret = predict_unseen_data(ai_model, celldata)
print("celldata: ", celldata)
print("Classification: ", re... | 13,217 |
def statements_to_str(statements: List[ASTNode], indent: int) -> str:
"""Takes a list of statements and returns a string with their C representation"""
stmt_str_list = list()
for stmt in statements:
stmt_str = stmt.to_str(indent + 1)
if not is_compound_statement(stmt) and not isinstance(stmt... | 13,218 |
def _make_filter(class_name: str, title: str):
"""https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-enumwindows"""
def enum_windows(handle: int, h_list: list):
if not (class_name or title):
h_list.append(handle)
if class_name and class_name not in win32gui.GetCla... | 13,219 |
def is_float(s):
"""
Determine whether a string can be converted to a floating-point number.
"""
try:
float(s)
except (TypeError, ValueError):
return False
return True | 13,220 |
def whitelist_sharing_job(h_producer, operator_config, conn, logger):
"""Whitelist distribution job method.
This method listens for specific database notification events, which are generated when the
historic_whitelist table is updated or has records inserted. It then transmits those changes to operators.
... | 13,221 |
def import_from_scale(
dataset, labels_dir_or_json, label_prefix=None, scale_id_field="scale_id",
):
"""Imports the Scale AI labels into the FiftyOne dataset.
This method supports importing annotations from the following Scale API
endpoints:
- `General Image Annotation <https://docs.scale.com/re... | 13,222 |
def build_lm_model(config):
"""
"""
if config["model"] == "transformer":
model = build_transformer_lm_model(config)
elif config["model"] == "rnn":
model = build_rnn_lm_model(config)
else:
raise ValueError("model not correct!")
return model | 13,223 |
def citizenship_fst(
test_file: str, fst_file: str, fuzzy_match: bool = True, verbose: bool = False
):
"""Evaluate citizenship finite state transducer and return report to stdout
Arguments:
test_file: test file path
fst_file: fst file path
fuzzy_match: accept/reject fuzzy match
... | 13,224 |
def __virtual__():
"""
Only load if boto3 libraries exist.
"""
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__["boto3.assign_funcs"](__name__, "cloudfront")
return has_boto_reqs | 13,225 |
def extract_binaries(pbitmap, psamples):
"""
Extract sample binaries from subdirectories according to dataset defined in bitmap.
"""
bins = glob.glob(psamples+'/**/*.bin', recursive=True)
bitmap = pd.read_csv(pbitmap) if '.tsv' not in pbitmap else pd.read_csv(pbitmap, sep='\t')
hashes = bitmap[... | 13,226 |
def test_get_start_offset(request, fixture, result):
"""
Test the function that returns the offset for the RLM parser
"""
text = request.getfixturevalue(fixture)
assert result == _get_start_offset(text.splitlines()) | 13,227 |
def opening_github():
""" This function opens the github. """
webbrowser.open('https://github.com/RIDERIUS/Image-Viewer') | 13,228 |
def search_range(nums, target):
"""
Find first and last position of target in given array by binary search
:param nums: given array
:type nums : list[int]
:param target: target number
:type target: int
:return: first and last position of target
:rtype: list[int]
"""
result = [-1... | 13,229 |
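The body is truncated after `result = [-1...`. A standard way to satisfy this docstring is two lower-bound binary searches; the sketch below is one such implementation, not necessarily the original's.

```python
def search_range(nums, target):
    def lower_bound(value):
        # First index at which nums[index] >= value.
        lo, hi = 0, len(nums)
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] < value:
                lo = mid + 1
            else:
                hi = mid
        return lo

    first = lower_bound(target)
    if first == len(nums) or nums[first] != target:
        return [-1, -1]
    return [first, lower_bound(target + 1) - 1]
```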
def process_mean_results(data, capacity, constellation, scenario, parameters):
"""
Process results.
"""
output = []
adoption_rate = scenario[1]
overbooking_factor = parameters[constellation.lower()]['overbooking_factor']
constellation_capacity = capacity[constellation]
max_capacity = ... | 13,230 |
def identify_ossim_kwl(ossim_kwl_file):
"""
Parse a geom file to identify whether it is an OSSIM model.
:param ossim_kwl_file: ossim keyword list file
:type ossim_kwl_file: str
:return: ossim kwl info: ossimmodel, or None if not an ossim kwl file
:rtype: str
"""
try:
with open(ossim_kwl_... | 13,231 |
def conv_cond_concat(x, y):
""" Concatenate conditioning vector on feature map axis.
# Arguments
x: 4D-Tensor
y: 4D-Tensor
# Return
4D-Tensor
"""
x_shapes = x.get_shape()
y_shapes = y.get_shape()
return tf.concat(3, [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shape... | 13,232 |
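`tf.concat(3, [...])` uses the TensorFlow 0.x argument order (axis first). A sketch of the same operation in TF 2.x syntax, assuming `y` has shape (N, 1, 1, C) as is typical for conditional GANs:

```python
import tensorflow as tf

def conv_cond_concat(x, y):
    # Broadcast y across the spatial dims of x, then concatenate on channels.
    x_shape = tf.shape(x)
    y_tiled = y * tf.ones([x_shape[0], x_shape[1], x_shape[2], tf.shape(y)[3]])
    return tf.concat([x, y_tiled], axis=3)
```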
def test_apply_same_period(client):
"""attempt to apply to the same period with the previous application
1. test: error is returned
target_url: /lotteries/<id> [POST]
"""
idx = 1
token = login(client, test_user['secret_id'],
test_user['g-recaptcha-response'])['token']
... | 13,233 |
def get(args, syn):
"""TODO_Sphinx."""
entity = syn.get(args.id)
## TODO: Is this part even necessary?
## (Other than the print statements)
if 'files' in entity:
for file in entity['files']:
src = os.path.join(entity['cacheDir'], file)
dst = os.path.join('.'... | 13,234 |
def swath_pyresample_gdaltrans(file: str, var: str, subarea: dict, epsilon: float, src_tif: str, dst_tif: str):
"""Reprojects swath data using pyresample and translates the image to EE ready tif using gdal
Parameters
----------
file: str
file to be resampled and uploaded to GC -> EE
var: st... | 13,235 |
def run_drc(cell_name, gds_name, sp_name=None, extract=True, final_verification=False):
"""Run DRC check on a cell which is implemented in gds_name."""
global num_drc_runs
num_drc_runs += 1
write_drc_script(cell_name, gds_name, extract, final_verification, OPTS.openram_temp, sp_name=sp_name)
(out... | 13,236 |
def getKeyPairPrivateKey(keyPair):
"""Extracts the private key from a key pair.
@type keyPair: string
@param keyPair: public/private key pair
@rtype: string
@return: private key PEM text
"""
return crypto.dump_privatekey(crypto.FILETYPE_PEM, keyPair) | 13,237 |
def playbook_input(request, playbook_id, config_file=None, template=None):
"""Playbook input view."""
# Get playbook
playbook = Playbook.objects.get(pk=playbook_id)
# Get username
user = str(request.user)
# Check user permissions
if user not in playbook.permissions.users:
return playbooks(request)
... | 13,238 |
def _filter_gtf_df(GTF_df, col, selection, keep_columns, silent=False):
"""
Filter a GTF on a specific feature type (e.g., genes)
Parameters:
-----------
GTF_df
pandas DataFrame of a GTF
type: pd.DataFrame
col
colname on which df.loc will be performed
type: str... | 13,239 |
def validation_generator_for_dir(data_dir, model_dict):
"""Create a Keras generator suitable for validation
No data augmentation is performed.
:param data_dir: folder with subfolders for the classes and images therein
:param model_dict: dict as returned by `create_custom_model`
:returns: a generato... | 13,240 |
def convert_cbaois_to_kpsois(cbaois):
"""Convert coordinate-based augmentables to KeypointsOnImage instances.
Parameters
----------
cbaois : list of imgaug.augmentables.bbs.BoundingBoxesOnImage or list of imgaug.augmentables.bbs.PolygonsOnImage or list of imgaug.augmentables.bbs.LineStringsOnImage or i... | 13,241 |
def gen_stream_from_zip(zip_path, file_extension='wav', label_files=None, label_names=None, utt2spk=None,
corpus_name=None, is_speech_corpus=True, is_rir=False, get_duration=False):
""" Generate speech stream from zip file and utt2spk. The zip file contains wavfiles.
Parameters... | 13,242 |
def is_partinioned_beurocracy(dsc):
""" Partitioned beurocracy
Args:
dsc
Vars:
standardised_training_programmes (bool) :
relative_theoretical_cohesion (bool) :
focus_on_analytical_work (bool) :
training_programmes (list) :
Returns:
Theory:
stan... | 13,243 |
def get_query_dsl(
query_string, global_filters=None, facets_query_size=20, default_operator='and'):
"""
Returns an Elasticsearch query DSL for a query string.
param: query_string : an expression of the form
type: person title:foo AND description:bar
where type corresponds to an elastic sea... | 13,244 |
def prune_visualization_dict(visualization_dict):
"""
Get rid of empty entries in visualization dict
:param visualization_dict:
:return:
"""
new_visualization_dict = {}
# when the form is left blank the entries of visualization_dict have
# COLUMN_NAME key that points to an empty list
... | 13,245 |
def get_file(level, lesson, file_type):
"""Wrap method to download file
"""
msg = 'level: %s; lesson: %s; file type: %s' % (level, lesson, file_type)
print('Processing ' + msg)
lesson_url = get_lesson_url(level, lesson)
if lesson_url is None:
print('Lesson URL is None')
return
... | 13,246 |
def _LocationListToGoTo( request_data, positions ):
"""Convert a LSP list of locations to a ycmd GoTo response."""
try:
if len( positions ) > 1:
return [
responses.BuildGoToResponseFromLocation(
*_PositionToLocationAndDescription( request_data, position ) )
for position in positi... | 13,247 |
def calculate_average_grades_and_deviation(course):
"""Determines the final average grade and deviation for a course."""
avg_generic_likert = []
avg_contribution_likert = []
dev_generic_likert = []
dev_contribution_likert = []
avg_generic_grade = []
avg_contribution_grade = []
dev_generi... | 13,248 |
def get_slurm_params(n,runtime=None,mem=None,n_jobs=None):
"""Get remaining parameters to submit SLURM jobs based on specified parameters and number of files to process.
Parameters
----------
n : int
Number of files to process.
runtime : str, None
Time per run, string formatted 'hou... | 13,249 |
def get_db_comment_text(file_name) -> DataFrame:
"""
Extract text from a db_comment file and return it as a DataFrame.
:param file_name: input file name (str)
:return: text extracted from the input file
"""
# :return: DataFrame of nouns extracted from the text by a morphological analyzer
start_time = time.time()
print('\r\nget_db_comment_tex... | 13,250 |
def test_migration_trans_sync_err(mock_trans):
"""
Tests the device returning an error when the migration state is written to.
"""
global ctx, sock
data = VFIO_DEVICE_STATE_V1_SAVING.to_bytes(c.sizeof(c.c_int), 'little')
write_region(ctx, sock, VFU_PCI_DEV_MIGR_REGION_IDX, offset=0,
... | 13,251 |
def source_open() -> bool:
"""Open a source MS Excel spreadsheet file.
Returns
-------
boolean
Flag about successful processing.
"""
try:
Source.wbook = openpyxl.load_workbook(cmdline.workbook)
except Exception:
logger.error(
'Cannot open the MS Excel wo... | 13,252 |
def cli():
"""
Just pong it
"""
click.echo('ping') | 13,253 |
def make_prompt(token: str, config: Path, model: str = ''):
"""Make a summary using the Studio21 API
Args:
token (str): Your api token to use.
config (Path): The path to the config file.
model (str, optional): Which model to use. If empty
then read the model from the config file... | 13,254 |
def get_license_match_error(lic, lic_file_path):
"""Returns an Error of the type 'warning' if the FreeRTOS license is present in the
input file. Otherwise an empty list is returned.
"""
# Get the words in the license template
with open('license.templ', 'r') as file:
template_lic = file... | 13,255 |
def test_craysdbproc_from_cache():
"""
Initialize CraySdb from decompressed cache
"""
# Create an uncompressed cache file
tokiotest.TEMP_FILE.close()
tokiotest.gunzip(tokiotest.SAMPLE_XTDB2PROC_FILE, tokiotest.TEMP_FILE.name)
print("Decompressed %s to %s" % (tokiotest.SAMPLE_XTDB2PROC_FILE, ... | 13,256 |
def start_workers(size, delete=False, migrate=False):
"""Starts FluxxWorkers.
:returns: Pair of queues.
"""
streams = (queue.Queue(), queue.Queue(maxsize=size))
for _ in range(THREAD_COUNT):
worker = FluxxWorker(streams, delete, migrate)
worker.daemon = True
worker.start()
... | 13,257 |
def harmonizationApply(data, covars, model):
"""
Applies harmonization model with neuroCombat functions to new data.
Arguments
---------
data : a numpy array
data to harmonize with ComBat, dimensions are N_samples x N_features
covars : a pandas DataFrame
contains covar... | 13,258 |
def create_back_links(env, option):
"""
Create back-links in all found needs.
But do this only once, as all needs are already collected and this sorting is for all
needs and not only for the ones of the current document.
:param env: Sphinx environment
:return: None
"""
option_back = f"{o... | 13,259 |
def plot_single_hand_2d(keypoints, ax, occlusion=None, color_fixed=None, linewidth='1'):
""" Plots a hand stick figure into a matplotlib figure. """
for connection, color in hand_bones:
coord1 = keypoints[connection[0], :]
coord2 = keypoints[connection[1], :]
coords = np.stack([coord1, c... | 13,260 |
def from_rkm(code):
"""Convert an RKM code string to a string with a decimal point.
Parameters
----------
code : str
RKM code string.
Returns
-------
str
String with a decimal point and an R value.
Examples
--------
>>> from pyaedt.circuit import from_rkm
>... | 13,261 |
def update_cache(makefile_dirs: List[str]) -> None:
"""Given a list of directories containing Makefiles, update caches."""
import multiprocessing
cpus = multiprocessing.cpu_count()
fnames1: List[str] = []
fnames2: List[str] = []
for path in makefile_dirs:
cdp = f'cd {path} && ' if path ... | 13,262 |
def register_refinement(name, refinementof, cython_cimport=None, cython_cyimport=None,
cython_pyimport=None, cython_c2py=None, cython_py2c=None):
"""This function will add a refinement to the type system so that it may be used
normally with the rest of the type system.
"""
refi... | 13,263 |
def print_err(msg, error=None, fatal=False):
"""Affiche un message d'erreur
Le pendant de print_ok. On met un failed en rouge. Si une erreur
est passé en paramètre, on affiche son message. Si fatal=True, on
fait un sys.exit(1) à la fin.
"""
print("[" + colors['FAIL'] + "FAILED" + colors['ENDC']... | 13,264 |
def get_arguments(method, rpc_version):
"""
Get arguments for method in specified Transmission RPC version.
"""
if method in ('torrent-add', 'torrent-get', 'torrent-set'):
args = constants.TORRENT_ARGS[method[-3:]]
elif method in ('session-get', 'session-set'):
args = constants.SESSI... | 13,265 |
def compose_local_noises(*functions: NoiseModel) -> NoiseModel:
"""Helper to compose multiple NoiseModel.
Args:
*functions: a list of functions
Returns:
The mathematical composition of *functions. The last element is applied
first. If *functions is [f, g, h], it returns f∘g∘h.
... | 13,266 |
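The docstring fully specifies the semantics (f∘g∘h, last element applied first), so a completion with `functools.reduce` follows; treating each NoiseModel as a plain callable is an assumption.

```python
from functools import reduce

def compose_local_noises(*functions):
    # [f, g, h] -> lambda x: f(g(h(x))); an empty list composes to the identity.
    return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
```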
def test_request_password_reset_unverified_email(live_server, mailoutbox):
"""
If the user provides an email address that does not exist in the
system, no action should be taken.
"""
user = get_user_model().objects.create_user(username="Test User")
email = models.EmailAddress.objects.create(
... | 13,267 |
def sls_build(
repository, tag="latest", base="opensuse/python", mods=None, dryrun=False, **kwargs
):
"""
.. versionchanged:: 2018.3.0
The repository and tag must now be passed separately using the
``repository`` and ``tag`` arguments, rather than together in the (now
deprecated) ``i... | 13,268 |
def RightDragDrop(x1: int, y1: int, x2: int, y2: int, moveSpeed: float = 1, waitTime: float = OPERATION_WAIT_TIME) -> None:
"""
Simulate mouse right button drag from point x1, y1 drop to point x2, y2.
x1: int.
y1: int.
x2: int.
y2: int.
moveSpeed: float, 1 normal speed, < 1 move slower, > 1 ... | 13,269 |
def parse_date(date):
"""
Parses a date string and returns number of seconds from the EPOCH.
"""
# yyyy-mm-dd [hh:mm:ss[.s][ [+-]hh[:][mm]]]
p = re.compile( r'''(?P<year>\d{1,4}) # yyyy
- #
(?P<month>\d{1,2}) ... | 13,270 |
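The regex is truncated. A sketch of the same contract using `datetime`, covering the date and date-time forms named in the comment; the original's fractional-second and UTC-offset handling is omitted here.

```python
from datetime import datetime, timezone

def parse_date(date):
    # Hypothetical simplification of the documented 'yyyy-mm-dd [hh:mm:ss]' format.
    for fmt in ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d'):
        try:
            dt = datetime.strptime(date, fmt).replace(tzinfo=timezone.utc)
            return dt.timestamp()
        except ValueError:
            continue
    raise ValueError('unrecognized date: {!r}'.format(date))
```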
def promote_parameter_to_node(scriptargs: dict): # pylint: disable=too-many-locals
"""Promote a parameter to a target node.
:param scriptargs: kwargs dict from PARMmenu entry.
:return:
"""
# Get the parms to act on.
parms = scriptargs["parms"]
# The start node for the node chooser prompt... | 13,271 |
def check_free_memory(free_mb):
"""
Check *free_mb* of memory is available, otherwise do pytest.skip
"""
import pytest
try:
mem_free = _parse_size(os.environ['SCIPY_AVAILABLE_MEM'])
msg = '{0} MB memory required, but environment SCIPY_AVAILABLE_MEM={1}'.format(
free_mb, ... | 13,272 |
def CorrectOrWrong(Input,word):
"""Check if Input is inside word"""
if Input in word:
return True
else:
return False | 13,273 |
def get_fair_metrics(dataset, pred, pred_is_dataset=False):
"""
Measure fairness metrics.
Parameters:
dataset (pandas dataframe): Dataset
pred (array): Model predictions
pred_is_dataset, optional (bool): True if prediction is already part of the dataset, column name 'labels'.
Retu... | 13,274 |
def _queue_into_buffer(transfersession):
"""
Takes a chunk of data from the store to be put into the buffer to be sent to another morango instance.
ALGORITHM: We do Filter Specific Instance Counter arithmetic to get our newest data compared to the server's older data.
We use raw sql queries to place da... | 13,275 |
def union_poi_bus_station(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
"""
Merges the different bus-station categories of
Points of Interest into a single category named 'bus_station'.
Parameters
----------
data : DataFrame
Input points of interest data
... | 13,276 |
def test_cli_note_import_from_stdin(mocker, mock_nncli):
"""test cli_note_import"""
mocker.patch('sys.stdin',
new=StringIO('{"content": "test"}'))
nn_obj = nncli.nncli.Nncli(False)
mocker.patch.object(nn_obj.ndb, 'import_note')
mocker.patch.object(nn_obj.ndb, 'sync_now')
nn_obj.... | 13,277 |
def make_params(
key_parts: Sequence[str],
variable_parts: VariablePartsType) -> Dict[str, Union[str, Tuple[str]]]:
"""
Map keys to variables. This maps
URL-pattern variables to
URL-related parts.
:param key_parts: A list of URL parts
:param variable_parts: A linked-list\
(... | 13,278 |
def load_sentiments(file_name=DATA_PATH + "sentiments.csv"):
"""Read the sentiment file and return a dictionary containing the sentiment
score of each word, a value from -1 to +1.
"""
sentiments = {}
for line in open(file_name):
word, score = line.split(',')
sentiments[word] = float(... | 13,279 |
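The last line is cut off mid-assignment, and completing it only requires the score cast. A sketch of the whole function; the `with` context manager is an addition over the visible `open()` call.

```python
def load_sentiments(file_name='sentiments.csv'):
    sentiments = {}
    with open(file_name) as fh:
        for line in fh:
            word, score = line.split(',')
            sentiments[word] = float(score)  # float() tolerates the trailing newline
    return sentiments
```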
def fun2():
"""
Use a dictionary instead of a list for the input to improve the performance of updating values
by lookup.
:return:
"""
try:
while True:
array_range=int(input().strip())
input_list=list(map(int,input().strip().split()))
print(input_list)... | 13,280 |
def run_example(
device_id: str,
server_host: str = "localhost",
server_port: int = 8004,
plot: bool = True,
scope_length: int = 8192,
historylength: int = 1,
):
"""run the example."""
apilevel_example = 6 # The API level supported by this example.
# Call a zhinst utility function ... | 13,281 |
def perform_cegs_gwas(kinship_type='ibd', phen_type='medians'):
"""
Perform a simple MLM GWAS for the 8 traits
"""
import hdf5_data
import kinship
import linear_models as lm
import time
import scipy as sp
from matplotlib import pyplot as plt
import analyze_gwas_results as agr
... | 13,282 |
def get_draft_url(url):
"""
Return the given URL with a draft mode HMAC in its querystring.
"""
if verify_draft_url(url):
# Nothing to do. Already a valid draft URL.
return url
# Parse querystring and add draft mode HMAC.
url = urlparse.urlparse(url)
salt = get_random_string(... | 13,283 |
def make_datum(source: str, img_id: str, sent_id: int, sent: str):
"""
Create a datum from the provided infos.
:param source: the dataset of the particular sentence.
:param img_id: id of the image
:param sent_id: id of the sentence (of the image)
:param sent: the sentence
:return: a dict of ... | 13,284 |
def list_for_consumer(req):
"""List allocations associated with a consumer."""
context = req.environ['placement.context']
context.can(policies.ALLOC_LIST)
consumer_id = util.wsgi_path_item(req.environ, 'consumer_uuid')
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
# NOTE(cdent):... | 13,285 |
def get_supermean(name, season, data_dir, obs_flag=None):
"""Calculated supermeans from retrieved data, which are pickled Iris cubes.
:param name: Cube name. Should be CF-standard name. If no CF-standard name
exists the STASH code in msi format (for example m01s30i403)
is used... | 13,286 |
def get_zcl_attribute_size(code):
"""
Determine the number of bytes a given ZCL attribute takes up.
Args:
code (int): The attribute size code included in the packet.
Returns:
int: size of the attribute data in bytes, or -1 for error/no size.
"""
opts = (0x00, 0,
0x... | 13,287 |
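The `opts` tuple is truncated after its first pair (0x00 → 0 bytes). A sketch using a dict with a few ZCL data-type sizes for illustration; only a small subset of the real table is shown, and the dict layout is an assumption (the original appears to use a flat tuple of code/size pairs).

```python
# Illustrative subset of ZCL data-type sizes; the full table is much longer.
_ZCL_ATTRIBUTE_SIZES = {
    0x00: 0,  # no data
    0x10: 1,  # boolean
    0x20: 1,  # uint8
    0x21: 2,  # uint16
    0x23: 4,  # uint32
}

def get_zcl_attribute_size(code):
    return _ZCL_ATTRIBUTE_SIZES.get(code, -1)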
def get_mappings():
"""We process the mappings for two separate cases. (1) Variables that vary by year,
and (2) variables where there are multiple realizations each year.
"""
# Set up grid for survey years. Note that from 1996 we can only expect information every other
# year. We start with 1978 as ... | 13,288 |
def _get_function_name_and_args(str_to_split):
"""
Split a string of into a meta-function name and list of arguments.
@param IN str_to_split String to split
@return Function name and list of arguments, as a pair
"""
parts = [s.strip() for s in str_to_split.split(" | ")]
if len(parts) < 2:
... | 13,289 |
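Given the documented contract (a meta-function name and arguments split on " | "), the truncated remainder most likely validates the part count and returns a pair; a hedged sketch, where the error path is an assumption:

```python
def _get_function_name_and_args(str_to_split):
    parts = [s.strip() for s in str_to_split.split(' | ')]
    if len(parts) < 2:
        raise ValueError('expected "name | arg | ..." but got %r' % str_to_split)
    return parts[0], parts[1:]
```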
def runAndWatch(container, cgroup, watchCgroup, notify=None, wallClockLimit=None,
cpuClockLimit=None, pollInterval=1, notifyInterval=10):
"""
Run a container and watch it for time limits. Returns a dictionary with
container statistics.
"""
inspection = inspectContainer(container)
co... | 13,290 |
def replicas_on_delete():
"""
This is a route for ALL NODES.
A (previous) neighbor node sends POST requests to this route,
so that a key-value pair replica is deleted in the current NODE.
"""
# The hash ID of the node-owner of the primary replica
start_id = request.form['id']
key = re... | 13,291 |
def load(train_dir=train_dir, test_dir=test_dir):
"""
Load the dataset into memory.
This uses a cache-file which is reloaded if it already exists,
otherwise the dataset is created and saved to
the cache-file. The reason for using a cache-file is that it
ensures the files are ordered consistently... | 13,292 |
def pdf2(sigma_matrix, grid):
"""Calculate PDF of the bivariate Gaussian distribution.
Args:
sigma_matrix (ndarray): with the shape (2, 2)
grid (ndarray): generated by :func:`mesh_grid`,
with the shape (K, K, 2), K is the kernel size.
Returns:
kernel (ndarray): un-norm... | 13,293 |
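The return line is truncated, but the docstring pins down the math: an un-normalized bivariate Gaussian exp(-½ xᵀΣ⁻¹x) evaluated at every point of a (K, K, 2) grid. A sketch:

```python
import numpy as np

def pdf2(sigma_matrix, grid):
    inverse_sigma = np.linalg.inv(sigma_matrix)
    # Quadratic form x^T Sigma^{-1} x at each grid point, then exponentiate.
    kernel = np.exp(-0.5 * np.sum((grid @ inverse_sigma) * grid, axis=2))
    return kernel
```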
def spell_sql(*args,**kwargs):
"""
list=[]
"""
if len(args[0])<=0:
return None
sql="SELECT * from `emotion_data` WHERE id ={}".format(args[0][0])
for index in args[0][1:]:
sql +=" or id ={}".format(index)
return sql | 13,294 |
def is_within_bounds(bounds, point):
""" Returns true if point is within bounds. point is a d-array and bounds is a
dx2 array. bounds is expected to be an np.array object.
"""
point = np.array(point)
if point.shape != (bounds.shape[0],):
return False
above_lb = np.all((point - bounds[:, 0] >= 0))
... | 13,295 |
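Only the lower-bound half of the check survives the truncation; the upper-bound half mirrors it. A sketch of the completed function, where the final combination is an assumption:

```python
import numpy as np

def is_within_bounds(bounds, point):
    point = np.array(point)
    if point.shape != (bounds.shape[0],):
        return False
    above_lb = np.all(point - bounds[:, 0] >= 0)
    below_ub = np.all(bounds[:, 1] - point >= 0)
    return bool(above_lb and below_ub)
```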
def get_xyz(filename, information):
""" Returns xyz-files from data file"""
#Take out variables that will be constant throughout loops
natoms=information.num_of_atoms
charge=information.atom_charge
name=information.type
atom_counter = 1
time_step_counter = 0
T = time.time()
#Loop... | 13,296 |
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Ava... | 13,297 |
def list_of_paths():
"""
It lists all the folders which do not contain PET images
"""
return ['.DS_Store', 'localizer', 'Space_3D_T2_FLAIR_sag_p2', 'AXIAL_FLAIR', 'MPRAGE_ADNI_confirmed_REPEATX2', 'Axial_PD-T2_TSE',
'Axial_PD-T2_TSE_repeat', 'MPRAGE_SAG_ISO_p2_ND', 'Axial_PD-T2_TSE_confi... | 13,298 |
def convert_onnx_to_ell(path, step_interval_msec=None, lag_threshold_msec=None):
"""
convert the importer model into an ELL model, optionally a steppable model if step_interval_msec
and lag_threshold_msec are provided.
"""
_logger = logger.get()
_logger.info("Pre-processing... ")
converter = ... | 13,299 |