content stringlengths 39 9.28k | sha1 stringlengths 40 40 | id int64 8 710k |
|---|---|---|
import warnings
import numbers
def create_plist_from_par_opt_to_par_sim(mapping_par_opt_to_par_sim):
"""
Create list of parameter indices for which sensitivity is to be computed.
From the parameter mapping `mapping_par_opt_to_par_sim`, create the
simulation plist according to the mapping `mapping`.
... | 1df8d0a9814a99b1ba083fbe40ad71a4d0cb8403 | 493,088 |
import torch
def get_devices_spec(devices=None):
"""
Get a valid specification for one or more devices. If `devices` is None get devices for all CUDA devices available.
If `devices` is and zero-length structure a single CPU compute device is returned. In any other cases `devices` is
returned unchanged... | 2afd6df60a5e2f5eeaa2997b5f9efabfa4437bd2 | 304,951 |
def awgGate(gate, station):
    """Return True when the station's AWG can control the given gate.

    Returns False when `station` has no ``awg`` attribute (or it is None);
    otherwise the decision is delegated to ``awg.awg_gate(gate)``.
    """
    controller = getattr(station, 'awg', None)
    return False if controller is None else controller.awg_gate(gate)
def harmonic_mean(x1, x2):
    """Compute the harmonic mean of two values.

    The harmonic mean is 2*x1*x2 / (x1 + x2); a ZeroDivisionError is
    raised when x1 + x2 == 0.
    """
    numerator = 2 * x1 * x2
    return numerator / (x1 + x2)
def show_tnseq_upload_btn(network_type):
    """Show TnSeq upload button when combined networks are selected.

    Returns a CSS style dict: visible for 'combined', hidden for
    every other network type.
    """
    if network_type == 'combined':
        return {'display': 'block'}
    return {'display': 'none'}
from typing import Mapping
from typing import Any
from typing import Optional
def check_override(params: Mapping[str, Any], key: str,
override: Optional[Any]) -> Any:
"""Return desired value, with optional override."""
if override is None:
return params[key]
saved = params.get(k... | ea9776d7d55594ab6ce22168fa4ac2879a0de7f8 | 297,266 |
import json
import hashlib
def hexdigest(jsonable):
    """
    Calculate hex digest of a `jsonable` object.

    The object is serialized with sorted keys so that equal dicts always
    hash to the same digest.
    >>> hexdigest({'a': 1, 'b': 2, 'c': 3})
    'e20096b15530bd66a35a7332619f6666e2322070'
    """
    serialized = json.dumps(jsonable, sort_keys=True)
    return hashlib.sha1(serialized.encode()).hexdigest()
def list_to_array_syntax(l):
    """Convert a python list to PGSQL array syntax for insertion.

    Relies on str(l) producing "[a, b, ...]" and swaps the square
    brackets for quoted braces, e.g. [1, 2] -> "'{1, 2}'".
    """
    inner = str(l)[1:-1]
    return "'{" + inner + "}'"
def json_utf8_encode(obj: object) -> object:
"""Binary encode all strings in an object.
:arg obj: Object.
:returns: Object with binary encoded strings.
"""
if isinstance(obj, str):
return obj.encode('utf-8')
if isinstance(obj, list) or isinstance(obj, tuple):
return [json_utf8_... | 6d11790e2fb18eb0265452bff21ca6d082eae86a | 229,601 |
def get_filtered_attributes(cube, attribute_filter=None):
"""
Build dictionary of attributes that match the attribute_filter. If the
attribute_filter is None, return all attributes.
Args:
cube (iris.cube.Cube):
A cube from which attributes partially matching the
attribut... | c9dc018244856c2148beae0de96f4fe2853017f9 | 496,774 |
def armijo(fun, xk, xkp1, p, p_gradf, fun_xk, eta=0.5, nu=0.9):
""" Determine step size using backtracking
f(xk + alpha*p) <= f(xk) + alpha*nu*<p,Df>
Args:
fun : objective function `f`
xk : starting position
xkp1 : where new position `xk + alpha*p` is stored
p : search ... | 0b44b06fe6db1f778dbc22995a2800ebbf6f051a | 678,650 |
def set_range_offset(ds, h_deploy):
"""
Adds an instrument's height above seafloor (for an up-facing instrument)
or depth below water surface (for a down-facing instrument) to the range
of depth bins
Parameters
----------
ds : xarray.Dataset
The adcp dataset to ajust 'range' on
h... | 13257dc4b5fce72667b58d60f39566e33427167f | 402,462 |
def str2list(_str):
    """Wrap a string in a single-element list; pass lists through unchanged.

    :arg str _str: a string (an existing list is returned as-is)
    :raises ValueError: for any other type
    """
    if isinstance(_str, list):
        return _str
    if isinstance(_str, str):
        return [_str]
    raise ValueError('"_str" input is not a str or list')
def load_sentences(filename):
"""give us a list of sentences where each sentence is a list of tokens.
Assumes the input file is one sentence per line, pre-tokenized."""
out = []
with open(filename) as infile:
for line in infile:
line = line.strip()
tokens = line.split()
out.a... | 6a4c458f9a0d9b17eaa38c38570dacc4c40e86c0 | 706,735 |
import string
def get_first_n_alphabet(n):
    """Return the first n lowercase letters of the alphabet as a string."""
    # NOTE(review): the slice of string.ascii_lowercase is a str, not a
    # list, despite the original docstring's wording.
    return string.ascii_lowercase[:n]
from pathlib import Path
import pickle
def read_pickle_dict(filename, folder='C:/Users/Avram/Dropbox (MIT)/MIT'
'/Research/Thesis/Sections/mod_studies/'):
"""Read ptrac data dictionary from pickle file."""
if folder == '':
folder = Path.cwd() # set folder ='' to use current dir... | dcf38d34d596a19d9b69a32e48d0f8f1ca10eda4 | 615,005 |
def empty_data(data):
    """Check to see if data is an empty list or None.

    Returns True for None or an empty list; False for everything else
    (including empty strings, tuples and dicts, matching the original
    behaviour).  Uses isinstance so list subclasses are also handled.
    """
    return data is None or (isinstance(data, list) and not data)
def deprocess_image(img):
    """Undo preprocessing on an image.

    Maps values from the [-1, 1] preprocessing range back to [0, 1].
    (No uint8 cast is performed here.)
    """
    return (img + 1) * 0.5
def generate_script(group, entry_point, header, template):
"""Generate the script based on the template.
:param str group:
The entry-point group name, e.g., "console_scripts".
:param str header:
The first line of the script, e.g., "!#/usr/bin/env python".
:param str template:
Th... | 847871bc7344dcfda994a9e985e9a541c96fff81 | 45,123 |
def matchAllowedVOs(vo, resource_ad):
    """Check whether `vo` is permitted by `resource_ad`.

    Returns True when the ad's 'AllowedVOs' entry is missing or empty
    (interpreted as "no restriction"); otherwise True only if `vo`
    appears in the list.
    """
    restriction = resource_ad.get('AllowedVOs')
    return True if not restriction else vo in list(restriction)
def xstr(s):
    """Convert an object to a string, mapping None to the empty string.

    Args as data:
        s: input object
    Returns:
        "" when s is None, otherwise str(s)
    """
    return "" if s is None else str(s)
def is_valid_host(host):
""" Check if host is valid.
Performs two simple checks:
- Has host and port separated by ':'.
- Port is a positive digit.
:param host: Host in <address>:<port> format.
:returns: Valid or not.
"""
parts = host.split(':')
return len(parts) == 2 or par... | e7d22558e8a41b3b3345e863e11cdeec1a37d984 | 77,561 |
import ipaddress
def list_all_available_cidr(jnj_root_cidr_list, allocated_cidr_list, subnet_prefix):
"""
Find all CIDRs of specified size from the provided top level CIDR list in the region
Args:
jnj_root_cidr_list: top-level CIDRs allocated to region
allocated_cidr_list: CIDRs currently... | caf84de05b7c8b6a7246062e2f34ce57329cf6b7 | 41,374 |
import copy
def modify_namespace(namespace, args):
"""Modify the specified arguments in the passed Namespace.
namespace argparse.Namespace object
args dict of argument: value pairs
For most command-line tests, we define a base argparse.Namespace object, then
change a few argumen... | 65aee8eb630ee0b75b81b18234ea04b6ca888491 | 666,773 |
def list_wrap_remove(var):
"""
Helper function for removing list wrapping for a single item that might be wrapped into a list
"""
if type(var) == list and len(var) > 1:
# If an actual list, return the list
return var
elif type(var) == list and len(var) == 1:
# If a list of o... | f13ec71bc9275d4ac5b5747f0daa97af24b40361 | 420,641 |
def sizeof_fmt(num, suffix='B', longsuffix=True, usespace=True, base=1024):
""" Returns a string representation of the size ``num``.
- Examples:
>>> sizeof_fmt(1020)
'1020 B'
>>> sizeof_fmt(1024)
'1 KiB'
>>> sizeof_fmt(12011993)
'11.5 MiB'
>>> sizeof_fmt(123456789)
'117.7 MiB'
... | 192809ce48ccdbeeb657ad0be60414664fc60544 | 594,338 |
def load_cluster_labels(clusterpath, one_indexed=True):
"""
one_indexed: if true, assume cluster index starts at 1 (e.g. as in 2018_scMCA data)
"""
cluster_labels = {}
if one_indexed:
dec = 1
else:
dec = 0
# expected format of csv file is "cluster number, name"
with open(... | fed3adb9dc9c5653aaf05548a8c7e6d5739f6e82 | 633,878 |
def match_lab_lightness(outputs, targets):
    """
    Replace the L layer of LAB image in outputs
    by the ones in targets

    Both arguments are NCHW tensors whose channel dim holds (L, a, b);
    after the permute below, channel index 0 of the last axis is L.

    NOTE(review): `permute` returns a view, so the indexed assignment
    below writes through to the caller's `outputs` tensor (an in-place
    update) — confirm callers expect this side effect.
    """
    outputs = outputs.permute(0, 2, 3, 1)
    targets = targets.permute(0, 2, 3, 1)
    outputs[..., 0] = targets[..., 0]
    return outputs.permute(0, 3, 1, 2)
def smiles(sid):
    """SMILES string from a species ID.

    The SMILES is everything before the first underscore in `sid`
    (the whole string when no underscore is present).
    """
    return sid.partition('_')[0]
def _min_index(b, h):
"""
Returns: The index of the minimum value in b[h..]
Parameter b: The sequence to search
Precondition: b is a mutable sequence (e.g. a list).
"""
# We typically do not enforce preconditions on hidden helpers
# Start from position h
i = h
index = h;
# ind... | 8d1058177967f9e17405802912b2f080757ac1d1 | 524,211 |
def dot_esc(s):
    """Escape `s` for use inside a double-quoted DOT (Graphviz) string.

    Only double quotes are escaped; other DOT-special characters
    (backslashes, newlines) are the caller's responsibility.
    (Parameter renamed from ``str``, which shadowed the builtin.)
    """
    return s.replace('"', '\\"')
def read_answer(equation):
    """Show `equation`, prompt the user, and return the trimmed reply.

    An empty reply is converted to the string '0'.
    """
    print(equation)
    print()
    answer = input("Answer? ").strip()
    return answer if answer else '0'
def cast_bool(value):
    """Cast boolean value in database format to bool.

    Database booleans arrive as strings such as 't'/'f' or
    'true'/'false'; any non-empty string starting with 't' or 'T' is
    True.  Empty/None input yields False (the original implicitly
    returned None here, contradicting the bool contract).
    """
    if not value:
        return False
    return value[0] in ('t', 'T')
def odder(num: int) -> int:
    """Force a number to be odd: even inputs are bumped up by one."""
    bumped = num + 1 if num % 2 == 0 else num
    return int(bumped)
import socket
def getFQDN(default=None):
    """Return the fully-qualified hostname.

    Falls back to `default` when the reported hostname contains no dot
    (i.e. is not fully qualified).
    """
    name = socket.gethostname()
    return name if '.' in name else default
def get_filename(pathname, split_char='/'):
    """
    :param pathname: path name
    :param split_char: Path interval character
    :return: file name (the final component after splitting)
    """
    return pathname.rsplit(split_char, 1)[-1]
def center_scale(arr):
    """Rescale `arr` linearly so its values span [-0.5, 0.5]."""
    lo, hi = arr.min(), arr.max()
    return ((arr - lo) / (hi - lo)) - .5
import random
def successfulStarts(successProb, numTrials):
"""Assumes successProb is a float representing probability of a
single attempt being successful. numTrials a positive int
Returns a list of the number of attempts needed before a
success for each trial."""
triesBeforeSuccess ... | 16fd09295f2fb617f2be9061ff77117856dc11cf | 547,408 |
def rowwidth(view, row):
    """Returns the number of characters of ``row`` in ``view``.

    :param view: presumably a Sublime Text ``View`` object (it must
        provide ``text_point``, ``line`` and ``rowcol``) — confirm.
    :param row: zero-based row index.

    Works by converting ``row`` to a text point, taking the end of the
    line containing it, and reading back that end point's column.
    """
    return view.rowcol(view.line(view.text_point(row, 0)).end())[1]
def parse_float(arg):
    """Parse an argument into a float.

    `None` itself, and the strings 'none'/'null' (case-insensitive),
    map to None instead of raising.
    """
    is_null_str = isinstance(arg, str) and arg.lower() in ('none', 'null')
    if arg is None or is_null_str:
        return None
    return float(arg)
def get_port_detail(ports):
"""
Iterate over ports details from response and retrieve details of ports.
:param ports: list of ports details from response
:return: list of detailed element of ports
:rtype: list
"""
return [{
'ID': port.get('id', ''),
'Number': port.get('numbe... | 003ecf7d3453659d7e43d07f0ad416d0f9da84ca | 478,499 |
def nvmf_create_subsystem(client,
nqn,
serial_number,
tgt_name=None,
model_number='SPDK bdev Controller',
allow_any_host=False,
max_namespaces=0,
... | 909bff67a43ca106d756225e328666d93533d495 | 298,344 |
def polygonal(i, n):
    """
    Compute the n-th i-gonal number.
    For example, i=3 yields the nth triangle number.
    """
    inner = (i - 2) * n + 4 - i
    return n * inner // 2
def _resolve_text(unicode_bytes):
    """Resolves Facebook text data, which is often dumped as Unicode bytes, to actual text.

    Facebook JSON dumps store UTF-8 byte values as individual code
    points; the encode/decode round-trip below recovers the intended
    text.  NOTE(review): this relies on a codec named "charmap"
    (a latin-1-style 1:1 byte mapping) being resolvable — confirm it
    works on the target Python installation.
    """
    return unicode_bytes.encode("charmap").decode("utf8")
from functools import reduce
def map_reduce_attr(attr, elems, op, init):
    """Map each element to its `attr` attribute, then reduce with `op`.

    The attribute is read as plain (non-callable) instance data; `init`
    seeds the reduction.
    """
    values = (getattr(item, attr) for item in elems)
    return reduce(op, values, init)
from datetime import datetime
def display_day(uuid):
    """Display a readable form of the day for a given UUID entity.

    Concatenates the entity's PartitionKey and RowKey into a YYYYMMDD
    date string and renders it as e.g. "Monday, 15".
    """
    raw = "{}{}".format(uuid.PartitionKey, uuid.RowKey)
    parsed = datetime.strptime(raw, "%Y%m%d")
    return parsed.strftime("%A, %d")
def adjust_widget(widget, window_title=None, size=None, **kwargs):
    """Apply optional adjustments to a Qt-style widget.

    Sets the window title and/or resizes the widget when the
    corresponding argument is given; keyword arguments this function
    does not consume are returned to the caller unchanged.
    """
    if window_title is not None:
        widget.setWindowTitle(window_title)
    if size is not None:
        widget.resize(*size)
    return kwargs
def round_list(x, n = 2):
"""Auxiliary function to round elements of a list to n decimal places.
Parameters
----------
x : list
List of float values.
n : int
Number of decmial places to round the elements of list.
Returns
-------
list
List with elements rounded ... | d4a6b1d283d970618591346caedde559069c9877 | 500,223 |
def swa_lr_decay(step: int, cycle_len: int, start_lr: float, end_lr: float) -> float:
    """Linearly interpolate the learning rate across the current cycle.

    The position inside the cycle is `step % cycle_len`, so the rate
    restarts at `start_lr` every `cycle_len` steps.
    """
    slope = (end_lr - start_lr) / cycle_len
    return start_lr + slope * (step % cycle_len)
def parse_so_terms(so_file):
"""Retrieve all available Sequence Ontology terms from the file.
"""
so_terms = []
with open(so_file) as in_handle:
for line in in_handle:
if line.find('name:') == 0:
name = line[5:].strip()
so_terms.append(name)
return... | c61acbd574701244a4ad9cdca5095e4b12514bda | 673,943 |
def get_greatest_depth(germanet, category) -> int:
"""
Iterate trough the synsets of a given word category. For each synset check the depth and return the greatest depth
that has been seen.
:type category: WordCategory
:type germanet: Germanet
:param germanet: the germanet graph
:param categ... | bfc354fe8e0f49a2100c0cb4ccf2ebb080616991 | 461,823 |
import re
def inc_guard(header_name):
    """Convert a header file name into an include guard.

    Upper-cases the name and turns dots and path separators into
    underscores, e.g. "foo/bar.h" -> "_FOO_BAR_H_".
    """
    sanitized = re.sub(r'[./\\]', '_', header_name.upper())
    return "_{}_".format(sanitized)
import json
import random
import time
def pick_random_quote(path='responses/class_quotes.json'):
"""
Get a random quote from a jason file.
:param path: a string that indicates the location where the quotes are stored.
:return: a random quote
"""
with open(path) as f:
responses = j... | 6d41f35a8316f09d30849d3d1c790e5be2065f68 | 113,145 |
async def get_story_data(session, story_id, story_rank):
"""
Gets the given story data - title and url
"""
url = 'https://hacker-news.firebaseio.com/v0/item/{}.json'.format(story_id)
async with session.get(url) as response:
result_data = await response.json()
story_url = ""
... | cbac4d05915a82ab11854b9365acddb9c42944bd | 25,816 |
def _GetAndroidVersionFromMetadata(metadata):
"""Return the Android version from metadata; None if is does not exist.
In Android PFQ, Android version is set to metadata in master
(MasterSlaveLKGMSyncStage).
"""
version_dict = metadata.GetDict().get('version', {})
return version_dict.get('android') | e1d1ed9d0bbf2f65d646c11007739f6b5a9b78ec | 700,514 |
import json
def format_lld(data_set):
    """Format JSON output for Zabbix low-level discovery (LLD).

    :param data_set: set or list of dictionaries
    :return: JSON string of the form {"data": <data_set>}
    """
    payload = {'data': data_set}
    return json.dumps(payload)
def asn1_to_der(asn1):
    """Convert from asn1crypto x509 to DER bytes.

    Args:
        asn1 (:obj:`x509.Certificate`):
            asn1crypto x509 to convert to DER bytes
    Returns:
        (:obj:`bytes`)
    """
    # asn1crypto objects serialize themselves via .dump(); no extra
    # arguments are needed for DER output.
    return asn1.dump()
def _local(tag):
"""Extract the local tag from a namespaced tag name (PRIVATE)."""
if tag[0] == '{':
return tag[tag.index('}') + 1:]
return tag | 00895cb03f968a565de3224caad2f05d72557cdd | 86,056 |
def unique_tuples(df, columns):
"""
Return the set of unique tuples from a dataframe for the specified
columns.
Parameters
----------
df : pandas.DataFrame
Dataframe with the columns of data to be considered.
columns : list-like
A list of column names in the dataframe from wh... | 9e6c31e957a1a7a5bf58fd8362ad8ae8106ac253 | 108,445 |
def NestedMultiplication(x, xValues, coeff):
    """Evaluate the Newton interpolating polynomial at `x` in nested
    (Horner) form, given the interpolation nodes `xValues` and the
    Newton coefficients `coeff` (a numpy array)."""
    degree = coeff.size
    acc = coeff[degree - 1]
    for idx in range(degree - 2, -1, -1):
        acc = coeff[idx] + (x - xValues[idx]) * acc
    return acc
import torch
def normalize_pointcloud_transform(x):
"""
Compute an affine transformation that normalizes the point cloud x to lie in [-0.5, 0.5]^2
:param x: A point cloud represented as a tensor of shape [N, 3]
:return: An affine transformation represented as a tuple (t, s) where t is a translation an... | 06c2178a92d8e5a1b585249e9aa1e1bd4c4e3d5d | 652,736 |
def readWord(file):
    """Read a big-endian 16-bit word from `file` via two single-byte reads."""
    high = ord(file.read(1))
    low = ord(file.read(1))
    return (high << 8) | low
from typing import List
import heapq
def find_nth_highest_index(input: List[int], n: int = 2) -> int:
"""
Finds the nth highest item in a list and returns its index.
Note that n is 1-indexed here, e.g. n=1 would return the 1st largest element.
"""
heap = [] # Create a min-heap of size n.
for ... | c470694aadffef32b514e7c18469da8b6e2b2a69 | 409,258 |
def count_true_positive(truth, recommend):
"""Count number of true positives from given sets of samples.
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
Returns:
int: Number of true positives.
"""
tp = 0
... | 6630d8a27ff401aa7d15ec81a23c3fc047c26a9e | 274,235 |
import re
def _hours_to_ints(col_name):
"""A macro to rename hourly demand columns."""
if re.match(r"^hour\d\d$", col_name):
col_name = int(col_name[4:])
return col_name | 1599617416d3e0408ce90c344470d7c5cf9e843a | 400,351 |
def get_span_row_count(span):
"""
Gets the number of rows included in a span
Parameters
----------
span : list of lists of int
The [row, column] pairs that make up the span
Returns
-------
rows : int
The number of rows included in the span
Example
-------
C... | e226e0f78bd6711a7ddbe9c749ed43d1d2bc476c | 14,083 |
def _CopyFieldToProtocolBuffer(field, pb):
"""Copies field's contents to a document_pb.Field protocol buffer."""
pb.set_name(field.name.encode('utf-8'))
field_value_pb = pb.mutable_value()
if field.language:
field_value_pb.set_language(field.language.encode('utf-8'))
if field.value is not None:
field.... | 61406ff3d3e85d2f528888b5f7384bc4056e0da1 | 597,688 |
import mimetypes
def get_content_type(filename):
    """
    Guess the MIME type for `filename` via the mimetypes module,
    falling back to application/octet-stream when unknown.
    """
    guessed, _encoding = mimetypes.guess_type(filename)
    return guessed or 'application/octet-stream'
def bbox2ogr_clipdst(bbox):
"""
convert "29.9997,59.7816,30.6396,60.1117" to "29.9997 59.7816 30.6396 60.1117"
"""
clipdst = '{x1} {y1} {x2} {y2}'
clipdst = clipdst.format(
x1 = bbox.split(',')[0],
y1 = bbox.split(',')[1],
x2 = bbox.split(',')[2],
y2 = bbox.split(',')[3],
)
r... | f41af0056a990258c03a5007f09ac1dd99451596 | 272,799 |
def detrend_none(x, axis=None):
"""
Return x: no detrending.
Parameters
----------
x : any object
An object containing the data
axis : int
This parameter is ignored.
It is included for compatibility with detrend_mean
See Also
--------
detrend_mean : Another... | 73a99772443220314c7ce803fdbd815910e706d0 | 694,398 |
def dict_nested_get(dictionary_or_value, keys, default=None):
"""
Performs a dictionary.get(key, default) using the supplied list of keys assuming that each successive key is nested.
For example, for a dictionary dictionary = { "key1": { "key2": 1 } }, use nested_get(dictionary, ["key1", "key2"]) to get th... | 0faec6cb8111d25267ca757f11abc6f68621aec0 | 374,366 |
import json
def get_http_error_response_json(error_title, error_key, error_value):
"""Returns a JSON object indicating an Http Error"""
http_error_resp = {}
http_error_resp["errors"] = []
http_error_resp["errors"].append(
{"error": error_title, "error_values": [{error_key: error_value}]}
)... | 7e0b35e69c2961f21fe690179e7432e8f060982f | 346,857 |
import math
def log2(x):
    """Calculate the base-2 logarithm of x: log2(x)"""
    result = math.log2(x)
    return result
def remove_prefix(string, prefix):
""" This function removes the prefix string of a string. If
the prefix does not exist, the string is returned unchanged.
See https://stackoverflow.com/a/16891418
Parameters
----------
string : string
The string that the prefix of which will be taken o... | de742a6df7926f4e407edad577de070cf18d1d6e | 380,019 |
def eeg_name_frequencies(freqs):
"""
Name frequencies according to standart classifications.
Parameters
----------
freqs : list or numpy.array
list of floats containing frequencies to classify.
Returns
----------
freqs_names : list
Named frequencies
Example
---... | f5f055c7855ad3cf1d2bcf2bb8cde1efb6888ad3 | 137,868 |
from typing import Dict
from typing import Tuple
def get_pair_stats(vocab: Dict[str, int]) -> Dict[Tuple[str, str], int]:
"""Get counts of pairs of consecutive symbols."""
pairs = {}
for word, frequency in vocab.items():
symbols = word.split()
# count occurrences of pairs
for i i... | 87300f91867f66f3c686ab22f064f92bd6a129d3 | 356,238 |
from typing import Counter
def count_steps(input_str):
    """Count steps up/down ( '(' / ')' )"""
    return input_str.count('(') - input_str.count(')')
def removeMacColons(macAddress):
    """Strip every colon character from a MAC address string."""
    return ''.join(macAddress.split(':'))
def _GetCulpritInfo(analysis):
"""Returns a dict with information about the culprit git_hash.
Args:
analysis (MasterFlakeAnalysis): The master flake analysis the suspected
flake build is associated with.
Returns:
A dict in the format:
{
'commit_position': int,
'git_hash':... | feb0d50465f8a3a32153fd5ccadcfb6268116719 | 519,113 |
def get_user_name(handle):
    """Returns the user name of the user executing the command, given 'handle'.

    Raises KeyError when the handle carries no 'user' entry.
    """
    user = handle['user']
    return user
import ntpath
def fileparts(n):
"""
p,n,e = fileparts(filename)
fileparts(r'c:\blah\BLAH.jpg') returns ('c:\blah','BLAH','.jpg')
Note that the '.' lives with the extension, and separators have been removed.
"""
p = ntpath.dirname(n)
basename = ntpath.basename(n)
n,... | 609aa0dfaa1cfb55520dcbf650e98ba2c4c07948 | 336,671 |
from typing import Union
import multiprocessing
import re
def parse_num_processors(value: Union[str, int, float]):
"""Convert input value (parse if string) to number of processors.
Args:
value: an int, float or string; string value can be "X" or "MAX-X"
Returns:
An int of the number of pro... | 174f825c00946e80a64ce998508d34149e01bf14 | 659,772 |
def load_targets_file(input_file):
"""
Takes a string indicating a file name and reads the contents of the file.
Returns a list containing each line of the file.
Precondition: input_file should exist in the file system.
"""
with open(input_file, 'r') as f:
f = f.readlines()
out = [i.replace('\n','').replace('\... | 40d305e244264d6c3249bb9fb914cda3ebcda711 | 705,072 |
def BM3_EOS_energy(V, V0, E0, K0, Kp0):
    """Energy from a 3rd-order Birch-Murnaghan equation of state.

    Evaluates E(V) for reference volume V0, reference energy E0, bulk
    modulus K0 and its pressure derivative Kp0.  The repeated strain
    term (V0/V)^(2/3) is computed once and reused.
    """
    f = (V0 / V) ** (2.0 / 3.0)
    prefactor = (9.0 * V0 * K0) / 16.0
    bracket = ((f - 1.0) ** 3.0) * Kp0 + ((f - 1.0) ** 2.0 * (6.0 - 4.0 * f))
    return E0 + prefactor * bracket
from typing import OrderedDict
def _translate_reverse_relationships(opts, reverse_relations):
"""
DRF's `_get_reverse_relationships` uses the `get_accessor_name` method of
`ForeignObjectRel` as the key for the relationship. This function replaces
those keys with the rel's `name` property. This allows ... | 56851cb19ec4410bff5d18f08b088957a04a8ae5 | 656,053 |
def parse_u32(byte_seq, index):
    """Parse a little-endian u32 starting at `index` in `byte_seq`."""
    value = 0
    for offset in range(4):
        value |= byte_seq[index + offset] << (8 * offset)
    return value
def flatten(l):
    """Flatten a nested list (or tuple) into a flat list of leaves."""
    if not isinstance(l, (list, tuple)):
        return [l]
    result = []
    for element in l:
        result.extend(flatten(element))
    return result
def sectRect(rect1, rect2):
"""Return a boolean and a rectangle. If the input rectangles intersect, return
True and the intersecting rectangle. Return False and (0, 0, 0, 0) if the input
rectangles don't intersect.
"""
(xMin1, yMin1, xMax1, yMax1) = rect1
(xMin2, yMin2, xMax2, yMax2) = rect2
... | 4c9d5dd10c3aa0d30127776a92f70d7e2037b066 | 281,380 |
def cut_to_specific_word(dataframe, specific_word, part_in_bid):
"""
Takes in a dataframe and a column, and then creates a new dataframe containing only
the rows from the original dataframe that had the search word in that column
:params: a dataframe, a specific_word to look for, and a column name relat... | c8ee42088cb85c053b1a5a8b3877436a50df689d | 653,543 |
def load_metaparameters(param_dict=None):
"""
Parameters for bayesian optimizer. Default dictionary listed and updated by param_dict.
"""
metaparams = {'architecture': 'svm',
'log_gamma': -3,
'log_C': -2}
if param_dict:
metaparams.update(param_dict)
r... | c07e2c24fdcb4cd553559129a4a6a2cfe8cff726 | 372,407 |
def get_consecutive_num(arr):
    """
    Return the second member of every consecutive pair in `arr`
    (i.e. each element that directly follows a value one smaller).
    Note: solve_90f3ed37 uses this function
    """
    return [second for first, second in zip(arr, arr[1:]) if first + 1 == second]
def dictkeyclean(d):
    """Convert all keys of the dict `d` to strings, keeping values as-is."""
    return {str(key): value for key, value in d.items()}
def glue(stdin, delimiter=" "):
"""
Join every lines in the stream, using the given delimiter.
The default delimiter is a space.
>>> list([[[1], [2]], [[3], [4]], [[5], [6]]] |
... traverse() | map(str) | glue(" "))
['1 2 3 4 5 6']
"""
data = list(stdin)
return iter([delimiter.... | 9ea1b5caa226326ef26389b0ea1cc77eac64e1df | 560,448 |
from typing import List
from typing import Tuple
def find_two_smallest_walk(L: List[float]) -> Tuple[int, int]:
"""Return a tuple of the indices of the two smallest values in list L.
>>> items = [809, 834, 477, 478, 307, 122, 96, 102, 324, 476]
>>> find_two_smallest_walk(items)
(6, 7)
>>> items ==... | c00d018fd553fd15eb0063909023d2b56423c3b1 | 422,933 |
import random
def generate_chunks(total, min_val, max_val, num_chunks=-1):
"""
Randomly generate a list of integers l such that sum(l) = total and for each x in l, min_val <= x <= max_val.
If num_chunks > 0, it is guaranteed that the list contains exactly num_chunks elements.
"""
if num_chunks <=... | a76c2f206801b339b203197249bc969cf91c7caa | 115,584 |
import random
def pick_best_and_random(pop, maximize=False):
"""
Here we select the best individual from a population and pair it with a random individual from a population
:param pop: input population
:param maximize: when true a higher fitness score is better, otherwise a lower score is considered ... | 35c8ecb4f3966a06dd62b7fa34a3c5faf076805f | 201,794 |
def range_splits(tensor, split_ranges, dim):
"""Splits the tensor according to chunks of split_ranges.
Arguments:
tensor (Tensor): tensor to split.
split_ranges (list(tuples(int,int))): sizes of chunks (start,end).
dim (int): dimension along which to split the tensor.
"""
return... | 67de1dd67a49e953dfc259031d852649be0e6343 | 588,925 |
def getlabel(section):
"""
Converts all activity outcome strings in either 1, 0 or None for a list
"""
for i in range(len(section)):
if section[i] == 'Active':
section[i] = 1
elif section[i] == 'Inactive':
section[i] = 0
else:
section[i] = None... | 6b30fd141137690e9919503f91063db12eede8aa | 316,515 |
def read_from_occ_translation_dict(occupation, tdict):
    """Map an original occupation string to its HISCO code via `tdict`.

    Raises KeyError when the occupation is not in the dictionary.
    """
    return tdict[occupation]
def read_file_data(filepath):
    # type: (str) -> list
    """
    Read a database file and return its lines as a list.

    On I/O failure (or a non-string path) the error is printed and an
    empty list is returned.
    """
    lines = []
    try:
        with open(filepath, 'r') as handle:
            lines = handle.read().splitlines()
    except (IOError, TypeError) as err:
        print(err)
    return lines
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.