| content (string, 39–14.9k chars) | sha1 (string, 40 hex chars) | id (int64, 0–710k) |
|---|---|---|
def _get_train_steps(num_examples, train_epochs, train_batch_size):
"""Determine the number of training steps."""
return num_examples * train_epochs // train_batch_size + 1 | 8fe187059e2050f599fcec0d707a0c3fcb4f857e | 688,143 |
def make_synteny(genes, isoforms):
    """Return synteny for a list of genes and dict of isoforms.

    Counted as the number of distinct isoform groups the genes map to;
    genes missing from ``isoforms`` all share the single ``None`` group.
    """
    # A set comprehension already de-duplicates; the original's
    # list(set([...])) round-trip was redundant.
    return len({isoforms.get(gene) for gene in genes})
import torch
def view_as_real(data):
    """Named version of `torch.view_as_real()`"""
    original_names = data.names
    # view_as_real rejects named tensors, so strip names, convert,
    # then restore them with a trailing "complex" dimension.
    unnamed = data.rename(None)
    real_view = torch.view_as_real(unnamed)
    return real_view.refine_names(*original_names + ("complex",))
def bitstring_readable(data, batch_size, model_output=None, whole_batch=False):
"""Produce a human readable representation of the sequences in data.
Args:
data: data to be visualised
batch_size: size of batch
model_output: optional model output tensor to visualize alongside data.
whole_batch: wheth... | 342b6720dd30b1f8d8b984a5d49b09913050fd40 | 688,148 |
def parse_crs_string(string: str) -> str:
"""Parses a string to determine the CRS/spatial projection format.
Args:
string: a string with CRS/projection data.
Returns:
crs_type: Str in ["wkt", "proj4", "epsg", "string"].
"""
if "epsg:" in string.lower():
return "epsg"
el... | 6e730d767924be39244a9d1e08fe6895f1d4b1db | 688,149 |
def bytes_to_int(s):
    """Return converted bytestring to integer.

    Args:
        s: bytes (or bytearray) to interpret as a big-endian number

    Returns:
        int: numeric interpretation of binary string `s`
    """
    # The original `s.encode('hex')` is Python 2 only; int.from_bytes
    # is the Python 3 equivalent (and returns 0 for empty input).
    return int.from_bytes(s, byteorder='big')
def make_space(space_padding=0):
    """
    Return string with x number of spaces. Defaults to 0.
    """
    # String repetition is O(n) and clearer than the original
    # concatenation loop; negative counts yield '' just like the loop.
    return ' ' * space_padding
def rename_bindnames(tqry, li_adjust):
"""use this to alter the query template to match expected attribute names in bind objects/dictionaries
For example, a predefined query may be: "select * from customers where custid = %(custid)s"
But you are repeatedly passing bind dictionaries like {"customer" ... | 5e2d79772e1495d215f81166652b4449cb04a788 | 688,158 |
def _format(string):
""" Formats a class name correctly for checking function and class names.
Strips all non-alphanumeric chars and makes lowercase.
"""
return ''.join(list(filter(str.isalnum, string))).lower() | 0fbff1d0da8c3bd4b318613dfa039dcef664f11f | 688,160 |
def rldecode(A, n, axis=0):
"""
Decompresses run length encoding of array A along axis.
Synopsis:
B = rldecode(A, n, axis)
B = rldecode(A, n) # axis assumed to be 0
Arguments:
A (np.ndarray): Encoded array
n (np.ndarray): Repetition of each layer along an axis.
... | 9ffa16774905f6c869eae719f6ff8b06d2a7fb13 | 688,169 |
def open_and_read_file(file_path):
    """Read the entire contents of the file in as a string.

    Args:
        file_path: path of the file to read.

    Returns:
        str: the file's full contents.
    """
    # Use a context manager so the handle is closed even on error;
    # the original leaked the open file object.
    with open(file_path) as infile:
        return infile.read()
def createFromDocument(doc):
    """
    Create an empty JS range from a document
    @param doc DOM document
    @return a empty JS range
    """
    empty_range = doc.createRange()
    return empty_range
def _lsb_2fold(aa, bit):
"""
This function embeds a pair of bits in 2/3 fold degenerative codon.
:param aa: amino acid information.
:param bit: bit (character 2 e.g. 0) which should be embedded in codon.
:return: watermarked codon (string) e.g. AGA.
"""
if bit == '0':
return aa["codo... | 9730ddb9f13d9d3fe1191d7fd0bc81172ee5cfcd | 688,186 |
def encoder_type(encode):
    """
    Takes the value sent from the user encoding menu and returns
    the actual value to be used.
    """
    # Dispatch table: menu choice -> encoder name; unknown -> "ERROR".
    encoders = {
        '0': "",
        '1': "shikata_ga_nai",
        '2': "",
        '3': "MULTIENCODE",
        '4': "BACKDOOR",
    }
    return encoders.get(encode, "ERROR")
def bounding_box_circle(svg, node, font_size):
    """Bounding box for circle node."""
    center_x, center_y = svg.point(node.get('cx'), node.get('cy'), font_size)
    radius = svg.length(node.get('r'), font_size)
    diameter = 2 * radius
    # Top-left corner plus width/height.
    return center_x - radius, center_y - radius, diameter, diameter
def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id):
"""Merge segments A and B, add [CLS] and [SEP] and build tokentypes."""
tokens = []
tokentypes = []
# [CLS].
tokens.append(cls_id)
tokentypes.append(0)
# Segment A.
for token in tokens_a:
tokens.append(token)
... | 0f72f261ff1e0ee2d304321cd0bbc0af9c662b4b | 688,190 |
def _get_parameters_proto(host_calls_dictionary):
"""Get the FormalParameterProtos for the first host call in the dictionary."""
return host_calls_dictionary['host_calls'][0].parameters | 7d62ee04bc52fe29bd36a14366c96e8ce5542c46 | 688,192 |
def sql_flush(style, connection, only_django=False):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.... | 11ddd9a59bd03cf5e529984797325442f8dcf3cd | 688,194 |
def lit_eq(lit1, lit2):
    """ Returns true lits are syntactically equal """
    are_equal = (lit1 == lit2)
    return are_equal
def greedy_action(q, state):
"""
Computes the greedy action.
:param q: action-value table.
:type q: bidimensional numpy array.
:param state: current state.
:type state: int.
:return: greedy action.
:rtype: int.
"""
greedy_act = 0
q_max = q[state][greedy_act]
for action i... | 00ead3adb1da74bbe9ca0aef24793c6bd711aa83 | 688,203 |
import yaml
def load_config_file(path):
    """
    Load and parse a yaml file.
    Parameters:
        path (str): full yaml path location
    Returns:
        dict: yaml file parsed into a dict
    """
    with open(path) as config_handle:
        parsed = yaml.load(config_handle, Loader=yaml.FullLoader)
    return parsed
def clean_nginx_git_tag(tag):
"""
Return a cleaned ``version`` string from an nginx git tag.
Nginx tags git release as in `release-1.2.3`
This removes the the `release-` prefix.
For example:
>>> clean_nginx_git_tag("release-1.2.3") == "1.2.3"
True
>>> clean_nginx_git_tag("1.2.3") == "1... | 232da3ae24987fd921f5fcc9273f6218fc3a0371 | 688,207 |
def check_for_running_sfn(session, arn):
"""Check if a downsample step function already running
Args:
session (boto3.session):
arn (str): Step function arn
Returns:
(bool)
"""
client = session.client('stepfunctions')
resp = client.list_executions(stateMachineArn=arn, st... | 49e01f8109642d31d36a4c1d2e280e046771ae15 | 688,209 |
def has_usable_review_ui(user, review_request, file_attachment):
"""Returns whether a review UI is set and can be used."""
review_ui = file_attachment.review_ui
return (review_ui and
review_ui.is_enabled_for(user=user,
review_request=review_request,
... | fc97f8d7cc2a6ad9b1689341510eeae20c6e4c8d | 688,212 |
def to_binary(number: int) -> str:
    """Convert a decimal number to a binary numbers.

    :param number: The number to convert to binary
    :return: The binary representation of the number (with a leading
        '-' for negative inputs)
    """
    # bin(-5)[2:] would yield the garbled 'b101'; format() handles the
    # sign correctly and matches bin(x)[2:] for non-negative input.
    return format(number, 'b')
import base64
def base64_encode(string):
    """
    base64's `urlsafe_b64encode` uses '=' as padding.
    These are not URL safe when used in URL parameters.
    Removes any `=` used as padding from the encoded string.
    """
    padded = base64.urlsafe_b64encode(string)
    # Strip only the trailing '=' padding bytes.
    return padded.rstrip(b"=")
def handler(value, **kwargs):
"""Split the supplied string on the given delimiter, providing a list.
Format of value:
<delimiter>::<value>
For example:
Subnets: ${split ,::subnet-1,subnet-2,subnet-3}
Would result in the variable `Subnets` getting a list consisting of:
["sub... | 700321db3f92bf87c9a6ecc8dc38d4dd2ad14229 | 688,222 |
def replace_string_in_list(str_list: list, original_str: str, target_str: str):
"""
Replace a string in a list by provided string.
Args:
str_list (list): A list contains the string to be replaced.
original_str (str): The string to be replaced.
target_str (str): The replacement of st... | 31649c8c7171518598f6b3fe4d5db1b46e5cd573 | 688,225 |
def _wait_before_serving(seconds):
"""Tell the server not to write to this socket for the specified time."""
def _helper(ps, soc):
ps.delay_writing_for(seconds * 1000, soc)
return _helper | df3a4c969b1d094f0ac7fe07c967980ab8500fc0 | 688,226 |
def test_stability(v1, v2, precision=10e-3):
"""tests if two lists of lists of floats are equal but a certain precision
Args:
v1 (list[list[float]]): first list containing ints
v2 (list[list[float]]): second list containing ints
precision (float, optional): the precision after which... | 07b12bd7255a7f88cff630b3b6f79513752b2cb3 | 688,228 |
def cast_elements_to_string(cast_list):
""" This function casts the top level elements of a list to strings. Note that it
does not flatten lists before doing so, so if its elements contain lists, it will
cast these lists to strings.
Apply flatten_list() before applying cast_elements_to_string() if you... | 8e34768a8fd9f159a9770562ce20d637b94de0b0 | 688,232 |
import torch
def test_observe_get_and_verify_response_input_unit(tmp_observe_class, method, tmp_val, monkeypatch):
"""
test that _get_and_verify_response_input works for self.sampling["method"] = "iteratuve" or "functions". Leverage
monkeypatching and create false class to mock that greattunes._observe wi... | a5b0d9bcb4ad7d893498e88395d297e84d7212b2 | 688,234 |
def no_walk_revctrl(dirname=''):
    """Return a list containing a single empty string.
    """
    # Returning a non-empty list prevents egg_info from reading the
    # existing SOURCES.txt
    result = ['']
    return result
def get_rm(g):
    """Return membrane resistivity in Ohm*m^2
    g -- membrane conductivity in S/m^2
    """
    # Resistivity is the reciprocal of conductivity.
    resistivity = 1 / g
    return resistivity
def load_md(path: str) -> list:
    """
    Loads an existing file into a list.
    :param path: path where the file is stored
    :return: list with the lines from the file
    """
    with open(path, "r", encoding="UTF-8") as mdfile:
        lines = mdfile.readlines()
    return lines
def clean_name(name):
    """
    Cleans a proposed character name.

    Keeps only alphabetic characters and title-cases the result.
    """
    letters_only = ''.join(ch for ch in name if ch.isalpha())
    return letters_only.title()
def merge_dicts(*dicts, **kwargs):
"""Merge all dicts in `*dicts` into a single dict, and return the result. If any of the entries
in `*dicts` is None, and `default` is specified as keyword argument, then return `default`."""
result = {}
for d in dicts:
if d is None and "default" in kwargs:
... | e42717a86d9f92f8a4924dd1317e7fcc23ef0f97 | 688,243 |
def calc_median(values_list):
"""calculates the median of the list in O(n log n); thus also returns sorted list for optional use"""
median = 0.0
sorted_list = sorted(values_list)
n = len(sorted_list)
if n == 0:
return median, sorted_list, n
half = n >> 1
if n % 2 == 1:
media... | f6f77eb3b99b946b5d09acef3fc08756527af134 | 688,249 |
import json
def json_pp(json_object):
"""
Helper method to convert objects into json formatted pretty string
:param json_object: The object to be converted into pretty string
:return: A pretty formatted string
"""
formatted_json = json.dumps(json_object,
sort_ke... | 74e11e736d512137bfdfebef3230c66e48edce2f | 688,252 |
import string
def flow_key(flow):
"""Model a flow key string for ``ovs-ofctl``.
Syntax taken from ``ovs-ofctl`` manpages:
http://openvswitch.org/cgi-bin/ovsman.cgi?page=utilities%2Fovs-ofctl.8
Example flow dictionary:
flow = {
'in_port': '1',
'idle_timeout': '0',
'act... | ef27317827587778315d10b0e55f75554c748c13 | 688,259 |
def cleanup_decorator(func):
"""Decorator which runs cleanup before and after a function"""
def clean_before_after(self, *args, **kwargs): # pylint: disable=missing-docstring
# pylint only complains about a missing docstring on py2.7?
self.cleanup()
result = func(self, *args, **kwargs)... | e993a1476f561284a8f85f2aee4dd91dba573bb9 | 688,265 |
def formatUintHex64(value):
    """
    Format an 64 bits unsigned integer.

    Zero-padded to 16 hex digits with a '0x' prefix.
    """
    return u"0x{:016x}".format(value)
def get_account_id(sts_client):
    """Retrieve the AWS account ID for the authenticated user or role"""
    caller_identity = sts_client.get_caller_identity()
    return caller_identity['Account']
def bull_engulf(Open, high, low, close, t=4):
"""
Identifies if prices is a Bullish Engulfing Pattern of not
Param:
Open: array of open prices (5-day)
high: array of high prices (5-day)
low: array of low prices (5-day)
close: array of close prices (5-day)
t: int ... | 375500d7517b866d8e738a52165ecc40b8931258 | 688,270 |
def _fit_one_ovo(bin_clf_idx, multi_ovo, dataset, verbose):
"""Fit the OVO classifier given an index.
This method fits a one-vs-one classifier wrt the
positive and negative labels taken from the list
clf_pair_idx at the index bin_clf_idx.
Parameters
----------
bin_clf_idx : int
Ind... | 31d8e6ce66e3f40a243777f80ce416c9309f4a1e | 688,271 |
def get2dgridsize(sz, tpb = (8, 8)):
    """Return CUDA grid size for 2d arrays.
    :param sz: input array size
    :param tpb: (optional) threads per block
    """
    # Ceil-divide each of the two dimensions by its block size.
    blocks = tuple((dim + block - 1) // block for dim, block in zip(sz, tpb))
    return blocks, tpb
import torch
def pitchVocabularyFmt(X, vocab_col):
"""
Produces the tensors for training with a pitch vocabulary encoding.
"""
pitch = torch.tensor(X[:, vocab_col], dtype=torch.long)
score_feats = torch.cat([torch.tensor(X[:, :vocab_col], dtype=torch.float),
torch.tenso... | df846df0ec36ecc770f6afa032d3d1433a7497c8 | 688,277 |
def IsSimulator(target_cpu):
    """Returns whether the |target_cpu| corresponds to a simulator build."""
    # Device builds target ARM CPUs; everything else is a simulator.
    is_device_cpu = target_cpu.startswith('arm')
    return not is_device_cpu
import socket
import struct
def ipv6_to_long(ip):
"""Return the IPv6 address string as a long
>>> ipv6_to_long("2001:db8::1")
42540766411282592856903984951653826561L
>>> ipv6_to_long("::1")
1L
"""
ip_bytes_n = socket.inet_pton(socket.AF_INET6, ip)
ip_parts = struct.unpack('!QQ', ip_by... | 1c9f324dba9be791f6ec0fae9693047b06759b6d | 688,285 |
import functools
def join(*expressions):
""" Convenient function for joining many expressions in series
using ``ObserverExpression.then``
Parameters
----------
*expressions : iterable of ObserverExpression
Returns
-------
new_expression : ObserverExpression
Joined expression.... | 1e14369a7fa471b0b586287c7647980a789c6e6c | 688,286 |
def ssh_auth(username, address):
    """Render username and address part.

    Falsy usernames yield the bare address.
    """
    if not username:
        return '{}'.format(address)
    return '{}@{}'.format(username, address)
def round_int(value):
    """Cast the specified value to nearest integer.

    Floats are rounded first; everything else goes straight to int().
    """
    if not isinstance(value, float):
        return int(value)
    return int(round(value))
def accel_within_limits(v, a, v_range):
"""
Accelerate the car while clipping to a velocity range
Args:
v (int): starting velocity
a (int): acceleration
v_range (tuple): min and max velocity
Returns:
(int): velocity, clipped to min/max v_range
"""
v = v + a
... | 0ed6da91424149c04e6d793a740d13b3ec9728ec | 688,293 |
from typing import Iterable
def check_type(data):
    """
    Check type of an object. Return False if it is dictionary or list, True - otherwise.
    """
    # Strings are iterable but are treated as scalars here.
    if isinstance(data, str):
        return True
    return not isinstance(data, Iterable)
def get_ap_vel(df, time_step, scaling_factor): # Calculates 'angular persistence', 'velocity', and 'directed velocity'
"""
Primary function called by "get_chemotaxis_stats" and
"get_chemotaxis_stats_by_interval". Calculates the 'Angular_persistence', 'Velocity',
and 'Directed_velocity' for each timepoin... | 6982c7628b82c286a825cbc767d4cf39cb31f783 | 688,300 |
def _num_tokens_of(rule):
"""Calculate the total number of tokens in a rule."""
total = len(rule.get("tokens"))
for _ in ("prev_classes", "prev_tokens", "next_tokens", "next_classes"):
val = rule.get(_)
if val:
total += len(val)
return total | eafebe556ee28fe1ab4699894266b47b4c1ed63b | 688,301 |
def sort_dataframe(dataframe, sort_column, order='ascending', nulls_position='last', inplace=True):
"""
Sort the dataframe by the sort column
Arguments are the dataframe and column that you want to sort by
Optional arguments are:
- order (default ascending, can be ascending or descending) which dete... | a4d87ef0ff18d38b0caa747b885194e4820cac77 | 688,306 |
def job_id() -> str:
    """Returns a mock job ID."""
    # All-zero UUID-shaped placeholder (8-4-4-4-12).
    return "-".join(["0" * 8, "0" * 4, "0" * 4, "0" * 4, "0" * 12])
import re
def _split_text_by_opening(pattern, text):
"""
Splits text into parts identified by opening that matches `pattern`.
For example, --pattern='\n\nCHAPTER \\d+\n\n' may be used
to split text into chapters.
"""
openings = re.findall(pattern, text)
if len(openings) == 0:
prin... | 2082f62b35b96173159bab0d8935ac849bb14d42 | 688,309 |
import json
def format_json(data, default=None):
    """
    Pretty print JSON.
    Arguments:
        data (dict): JSON blob.
        default: optional serializer for non-JSON types.
    Returns:
        str: Formatted JSON
    """
    pretty = json.dumps(
        data,
        sort_keys=True,
        indent=2,
        separators=(",", ": "),
        default=default,
    )
    return pretty
def oo_split(string, separator=','):
    """ This splits the input string into a list. If the input string is
        already a list we will return it as is.
    """
    if not isinstance(string, list):
        return string.split(separator)
    return string
from functools import reduce
import operator
def MergeDicts( *dicts ):
    """Construct a merged dictionary from the given dicts.
    If two dicts define the same key, the key from the dict later in the list is chosen.

    Returns an empty dict when called with no arguments.
    """
    # The original reduce(operator.add, map(dict.items, dicts)) breaks on
    # Python 3, where dict_items views do not support '+'; dict.update
    # gives the same later-wins semantics and also handles zero dicts.
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged
def get_label_color(status):
"""
Get a customized color of the status
:param status: The requested status to get a customized color for
:return: customized color
"""
colors = {'NEW':'grey',
'ASSIGNED':'blue',
'OPEN': 'orange',
'FIXED': 'purp... | 6bcf168d653801999bc2c2528a426ec43afdd349 | 688,333 |
def remove_duplicates(seq):
"""
Removes duplicates from a list.
This is the fastest solution, source:
http://www.peterbe.com/plog/uniqifiers-benchmark
Input arguments:
seq -- list from which we are removing duplicates
Output:
List without duplicates.
Example:
... | 460e585e04fb7f868e0216e6d82808c68be80a7d | 688,340 |
import torch
def compute_bboxes_from_keypoints(keypoints):
"""
keypoints: B x 68*2
return value: B x 4 (t, b, l, r)
Compute a very rough bounding box approximate from 68 keypoints.
"""
x, y = keypoints.float().view(-1, 68, 2).transpose(0, 2)
face_height = y[8] - y[27]
b = y[8] + face_... | 25a3135c40e9b2e615b2d8dc2eba425ff38177b2 | 688,341 |
def write_matlabbatch(template, nii_file, tpm_file, darteltpm_file, outfile):
""" Complete matlab batch from template.
Parameters
----------
template: str
path to template batch to be completed.
nii_files: list
the Nifti image to be processed.
tpm_file: str
path to the S... | 39cbf74ced5c35e171e18c5261b7f017adb8cd6c | 688,343 |
import html
import re
def reddit_sanitize( text ):
"""
Convert comments in the Reddit API format to actual plain-text likely
constructed by the individual who posted it. HTML is unescaped, markup
is removed, and quotes are removed.
"""
# Unescape HTML (IE, '>' becomes '>')
text = html.... | 304e3f0900a50d0e2d0e204f7391ba106bed805b | 688,350 |
def config2object(config):
"""
Convert dictionary into instance allowing access to dictionary keys using
dot notation (attributes).
"""
class ConfigObject(dict):
"""
Represents configuration options' group, works like a dict
"""
def __init__(self, *args, **kwargs):
... | 1523329a0ba6495d1b23530aa6d02f9c953d7e51 | 688,354 |
import socket
def _get_available_ports(n: int) -> list[int]:
"""
Get available ports.
Parameters
----------
n : int
number of ports to get.
Returns
-------
list[int]
Available ports.
"""
socks: list[socket.socket] = [socket.socket() for _ in range(n)]
list... | 64e4f6f0683ff7df34a2e264c0e22d2d3a7414ec | 688,355 |
def mean(sequence):
    """
    Calculates the arithmetic mean of a list / tuple
    """
    total = sum(sequence)
    count = float(len(sequence))
    return total / count
import textwrap
def proteins_to_fasta(proteins, seqids=[], use_safe_seqid=False, width=50):
"""
Takes a proteins dictionary and returns a string containing
all the sequences in FASTA format. Option parameters are
a list of seqids to output (seqids) and the line width (width).
"""
if seqids:
... | 569682abb4f8b0d62cba39f5720e09fb8baf7ec8 | 688,359 |
def lowercase(data):
"""Lowercase text
Args:
data (list,str): Data to lowercase (either a string or a list
[of lists..] of strings)
Returns:
list, str: Lowercased data
"""
if isinstance(data, (list, tuple)):
return [lowercase(item) for item in data]
elif is... | fd173620e8ddb58d5966b235a3b9236ebf01f9d5 | 688,363 |
def overlay_image(foreground_image, mask, background_image):
""" Overlay foreground image onto the background given a mask
:param foreground_image: foreground image points
:param mask: [0-255] values in mask
:param background_image: background image points
:returns: image with foreground where mask > 0 overla... | fb6b8a854e99fe984b6f57eb683a8f77a507e155 | 688,365 |
def eq_or_in(val, options):
    """Return True if options contains value or if value is equal to options.

    Only a tuple triggers the membership test; any other type is
    compared for equality.
    """
    if isinstance(options, tuple):
        return val in options
    return val == options
from typing import Counter
def get_bow(tokenized_text):
"""
Function to generate bow_list and word_freq from a tokenized_text
-----PARAMETER-----
tokenized_text should be in the form of [['a'], ['a', 'b'], ['b']] format,
where the object is a list of survey response, with each survey response
... | 656d9dab1b2bee350cecca5fd693fcbc3eafb2bd | 688,368 |
def is_same_py_file(file_1, file_2):
    """Compares 2 filenames accounting for .pyc files.

    Compiled extensions (.pyc/.pyo) are reduced to their .py source
    name before comparing.
    """
    # str.endswith accepts a tuple of suffixes — one call per filename
    # instead of the original's chained 'or' tests.
    if file_1.endswith(('.pyc', '.pyo')):
        file_1 = file_1[:-1]
    if file_2.endswith(('.pyc', '.pyo')):
        file_2 = file_2[:-1]
    return file_1 == file_2
import torch
def loglikelihood(w, weights=None):
"""
Calculates the estimated loglikehood given weights.
:param w: The log weights, corresponding to likelihood
:type w: torch.Tensor
:param weights: Whether to weight the log-likelihood.
:type weights: torch.Tensor
:return: The log-likelihoo... | fbaff2c7d99c11b6c7d5dd2296b8470fdd798e03 | 688,371 |
def resolve_crop(im, crop):
"""Convert a crop (i.e. slice definition) to only positive values
crops might contain None, or - values"""
# only works for two dimension
crop = list(crop)
assert len(crop) == 2
for i in (0, 1):
assert len(crop[i]) == 2
for j in (0, 1):
if ... | 791331619401664f9cb4c4d01f852b39a568f585 | 688,372 |
def normalize_spaces(s):
    """replace any sequence of whitespace
    characters with a single space"""
    # split() with no args collapses any whitespace run and trims ends.
    words = s.split()
    return ' '.join(words)
def checksum(digits):
"""
Returns the checksum of CPF digits.
References to the algorithm:
https://pt.wikipedia.org/wiki/Cadastro_de_pessoas_f%C3%ADsicas#Algoritmo
https://metacpan.org/source/MAMAWE/Algorithm-CheckDigits-v1.3.0/lib/Algorithm/CheckDigits/M11_004.pm
"""
s = 0
p = len(digit... | 0e3f8cc4b1f42265f27c03b10559183f0bbd87e0 | 688,376 |
def find_rlc(p_utility, q_utility, r_set, l_set, c_set):
"""
Proportional controllers for adjusting the resistance and capacitance values in the RLC load bank
:param p_utility: utility/source active power in watts
:param q_utility: utility/source reactive power in var
:param r_set: prior resistor %... | ec6a4476bb842f0e305e25da0245a4ee2c0945b0 | 688,380 |
def score1(rule, c=0):
"""
Calculate candidate score depending on the rule's confidence.
Parameters:
rule (dict): rule from rules_dict
c (int): constant for smoothing
Returns:
score (float): candidate score
"""
score = rule["rule_supp"] / (rule["body_supp"] + c)
r... | c329d1154d59aed6bf62f0af1bcbbd7e237871c2 | 688,382 |
import logging
import json
def extract_english_corpus(json_str, verbose=False):
"""A helper function to extract English corpus from KPTimes dataset in json
:param: json_str: the json string
:param: verbose: bool, if logging the process of data processing
:returns: the articles and keywords for each ... | 7a587733c24a33a5140dac695f4d10a5c18d6e97 | 688,387 |
import re
def humansorted_datasets(l, key=None):
"""Sort a list of datasets according to a key of a dataset
Parameters
----------
l : list
The list of datasets to be sorted
key : str (optional)
The key of the dataset the datasets should be sorted according to.
Defaults to ... | 8817fb61b563feaec51aa6ae35c7df1ae20f4ac7 | 688,393 |
def sum_multiples_three_five(number):
    """
    number: random integer
    return: the sum of all multiples of 3 and 5 below number
    """
    # A generator expression over range() replaces the manual while-loop
    # accumulator; behavior matches for all ints (empty range -> 0).
    return sum(n for n in range(number) if n % 3 == 0 or n % 5 == 0)
def asURL(epsg):
    """ convert EPSG code to OGC URL CRS
    ``http://www.opengis.net/def/crs/EPSG/0/<code>`` notation """
    code = int(epsg)
    return "http://www.opengis.net/def/crs/EPSG/0/%d" % code
import copy
def min_specializations(h,domains,x):
"""Implement a function min_specializations(h, domains, x)
for a hypothesis h and an example x. The argument
domains is a list of lists, in which the i-th
sub-list contains the possible values of feature i.
The function should return all minimal specializati... | fb0205ca1a25aa31bcc9ebb4eefbacbd2dce8800 | 688,401 |
import random
def random_cell(grid, snake):
"""
Generates a new random position on the space of free cells.
:param grid: The grid.
:param snake: The snake whose body will represent occupied cells.
:returns: Position of a free cell.
"""
while True:
x = random.randrange(grid.rows)
... | f4cb0d7940c07e94972de3c1e38c3a9116acb435 | 688,403 |
def point_in_polygon(point, polygon):
"""
Determines whether a [x,y] point is strictly inside a convex polygon
defined as an ordered list of [x,y] points.
:param point: the point to check
:param polygon: the polygon
:return: True if point is inside polygon, False otherwise
"""
x = point... | a776f16b6560d2efcc8e86a56f89029cb35a2867 | 688,404 |
def tag(pages, tag):
    """Pages with a given tag.

    A falsy tag returns the original page list unfiltered.
    """
    if not tag:
        return pages
    matching = [page for page in pages if tag in page.tags]
    return matching
def get_speciesindices(specieslist):
"""
Create a dictionary to assign an arbitrary index to each of the species in
the kinetic scheme.
Parameters
----------
specieslist : list
a list of all the species in the model
Returns
-------
speciesindices : dict
... | 8f309de181cbed3eb6499821da59116a426c16c3 | 688,412 |
def dump_cookies(cookies_list):
    """Dumps cookies to list

    Each cookie object becomes a dict with its name, domain and value.
    """
    return [
        {'name': cookie.name,
         'domain': cookie.domain,
         'value': cookie.value}
        for cookie in cookies_list
    ]
from typing import List
from typing import Dict
def divide_blocks(
blocks: List[int],
world_size: int) -> Dict[int, List[int]]:
"""
Divide the blocks into world_size partitions, and return the divided block indexes for the
given work_rank
:param blocks: the blocks and each item is the ... | ff14768161a78aacccfe827f00493482dd54c830 | 688,416 |
def split_train_test(X, y, test_percentage):
"""
Randomly split given dataset into training- and testing sets
:param X: Design matrix to split
:param y: Response vector to split
:param test_percentage: Percentage of samples to use as test
:return: Two tuples of: (train set X, train set y), (test... | 7f98f9bb5ef9376308da9e10518c94ee1680f71e | 688,417 |
def p1_f_linear(x):
"""DocTest module Expected Output Test - don't change or delete these lines
>>> x = [565, 872, 711, 964, 340, 761, 2, 233, 562, 854]
>>> print("The minimum is: ",p1_f_linear(x))
The minimum is: 2
"""
# ******ENTER YOUR FINAL CHECKED CODE AFTER THIS COMMENT BLOCK***... | 3f2cf29418d29aacce8e86f2b644da98cb683313 | 688,418 |
def erroCsv(csvFile):
    """
    Rename the csv file with err notation
    :param csvFile: input csv file name
    :return: new file name
    """
    renamed = csvFile.replace('.csv', '_err.csv')
    return renamed
from typing import List
def _generate_sharded_filenames(filename: str) -> List[str]:
"""Generates filenames of the each file in the sharded filepath.
Based on github.com/google/revisiting-self-supervised/blob/master/datasets.py.
Args:
filename: The sharded filepath.
Returns:
A list of filepaths for... | 4686aee6dc4d1924dfb1745c5d8a3ae77a604a85 | 688,423 |
def drop_columns(tabular, n):
    """drops first n items from each row and returns new tabular data
    >>> drop_columns([[1, 2, 3],
                      [21, 22, 23],
                      [31, 32, 33]],
                     1)
    [[2, 3], [22, 23], [32, 33]]
    """
    # Build a fresh table; input rows are never mutated.
    trimmed = []
    for row in tabular:
        trimmed.append(row[n:])
    return trimmed
def clicked_quality_reward(responses):
"""Calculates the total clicked watchtime from a list of responses.
Args:
responses: A list of IEvResponse objects
Returns:
reward: A float representing the total watch time from the responses
"""
qual = 0.0
watch = 0.0
for response in res... | a1b1b5cd93b759775125f486146823e771fc4231 | 688,426 |
def get_nim_sum(state: tuple[int, ...]) -> int:
    """
    Get the nim sum of a position. See https://www.archimedes-lab.org/How_to_Solve/Win_at_Nim.html
    :param state: the state of the game
    :return: the nim sum of the current position
    """
    # XOR-fold the heap sizes; an empty position has nim sum 0.
    nim_sum = 0
    for heap in state:
        nim_sum = nim_sum ^ heap
    return nim_sum
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.