content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def kdump(self_update=False, snapshot=None):
"""Regenerate kdump initrd
A new initrd for kdump is created in a snapshot.
self_update
Check for newer transactional-update versions.
snapshot
Use the given snapshot or, if no number is given, the current
default snapshot as a base... | 7,100 |
def register_keywords_user(email, keywords, price):
"""Register users then keywords and creates/updates doc
Keyword arguments:
email - email for user
keywords - string of keywords
price -- (optional) max price can be set to None
"""
logging.info('[INFO] Registering user email \'{}\' '.form... | 7,101 |
def domain_spec_colpair(data, i, j, rel2sub2obj, col_2_errors_repair, pair_coverage, ignore_null):
"""
Checks two columns i and j against a relation. Checks if i is the subject and j is the object or the other way
around. If a match is found, objects that violate the matched relation are marked as errors.
... | 7,102 |
def test_Highest_Score__Two_Disks_Exploding_At_Last_Drop(score, max_score):
"""Function highest_score: Two disks, highest score when exploding at last drop."""
max_score.value += 12
try:
set_up()
test_board = Board.init_board \
(dimension=4, given_disks= \
([wrapp... | 7,103 |
def plot_distribution(df, inv, ax=None, distribution=None, tau_plot=None, plot_bounds=True, plot_ci=True,
label='', ci_label='', unit_scale='auto', freq_axis=True, area=None, normalize=False,
predict_kw={}, **kw):
"""
Plot the specified distribution as a function of t... | 7,104 |
def hilbert( turtle, length, depth ):
    """Draw the depth-1 "U" shape of the Hilbert curve with *turtle*.

    :param turtle: turtle-like object providing left/right/forward
    :param length: length of each straight segment
    :param depth: iteration depth (not used by this base U shape)
    """
    # NOTE: the original used bare triple-quoted strings as mid-function
    # comments; those are no-op expression statements, so they are real
    # comments now.
    # Trace the three sides of the U.
    turtle.left(90)
    turtle.forward(length)
    turtle.right(90)
    turtle.forward(length)
    turtle.right(90)
    turtle.forward(length)
    # Finally turn into the same direction we started with.
    turtle.left(90)
def test_pokemon_color_read(client: TestClient):
"""Test case for pokemon_color_read
"""
headers = {
}
response = client.request(
"GET",
"/api/v2/pokemon-color/{id}/".format(id=56),
headers=headers,
)
# uncomment below to assert the status code of the HTTP res... | 7,106 |
def dispatch(hostlist, commands, required_gpus=1, required_gpu_mem=8, required_cpu_mem=0, log_target='file'):
"""Main dispatcher method.
Arguments
----------
hostlist : list
List of hostnames or addresses
commands : list
List of command strings, as would be written in shell. Ensure the correct working direct... | 7,107 |
def profitsharing_order(self, transaction_id, out_order_no, receivers, unfreeze_unsplit,
appid=None, sub_appid=None, sub_mchid=None):
"""请求分账
:param transaction_id: 微信支付订单号,示例值:'4208450740201411110007820472'
:param out_order_no: 商户分账单号,只能是数字、大小写字母_-|*@,示例值:'P20150806125346'
:para... | 7,108 |
def merge_time_batch_dims(x: Tensor) -> Tensor:
"""
Pack the time dimension into the batch dimension.
Args:
x: input tensor
Returns:
output tensor
"""
if xnmt.backend_dynet:
((hidden_dim, seq_len), batch_size_) = x.dim()
return dy.reshape(x, (hidden_dim,), batch_size=batch_size_ * seq_len)... | 7,109 |
def test_read_fhd_write_read_uvfits_no_layout():
"""
Test errors/warnings with with no layout file.
"""
fhd_uv = UVData()
files_use = testfiles[:-3] + [testfiles[-2]]
# check warning raised
with uvtest.check_warnings(UserWarning, "No layout file"):
fhd_uv.read(files_use, run_check=F... | 7,110 |
def get_log_likelihood(P, v, subs_counts):
"""
The stationary distribution of P is empirically derived.
It is proportional to the codon counts by construction.
@param P: a transition matrix using codon counts and free parameters
@param v: stationary distribution proportional to observed codon counts... | 7,111 |
def create_identity_model(
model_dir,
signature_name=(
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY),
tags=(tf.saved_model.tag_constants.SERVING,)):
"""Create a model and saved it in SavedModel format."""
g, signature_def_map = _identity_string_graph(signature_name)
_wr... | 7,112 |
def allChampMast(_reg, _summonerId, _apiKey):
"""Get all champion mastery entries sorted by number of champion points descending"""
response = requests.get("https://" + _reg + ".api.riotgames.com/lol/champion-mastery/v4/champion-masteries/by-summoner/" + _summonerId +"?api_key=" + _apiKey)
data = json.load... | 7,113 |
def union(l1, l2):
    """Return the union of two lists as a list (element order unspecified)."""
    return list(set(l1).union(l2))
def actual_kwargs():
"""
Decorator that provides the wrapped function with an attribute 'actual_kwargs' containing just those keyword
arguments actually passed in to the function.
Based on code from http://stackoverflow.com/a/1409284/127480
"""
def decorator(function):
def inner(*args... | 7,115 |
def k1(f, t, y, paso):
    """First Runge-Kutta increment: the step size times f evaluated at (t, y).

    f    : function to integrate; returns an np.ndarray
    t    : time at which to evaluate f
    y    : state at which to evaluate f
    paso : step size to use
    """
    return paso * f(t, y)
def script_from_json_string(json_string, base_dir=None):
    """Parse *json_string* as JSON and build a Script instance from it.

    A falsy payload (e.g. ``null`` or ``[]``) is treated as an empty script.
    """
    data = json.loads(json_string) or []
    return script_from_data(data, base_dir)
def write_version_file(version):
"""Writes a file with version information to be used at run time
Parameters
----------
version: str
A string containing the current version information
Returns
-------
version_file: str
A path to the version file
"""
try:
gi... | 7,118 |
def check_api_key(key: str, hashed: str) -> bool:
    """
    Check an API key string against a hashed one from the user database.

    Uses hmac.compare_digest rather than ``==`` so the comparison runs in
    constant time, avoiding a timing side channel on the hash check.

    :param key: the API key to check
    :type key: str
    :param hashed: the hashed key to check against
    :type hashed: str
    :return: True if the hash of *key* matches *hashed*
    """
    import hmac  # local import keeps the fix self-contained

    return hmac.compare_digest(hash_api_key(key), hashed)
def travel_chart(user_list, guild):
"""
Builds the chart to display travel data for Animal Crossing
:param user_list:
:param guild:
:return:
"""
out_table = []
fruit_lookup = {'apple': '🍎', 'pear': '🍐', 'cherry': '🍒', 'peach': '🍑', 'orange': '🍊'}
for user in user_list:
... | 7,120 |
def evaluate_hyperparameters(parameterization):
""" Train and evaluate the network to find the best parameters
Args:
parameterization: The hyperparameters that should be evaluated
Returns:
float: classification accuracy """
net = Net()
net, _, _ = train_bayesian_optimization(net=net,... | 7,121 |
def get_post_type(h_entry, custom_properties=[]):
"""
Return the type of a h-entry per the Post Type Discovery algorithm.
:param h_entry: The h-entry whose type to retrieve.
:type h_entry: dict
:param custom_properties: The optional custom properties to use for the Post Type Discovery algorithm.
... | 7,122 |
def get_start_end(sequence, skiplist=['-','?']):
"""Return position of first and last character which is not in skiplist.
Skiplist defaults to ['-','?'])."""
length=len(sequence)
if length==0:
return None,None
end=length-1
while end>=0 and (sequence[end] in skiplist):
end-=1
... | 7,123 |
def view_event(request, eventid):
"""
View an Event.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param eventid: The ObjectId of the event to get details for.
:type eventid: str
:returns: :class:`django.http.HttpResponse`
"""
ana... | 7,124 |
def main():
"""Main script."""
(opts, args) = parser.parse_args()
if opts.howto:
print HOWTO
return 1
if not args:
print "No sensor expression given."
parser.print_usage()
return 1
if opts.cm_url in CM_URLS:
cm = CentralStore(CM_URLS[opts.cm_url], o... | 7,125 |
def archiveOpen(self, file, path):
"""
This gets added to the File model to open a path within an archive file.
:param file: the file document.
:param path: the path within the archive file.
:returns: a file-like object that can be used as a context or handle.
"""
return ArchiveFileHandle(s... | 7,126 |
def get_masked_bin(args, key: int) -> str:
"""Given an input, output, and mask type: read the bytes, identify the factory, mask the bytes, write them to disk."""
if args.bin == None or args.mask == None:
logger.bad("Please specify -b AND -m (bin file and mask)")
return None
# get the bytes... | 7,127 |
def count_entries(df, col_name = 'lang'):
"""Return a dictionary with counts of
occurrences as value for each key."""
# Initialize an empty dictionary: cols_count
cols_count = {}
# Extract column from DataFrame: col
col = df[col_name]
# Iterate over the column in DataFrame
for entry in c... | 7,128 |
def print_header(size: int, name: str):
"""
Print a header looking like this :
------
NAME
------
:param size: width of the header in characters
:type size: int
:param name: name displayed by the header
:type name: str
"""
templateBar = "{:-^" + str(size) + "s}"
templat... | 7,129 |
def bo_run_7():
"""
Run the bayesian optimization experiemnt 7.
Same as experiment 6, but with ARD kernel, different tolerance
and different max_iter.
"""
import GPyOpt
import pickle
exper = IE5_experiment_6(0,2)
domain =[{'name': 'init_runs', 'type': 'd... | 7,130 |
def polyPoke(*args, **kwargs):
    """
    Introduces a new vertex in the middle of the selected face, and connects it to the rest of the vertices of the face.

    Returns: `string` The node name

    NOTE(review): looks like an auto-generated command stub (Maya-style);
    the 'pass' body is presumably replaced or dispatched elsewhere at
    runtime -- confirm before relying on it.
    """
    pass
def workerfunc(prob, *args, **kwargs):
    """Forward to prob.run_simulation so a class method can be driven
    through a plain module-level function (for the multiprocessing package)."""
    run = prob.run_simulation
    return run(*args, **kwargs)
def ensure_pytest_builtin_helpers(helpers='skip raises'.split()):
""" hack (py.test.) raises and skip into builtins, needed
for applevel tests to run directly on cpython but
apparently earlier on "raises" was already added
to module's globals.
"""
import __builtin__
for helper... | 7,133 |
async def client(hass, hass_ws_client):
    """Fixture that can interact with the config manager API.

    Sets up the 'config' component restricted to the 'core' section, then
    returns a connected websocket client for it.
    """
    with patch.object(config, "SECTIONS", ["core"]):
        assert await async_setup_component(hass, "config", {})
    return await hass_ws_client(hass)
def test_curve_plot(curve):
"""
Tests mpl image of curve.
"""
fig = curve.plot().get_figure()
return fig | 7,135 |
async def _app_parser_stats():
"""Retrieve / update cached parser stat information.
Fields:
id: identifier of parser
size_doc: approximate size (bytes) per document or null
"""
parser_cfg = faw_analysis_set_util.lookup_all_parsers(
app_mongodb_conn.delegate, app_config)
... | 7,136 |
async def test_publishing_with_custom_encoding(
hass,
mqtt_mock_entry_with_yaml_config,
caplog,
service,
topic,
parameters,
payload,
template,
):
"""Test publishing MQTT payload with command templates and different encoding."""
domain = siren.DOMAIN
config = copy.deepcopy(DEF... | 7,137 |
def test_announcement_get_price_result():
    """Testing the announcement get_price_result function."""
    # TODO: implement this test. The bare 'assert False' is a deliberate
    # placeholder so the suite keeps flagging the missing coverage.
    assert False
def update_wishlists(wishlist_id):
"""
Update a Wishlist
This endpoint will update a Wishlist based the body that is posted
"""
app.logger.info('Request to Update a wishlist with id [%s]', wishlist_id)
check_content_type('application/json')
wishlist = Wishlist.find(wishlist_id)
if not w... | 7,139 |
def simplifiedview(av_data: dict, filehash: str) -> str:
"""Builds and returns a simplified string containing basic information about the analysis"""
neg_detections = 0
pos_detections = 0
error_detections = 0
for engine in av_data:
if av_data[engine]['category'] == 'malicious' or av_data[e... | 7,140 |
def decompress_hyper(y_strings, y_min_vs, y_max_vs, y_shape, z_strings, z_min_v, z_max_v, z_shape, model, ckpt_dir):
"""Decompress bitstream to cubes.
Input: compressed bitstream. latent representations (y) and hyper prior (z).
Output: cubes with shape [batch size, length, width, height, channel(1)]
"""
print... | 7,141 |
def GetManualInsn(ea):
    """
    Get manual representation of instruction.

    Thin wrapper over idaapi.get_manual_insn.

    @param ea: linear address
    @note: This function returns value set by SetManualInsn earlier.
    """
    return idaapi.get_manual_insn(ea)
def test_regular_expression(exp=".*", text="", fLOG=fLOG):
"""
Tests a regular expression.
@param exp regular expression
@param text text to check
@param fLOG logging function
"""
fLOG("regex", exp)
fLOG("text", text)
ex = re.compile(exp)
ma = ex.search(t... | 7,143 |
def feature_time(data: pd.DataFrame) -> pd.DataFrame:
"""
Time Feature Engineering.
"""
# print(data)
# print(data.info())
day = 24*60*60
year = (365.2425)*day
time_stp = data['time'].apply(
lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:00") if isinstance(x, str) else x
).ma... | 7,144 |
def _transform(mock_file) -> Tuple[List[Page], SaneJson]:
    """Load the mock file and split it into report-ready pages plus its sane JSON."""
    tf = Transform(get_mock(mock_file, ret_dict=False))
    # Preserve the original call order: sane JSON first, then pages.
    sane = tf.get_sane_json()
    page_list = tf.get_pages()
    return page_list, sane
def ProcessOptions():
"""Process and validate command line arguments and options"""
MiscUtil.PrintInfo("Processing options...")
# Validate options...
ValidateOptions()
AllowParamFailure = True
if re.match("^No", Options["--allowParamFailure"], re.I):
AllowParamFailure = False
... | 7,146 |
def make_table(rows: List[List[Any]], labels: Optional[List[Any]] = None, centered: bool = False) -> str:
"""
:param rows: 2D array containing object that can be converted to string using `str(obj)`.
:param labels: Array containing the column labels, the length must equal that of rows.
:param center... | 7,147 |
def quantify_leakage(align_net_file, train_contigs, valid_contigs, test_contigs, out_dir):
"""Quanitfy the leakage across sequence sets."""
def split_genome(contigs):
genome_contigs = []
for ctg in contigs:
while len(genome_contigs) <= ctg.genome:
genome_contigs.append([])
genome_contig... | 7,148 |
def coef_determ(y_sim, y_obs):
"""
calculate the coefficient of determination
:param y_sim: series of simulated values
:param y_obs: series of observed values
:return:
"""
assert y_sim.ndim == 1 and y_obs.ndim == 1 and len(y_sim) == len(y_obs)
r = np.corrcoef(y_sim, y_obs)
r2 = r[0... | 7,149 |
async def generate_latest_metrics(client):
"""Generate the latest metrics and transform the body."""
resp = await client.get(prometheus.API_ENDPOINT)
assert resp.status == HTTPStatus.OK
assert resp.headers["content-type"] == CONTENT_TYPE_TEXT_PLAIN
body = await resp.text()
body = body.split("\n"... | 7,150 |
def vsphere(r):
    """Print the volume of a sphere of radius r."""
    volume = 4 / 3 * math.pi * r ** 3
    print(volume)
def carrierchecker_bundles(mcc, mnc, hwid):
"""
:param mcc: Country code.
:type mcc: int
:param mnc: Network code.
:type mnc: int
:param hwid: Device hardware ID.
:type hwid: str
"""
releases = networkutils.available_bundle_lookup(mcc, mnc, hwid)
print("\nAVAILABLE BUNDLES:")
... | 7,152 |
def cmd_tags(request):
"""Список тегов
Вывод топа клубов с количеством сообщений для каждого
redeye: tags
"""
tags = list(x.doc for x in (yield objs.Tag.find_sort({'_id': {'$ne': ''}}, [('value', -1)], limit=20)))
defer.returnValue(dict(ok=True, format='tags', tags=tags,
... | 7,153 |
def test_constructor_loads_info_from_constant():
"""Test non-dev mode loads info from SERVERS constant."""
hass = MagicMock(data={})
with patch.dict(cloud.SERVERS, {
'beer': {
'cognito_client_id': 'test-cognito_client_id',
'user_pool_id': 'test-user_pool_id',
'reg... | 7,154 |
def simulate(population: int, n: int, timer: int) -> int:
"""
Recursively simulate population growth of the fish.
Args:
population (int): Starting population
n (int): Number of days to simulate.
timer (int): The reset timer of the fish
initialised at 6 or 8 depending on ... | 7,155 |
def execute_deeper(source, table1, table2, number_of_pairs, destination, predictionsFileName):
"""
This method runs deeper on a dataset.
"""
table1_path = ""
table2_path = ""
predictions_file_path = ""
pred_pairs_path = ""
threshold_path = ""
if source.endswith("/"):
table1_... | 7,156 |
def normalize_valign(valign, err):
"""
Split align into (valign_type, valign_amount). Raise exception err
if align doesn't match a valid alignment.
"""
if valign in (TOP, MIDDLE, BOTTOM):
return (valign, None)
elif (isinstance(valign, tuple) and len(valign) == 2 and
valign[0... | 7,157 |
def svn_auth_open(*args):
    """
    svn_auth_open(svn_auth_baton_t auth_baton, apr_array_header_t providers,
        apr_pool_t pool)

    Thin wrapper over the SWIG binding.
    """
    # 'apply(f, args)' was removed in Python 3; argument unpacking is the
    # equivalent, portable form.
    return _core.svn_auth_open(*args)
def create_repository(git):
    """Create repository.

    :param git: git client/context (currently unused)
    :raises click.ClickException: always -- this command is not implemented yet
    """
    raise click.ClickException("Not implemented")
def quote_key(key):
    """Escape the special character '/' in a key (percent-encoded as %2F)."""
    return '%2F'.join(key.split('/'))
def test_extras_import_mode_strange(new_archive):
"""Check a mode that probably does not make much sense but is still available
(keep original, create new, delete)"""
imported_node = import_extras(new_archive)
imported_node = modify_extras(new_archive, imported_node, mode_existing=('k', 'c', 'd'))
... | 7,161 |
def download_knbc(target_dir: str):
"""Downloads the KNBC corpus and extracts files.
Args:
target_dir: A path to the directory to expand files.
"""
os.makedirs(target_dir, exist_ok=True)
download_file_path = os.path.join(target_dir, 'knbc.tar.bz2')
try:
urllib.request.urlretrieve(RESOURCE_URL, down... | 7,162 |
def middle(word):
    """Return *word* with its first and last characters removed."""
    inner = slice(1, -1)
    return word[inner]
def test_recordmodel(mock_program_dao):
    """Get a record model instance for each test function."""
    # Select the RAMSTKMissionRecord under test; _all=False presumably
    # returns a single record rather than a list -- confirm in do_select_all.
    dut = mock_program_dao.do_select_all(RAMSTKMissionRecord, _all=False)
    yield dut
    # Delete the device under test.
    del dut
def surprise_communities(g_original, initial_membership=None, weights=None, node_sizes=None):
"""
Surprise_communities is a model where the quality function to optimize is:
.. math:: Q = m D(q \\parallel \\langle q \\rangle)
where :math:`m` is the number of edges, :math:`q = \\frac{\\sum_c m_c}{m}`,... | 7,165 |
def advisory_factory(adv_data, adv_format, logger):
"""Converts json into a list of advisory objects.
:param adv_data: A dictionary describing an advisory.
:param adv_format: The target format in ('default', 'ios')
:param logger: A logger (for now expecting to be ready to log)
:returns advisory inst... | 7,166 |
def _validate_int(value, min, max, type):
"""Validates a constrained integer value.
"""
try:
ivalue = int(value)
if min and max:
if ivalue < min or ivalue > max:
raise ValueError()
except ValueError:
err = f"{type} index must be an integer"
if... | 7,167 |
def validate_broker(broker_definition):
# type: (list)
"""Validate broker-definition.
Set broker-list as a string for admin-conf: 'host:port,host:port'.
Keyword arguments:
broker_definition -- list containing broker. Pattern per broker: 'host:port'.
"""
broker_def_list = []
for broker i... | 7,168 |
def get_azpl(cdec, cinc, gdec, ginc):
"""
gets azimuth and pl from specimen dec inc (cdec,cinc) and gdec,ginc (geographic) coordinates
"""
TOL = 1e-4
Xp = dir2cart([gdec, ginc, 1.])
X = dir2cart([cdec, cinc, 1.])
# find plunge first
az, pl, zdif, ang = 0., -90., 1., 360.
while zdif... | 7,169 |
def read_embroidery(reader, f, settings=None, pattern=None):
"""Reads fileobject or filename with reader."""
if reader == None:
return None
if pattern == None:
pattern = EmbPattern()
if is_str(f):
text_mode = False
try:
text_mode = reader.READ_FILE_... | 7,170 |
def test_load_configuration():
"""
loads the configuration store from it's relevant file.
the store has not been loaded yet.
"""
try:
create_config_file('new_settings_load.ini')
config_services.load_configuration('new_settings_load')
sections = config_services.get_section_na... | 7,171 |
def load_stdlib_public_names(version: str) -> dict[str, frozenset[str]]:
"""Load stdlib public names data from JSON file"""
if not re.fullmatch(r"\d+\.\d+", version):
raise ValueError(f"{version} is not a valid version")
try:
json_file = Path(__file__).with_name("stdlib_public_names") / (
... | 7,172 |
def deep_q_learning(sess,
env,
q_estimator,
target_estimator,
state_processor,
num_episodes,
experiment_dir,
replay_memory_size=500000,
replay_memory_init_size=... | 7,173 |
def obs_cb(store):
    """
    Callback for the observe thread: log and clear the store's obs_key_cas map.
    """
    if store:
        log.info("obs_cb: clear obs_key_cas %s" % store.obs_key_cas)
        store.obs_key_cas.clear()
    else:
        log.error("obs_cb is broken")
def mask_inside_range(cube, minimum, maximum):
    """
    Mask data inside a specific threshold range.

    Any cube value lying between *minimum* and *maximum* is masked off;
    the (mutated) cube is returned.
    """
    masked = np.ma.masked_inside(cube.data, minimum, maximum)
    cube.data = masked
    return cube
def s3_client() -> client:
    """
    Return a boto3 s3 client, pointed at AWS_RESOURCES_ENDPOINT when that
    endpoint url is configured; otherwise use the default endpoint.
    """
    kwargs = {"endpoint_url": AWS_RESOURCES_ENDPOINT} if AWS_RESOURCES_ENDPOINT else {}
    return client("s3", **kwargs)
def ts_ac_plot(ts_arr, t_d, dt, p=2):
"""
Plot autocorrelations, mean += std dev at each lag over a list of time series arrays.
Plot in reference to scintillation timescale and time resolution, up to lag p.
"""
ac_dict = {'lag': [], 'ac': [], 'type': []}
p = min(p+1, len(ts_arr[0])) - 1
... | 7,177 |
def tokenize_with_new_mask(orig_text, max_length, tokenizer, orig_labels, orig_re_labels, label_map, re_label_map):
"""
tokenize a array of raw text and generate corresponding
attention labels array and attention masks array
"""
pad_token_label_id = -100
simple_tokenize_results = [list(tt)... | 7,178 |
def zonal_stats_from_raster(vector, raster, bands=None, all_touched=False, custom=None):
"""
Compute zonal statistics for each input feature across all bands of an input
raster. God help ye who supply large non-block encoded rasters or large
polygons...
By default min, max, mean, standard deviati... | 7,179 |
async def test_flow_all_discovered_bridges_exist(hass, aioclient_mock):
"""Test config flow discovers only already configured bridges."""
aioclient_mock.get(const.API_NUPNP, json=[
{'internalipaddress': '1.2.3.4', 'id': 'bla'}
])
MockConfigEntry(domain='hue', data={
'host': '1.2.3.4'
... | 7,180 |
def plot_rna(data_merged, id_cell, framesize=(7, 7), path_output=None,
ext="png"):
"""Plot cytoplasm border and RNA spots.
Parameters
----------
data_merged : pandas.DataFrame
Dataframe with the coordinate of the cell and those of the RNA.
id_cell : int
ID of the cell t... | 7,181 |
def RMSE(stf_mat, stf_mat_max):
"""error defined as RMSE"""
size = stf_mat.shape
err = np.power(np.sum(np.power(stf_mat - stf_mat_max, 2.0))/(size[0]*size[1]), 0.5)
return err | 7,182 |
def update_efo():
    """Update experimental factor ontology (EFO).

    Fetches the latest EFO OBO release and refreshes the local resource.
    """
    url = 'https://www.ebi.ac.uk/efo/efo.obo'
    # remove_prefix presumably strips the ontology prefix from term ids,
    # and BFO is the only external namespace allowed through -- confirm
    # against OboClient.update_resource.
    OboClient.update_resource(path, url, 'efo', remove_prefix=True,
                              allowed_external_ns={'BFO'})
def get_read_only_permission_codename(model: str) -> str:
    """
    Compose the read-only permission code name for a model.

    :param model: model name
    :type model: str
    :return: code name of the form '<READ_ONLY_ADMIN_PERMISSION_PREFIX>_<model>'
    :rtype: str
    """
    prefix = settings.READ_ONLY_ADMIN_PERMISSION_PREFIX
    return "_".join((prefix, model))
def hours_to_minutes( hours: str ) -> int:
    """Convert a whole-hour string to the equivalent number of minutes."""
    whole_hours = int(hours)
    return 60 * whole_hours
def RenameNotes(windowID):
"""
Rename selected notetrack.
"""
slotIndex = cmds.optionMenu(OBJECT_NAMES[windowID][0]+"_SlotDropDown", query=True, select=True)
currentIndex = cmds.textScrollList(OBJECT_NAMES[windowID][0]+"_NoteList", query=True, selectIndexedItem=True)
if currentIndex != None and ... | 7,186 |
def core_profiles_summary(ods, time_index=None, fig=None, combine_dens_temps=True, show_thermal_fast_breakdown=True, show_total_density=True, **kw):
"""
Plot densities and temperature profiles for electrons and all ion species
as per `ods['core_profiles']['profiles_1d'][time_index]`
:param ods: input o... | 7,187 |
def migrate_source_attribute(attr, to_this, target_file, regex):
"""Updates __magic__ attributes in the source file"""
change_this = re.compile(regex, re.S)
new_file = []
found = False
with open(target_file, "r") as fp:
lines = fp.readlines()
for line in lines:
if line.startswi... | 7,188 |
def sir_model():
    """
    Return a density-dependent population process (rmf.DDPP) encoding an
    SIR model via three transitions over the (S, I, R) state vector.
    """
    model = rmf.DDPP()
    # Transition order preserved from the original definition.
    model.add_transition([-1, 1, 0], lambda x: x[0] + 2 * x[0] * x[1])
    model.add_transition([0, -1, +1], lambda x: x[1])
    model.add_transition([1, 0, -1], lambda x: 3 * x[2] ** 3)
    return model
def read_cmupd(strip_stress=False, apostrophe="'"):
"""Read the CMU-Pronunciation Dictionary
Parameters
----------
strip_stress : bool
Remove stress from pronunciations (default ``False``).
apostrophe : str | bool
Character to replace apostrophe with in keys (e.g., "COULDN'T"; defau... | 7,190 |
def validate(loader, model, logger_test, samples_idx_list, evals_dir):
"""
Evaluate the model on dataset of the loader
"""
softmax = torch.nn.Softmax(dim=1)
model.eval() # put model to evaluation mode
confusion_mtrx_df_val_dmg = pd.DataFrame(columns=['img_idx', 'class', 'true_pos', 'true_n... | 7,191 |
def my_eval(inputstring, seq, xvalues=None, yvalues=None):
"""
Evaluate a string as an expression to make a data set.
This routine attempts to evaluate a string as an expression.
It uses the python "eval" function. To guard against bad inputs,
only numpy, math and builtin functions can be used in ... | 7,192 |
def readlines(filepath):
    """
    Read a text file and return its whitespace-stripped, non-blank lines.

    :param filepath: path of the file to read
    :return: list[line]
    """
    with open(filepath, 'rt') as f:
        raw = f.readlines()
    return [line.strip() for line in raw if line.strip()]
def process_images():
    """Download the image named by the 'img_url' query parameter.

    NOTE(review): relies on the framework-provided `request` object and the
    module-level `downloader` helper; returns whatever downloader returns.
    """
    return downloader(request.args.get('img_url'))
def batch_lambda_handler(event, lambda_context):
"""Entry point for the batch Lambda function.
Args:
event: [dict] Invocation event. If 'S3ContinuationToken' is one of the keys, the S3 bucket
will be enumerated beginning with that continuation token.
lambda_context: [LambdaContext] ... | 7,195 |
def get_gpu_memory_map():
"""Get the current gpu usage.
Returns
-------
usage: dict
Keys are device ids as integers.
Values are memory usage as integers in MB.
"""
result = subprocess.check_output(
[
'nvidia-smi', '--query-gpu=memory.free',
... | 7,196 |
def n_tuple(n):
    """Factory for n-tuples.

    Returns a constructor that converts a sequence of exactly *n* items
    into a tuple, raising TypeError for any other length.
    """
    def checked_tuple(data):
        count = len(data)
        if count != n:
            raise TypeError(
                f'{n}-tuple requires exactly {n} items '
                f'({count} received).'
            )
        return tuple(data)
    return checked_tuple
def test_silhouette():
"""
Testing silhouette score, using make_clusters
and corresponding labels to test error
bounds.
"""
clusters, labels = make_clusters(scale=0.2, n=500)
# start
ss = Silhouette()
# get scores for each clabel in clusters
get_scores =ss.score(clusters, labels)
# check bounds
assert g... | 7,198 |
def novel_normalization(data, base):
"""Initial data preparation of CLASSIX."""
if base == "norm-mean":
# self._mu, self._std = data.mean(axis=0), data.std()
_mu = data.mean(axis=0)
ndata = data - _mu
_scl = ndata.std()
ndata = ndata / _scl
elif base == "pca":
... | 7,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.