content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def extras_features(*features):
"""
Decorator used to register extras provided features to a model
"""
def wrapper(model_class):
# Initialize the model_features store if not already defined
if "model_features" not in registry:
registry["model_features"] = {f: collections.def... | 10,600 |
def setFeedMoleFraction(H2COxRatio, CO2COxRatio):
"""
set inlet feed mole fraction
"""
# feed properties
# H2/COx ratio
# H2COxRatio = 2.0
# CO2/CO ratio
# CO2COxRatio = 0.8
# mole fraction
y0_H2O = 0.00001
y0_CH3OH = 0.00001
y0_DME = 0.00001
# total molar fractio... | 10,601 |
def ship_hit(ai_settings, stat, screen, sb, ship, aliens, bullets):
"""Respond to ship being hit by aliens
"""
if stat.ship_left > 0:
# Decrement ship_left
stat.ship_left -= 1
# Update scoreboard
sb.prep_ship()
# Delete bullets and aliens
bullets.empty()
... | 10,602 |
def format(message, *args, **kwargs):
    """Shortcut for :class:`tossi.Formatter.format` of the default registry.

    :param message: the format string to render
    :param args: positional format arguments, forwarded as a tuple
    :param kwargs: keyword format arguments, forwarded as a mapping
    :return: the formatted string
    """
    # NOTE: intentionally shadows the builtin ``format`` as a module-level
    # shortcut. vformat takes args/kwargs as collections, not unpacked.
    return formatter.vformat(message, args, kwargs)
def reverse_url(url_name,id,request):
"""
编辑标签返回当前页
:param url_name:
:param id:
:param request:
:return:
"""
from django.http.request import QueryDict
path = request.get_full_path()
query_dict_obj = QueryDict(mutable=True)
query_dict_obj['next'] = path
encode_url = query_dict_obj.urlencode()
prefix_path ... | 10,604 |
def add_default_to_data(data: Dict[str, object], schema: SchemaDictType) -> Dict[str, object]:
"""Adds the default values present in the schema to the required fields
if the values are not provided in the data
"""
# add non as defaults to the field that is not required and does not have
# a def... | 10,605 |
def repeating_chars(text: str, *, chars: str, maxn: int = 1) -> str:
"""Normalize repeating characters in `text`.
Truncating their number of consecutive repetitions to `maxn`.
Duplicates Textacy's `utils.normalize_repeating_chars`.
Args:
text (str): The text to normalize.
chars: One or... | 10,606 |
def export_data_csv():
""" Build a CSV file with the Order data from the database
:return: The CSV file in StringIO
"""
result = query_order.get_all_orders()
output = io.StringIO()
writer = csv.writer(output)
line = ['Numéro de commande', 'Date', 'Montant total', 'Numéro client', 'Référe... | 10,607 |
def findurls(s):
    """Use a regex to pull URLs from a message.

    :param s: the message text to scan
    :return: list of matched URL strings (full match of each hit)
    """
    # Fix: the "www." prefix previously used an unescaped dot, so "wwwX"
    # followed by any character also satisfied the group; escape it.
    # Pattern: optional scheme, optional "www.", dotted host, optional path.
    regex = r"(?i)\b(((https?|ftp|smtp):\/\/)?(www\.)?[a-zA-Z0-9_.-]+\.[a-zA-Z0-9_.-]+(\/[a-zA-Z0-9#]+\/?)*\/*)"
    # findall returns a tuple per hit (one entry per group); keep the full
    # match, which is the outermost group.
    url = re.findall(regex, s)
    return [x[0] for x in url]
def collide_rect(left, right):
"""collision detection between two sprites, using rects.
pygame.sprite.collide_rect(left, right): return bool
Tests for collision between two sprites. Uses the pygame.Rect colliderect
function to calculate the collision. It is intended to be passed as a
collided call... | 10,609 |
def test_maddrs(host_maddrs, expected_maddrs):
    """Test that the multiple addresses are correctly assigned."""
    strategy = CollaborativeStrategy(target_batch_size=1, host_maddrs=host_maddrs)
    # The strategy should forward the addresses unchanged into the DHT kwargs.
    actual_maddrs = strategy.dht.kwargs["host_maddrs"]
    assert actual_maddrs == expected_maddrs
def expand_envvars(d):
""" Recursively convert lookup that look like environment vars in a dict
This function things that environmental variables are values that begin
with `$` and are evaluated with :func:`os.path.expandvars`. No exception
will be raised if an environment variable is not set.
Arg... | 10,611 |
def get_minhash(
doc: str,
normalization_func: Callable,
split_method: str,
ngram_size: int,
ngram_stride: int,
num_minhashes: int,
random_seed: int,
) -> LeanMinHash:
"""Returns a minhash fingerprint for the given document.
Args:
doc (str):
The document to creat... | 10,612 |
def test_emphasis_484():
"""
Test case 484: (part 3) Rule 17
"""
# Arrange
source_markdown = """*<img src="foo" title="*"/>"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):*:]",
'[raw-html(1,2):img src="foo" title="*"/]',
"[end-para:::True]",
]
expect... | 10,613 |
def create_profile(sender, instance, created, **kwargs):
"""
una señal que crea un profile automaticamente al momento de registrar un nuevo usuario
esto permitiria la edicion de un modulo de usuario para la compra de codigos
como en las redes sociales, falto la señal para eliminar un usuario cuando se elimina su pr... | 10,614 |
def silu(x):
    """Sigmoid Linear Unit (SiLU) function, also known as the swish function.

    silu(x) = x * sigmoid(x).

    :param x: input value (assumed to be a real scalar — TODO confirm callers
        never pass arrays; a vectorized sigmoid would be needed for those)
    :return: x * sigmoid(x)
    """
    import math

    # Fix: the original body was empty, so the function implicitly returned
    # None. Implement the formula documented in the docstring.
    return x * (1.0 / (1.0 + math.exp(-x)))
def tokenize(text):
"""
Function:
tokenize: This function splits text into words and return the root form of the words
Args:
text(str): the message
Return:
lemm(list of str): a list of the root form of the message words
"""
# Normalizing text (a-zA-Z0-9 matches all al... | 10,616 |
def displayRandomForest():
"""Run displayRandomForest"""
executionStartTime = int(time.time())
# status and message
success = True
message = "ok"
plotUrl = ''
dataUrl = ''
# get model1, var1, pres1, model2, var2, pres2, start time, end time, lon1, lon2, lat1, lat2, nSample
center =... | 10,617 |
def get_reports(request):
"""
Get a list of all :model:`reporting.Report` entries associated with
an individual :model:`users.User` via :model:`rolodex.Project` and
:model:`rolodex.ProjectAssignment`.
"""
active_reports = []
active_projects = (
ProjectAssignment.objects.select_relate... | 10,618 |
def get_db():
    """Database dependency.

    This dependency creates a new SessionLocal used for a single
    request and closes it when the request is completed.

    Yields:
        A SessionLocal session instance.
    """
    # Fix: create the session *before* entering the try block. Previously,
    # if SessionLocal() itself raised, the finally clause executed
    # ``db.close()`` on an unbound name, masking the original error with
    # an UnboundLocalError.
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
def test_get_well_position_with_top_offset(
decoy: Decoy,
well_plate_def: LabwareDefinition,
standard_deck_def: DeckDefinitionV3,
labware_view: LabwareView,
subject: GeometryView,
) -> None:
"""It should be able to get the position of a well top in a labware."""
labware_data = LoadedLabware(... | 10,620 |
def create_response(key, value):
    """Return a generic AWS Lambda proxy response object.

    :param key: name of the single field to place in the JSON body
    :param value: value of that field
    :return: dict in the API Gateway proxy-integration response format
    """
    body = json.dumps({key: value})
    return {
        "statusCode": 200,
        "headers": {"Content-Type": "application/json"},
        "body": body,
    }
def load_image_buffer_to_tensor(image_buf, device):
"""Maps image bytes buffer to tensor
Args:
image_buf (bytes buffer): The image bytes buffer
device (object): The pytorch device object
Returns:
py_tensor tensor: Pytorch tensor
"""
image = Image.open(io.BytesIO(image_buf))... | 10,622 |
def alpha2tand(freq, a, b, n):
"""Convert Halpern's 'a' and 'b' from an absorption coefficient
of the form `a*freq**b` to a (frequency-dependent) loss tangent.
Parameters
----------
freq : numpy array or float
The frequency (Hz) (or frequencies) at which to calculate the loss
tangen... | 10,623 |
def convert_path_to_pixels(path):
"""
Purpose:
---
This function should convert the obtained path (list of tuples) to pixels.
Teams are free to choose the number of points and logic for this conversion.
Input Arguments:
---
`path` : [ list ]
Path returned from task_4a.find_path() function.
Returns:
... | 10,624 |
def uniform_prob(*args, prob=None, inside=None, pscale=1.):
""" Uniform probability function for discrete and continuous vtypes. """
# Detect ptype, default to prob if no values, otherwise detect vtype
assert len(args) >= 1, "Minimum of a single positional argument"
pscale = eval_pscale(pscale)
use_logs = ... | 10,625 |
def update_bullets(game_settings, screen, stats, sb, ship, aliens, bullets):
"""Update position of bullets and get rid of old bullets"""
# Update live bullets position on screen (group autocalls for all bullets in sprite)
bullets.update()
# Remove old bullets out of the screen space
for bullet in ... | 10,626 |
def obtenerListaArchivos(path: str):
    """Return a list of the files matching the given glob pattern.

    :param path: glob pattern to expand
    :return: list of matching paths
    """
    # recursive=True lets "**" components walk into subdirectories.
    return glob.glob(path, recursive=True)
def is_equal_to(amount: float) -> Predicate:
    """Says that a field is exactly equal to some constant amount."""
    # Exact equality is the degenerate case of the tolerance-based
    # comparison: zero tolerance and zero taper.
    return is_nearly_equal_to(amount, tolerance=0, taper=0)
def get_version():
    """Return the current version info.

    The first call to this function will call version_info.load() and cache
    the result for later calls.
    """
    global _version
    # Fast path: version info already loaded and cached.
    if _version is not None:
        return _version
    _version = version_info.load()
    return _version
def instance_power_specs_delete(context, instance_uuid, session=None):
""" Removes an existing Server PowerSpecs from the Database """
# If we weren't given a session, then we need to create a new one
if not session:
session = nova_db_sa_api.get_session()
# Create a Transaction around the delete... | 10,630 |
def copy_file_or_flo(input_, output, buffer_size=64 * 1024, cb=None):
""" Copy a file name or file-like-object to another file name or file-like object"""
from os import makedirs
from os.path import isdir, dirname
assert bool(input_)
assert bool(output)
input_opened = False
output_opened ... | 10,631 |
def load_yaml(fname):
"""Load a YAML file."""
yaml = YAML(typ="safe")
# Compat with HASS
yaml.allow_duplicate_keys = True
# Stub HASS constructors
HassSafeConstructor.name = fname
yaml.Constructor = HassSafeConstructor
with open(fname, encoding="utf-8") as conf_file:
# If config... | 10,632 |
def unmix_cvxopt(data, endmembers, gammaConst=0, P=None):
"""
******************************************************************
unmix finds an accurate estimation of the proportions of each endmember
Syntax: P2 = unmix(data, endmembers, gammaConst, P)
This product is Copyright (c) 2013 University o... | 10,633 |
def _accumulate_reward(
timestep: dm_env.TimeStep,
episode_return: float) -> float:
"""Accumulates rewards collected over the course of an episode."""
if timestep.reward and timestep.reward != 0:
logging.info('Reward: %s', timestep.reward)
episode_return += timestep.reward
if timestep.first():
... | 10,634 |
def unsafe_load(stream):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    Resolve all tags, even those known to be
    unsafe on untrusted input.
    """
    # SECURITY: UnsafeLoader can construct arbitrary Python objects from
    # YAML tags — never call this on untrusted input.
    return load(stream, UnsafeLoader)
def load_tl_gan_model():
"""
Load the linear model (matrix) which maps the feature space
to the GAN's latent space.
"""
with open(FEATURE_DIRECTION_FILE, 'rb') as f:
feature_direction_name = pickle.load(f)
# Pick apart the feature_direction_name data structure.
feature_direction = f... | 10,636 |
def _find_test_file_from_report_file(base_path: str, report: str) -> Optional[Path]:
"""
Find test file from cucumber report file path format
e.g) Test-features-foo-hoge.xml -> features/foo/hoge.feature or features/foo-hoge.feature
"""
report_file = os.path.basename(report)
report_file = report... | 10,637 |
def luminance(qcolor):
    """Give the pseudo-equivalent greyscale value of this color.

    :param qcolor: color object exposing red()/green()/blue() accessors
    :return: greyscale value as an int
    """
    # Weighted average favouring green (0.2 R + 0.6 G + 0.2 B), truncated.
    # NOTE(review): these are not the ITU-R 601/709 luma weights — presumably
    # a deliberate rough approximation; confirm before "fixing".
    red = qcolor.red()
    green = qcolor.green()
    blue = qcolor.blue()
    return int(0.2*red + 0.6*green + 0.2*blue)
def read_info(path, layer=None, encoding=None):
"""Read information about an OGR data source.
`crs` and `geometry` will be `None` and `features` will be 0 for a
nonspatial layer.
Parameters
----------
path : str or pathlib.Path
layer : [type], optional
Name or index of layer in dat... | 10,639 |
def save_hdf5(path, freq, traces, **kwargs):
"""Save GWINC budget data to an HDF5 file.
The `freq` argument should be the frequency array, and `traces`
should be the traces (recursive) dictionary. Keyword arguments
are stored in the HDF5 top level 'attrs' key-value store. If an
'ifo' keyword arg ... | 10,640 |
def _macro_cons_opec_month():
"""
欧佩克报告-月度, 数据区间从 20170118-至今
这里返回的具体索引日期的数据为上一个月的数据, 由于某些国家的数据有缺失,
只选择有数据的国家返回
:return: pandas.Series
阿尔及利亚 安哥拉 厄瓜多尔 加蓬 伊朗 伊拉克 科威特 利比亚 尼日利亚 \
2017-01-18 108.0 172.4 54.5 21.3 372.0 463.2 281.2 60.8 154.2
2017-0... | 10,641 |
def p_object_path_expr_1(p):
    """
    object_path_expr : empty
    """
    # NOTE: the docstring above is a PLY (yacc) grammar production, not
    # documentation — PLY parses it at runtime, so it must not be edited.
    # An empty object path reduces to an empty list.
    p[0] = []
def createparser():
"""Create an :class:`argparse.ArgumentParser` instance
:return: parser instance
:rtype: :class:`argparse.ArgumentParser`
"""
parser = argparse.ArgumentParser(prog=__package__,
description=__doc__)
s = parser.add_subparsers()
# create... | 10,643 |
def get_activation(preact_dict, param_name, hook_type):
"""
Hooks used for in sensitivity schedulers (LOBSTE, Neuron-LOBSTER, SERENE).
:param preact_dict: Dictionary in which save the parameters information.
:param param_name: Name of the layer, used a dictionary key.
:param hook_type: Hook type.
... | 10,644 |
def createAbsorption(cfgstr):
    """Construct Absorption object based on provided configuration (using available factories).

    :param cfgstr: configuration string passed straight through to the
        Absorption constructor
    :return: a new Absorption instance
    """
    return Absorption(cfgstr)
def add_arrow(
ax: plt.Axes,
from_square: str,
to_square: str,
alpha=1.0,
color="black",
):
"""
Adds an arrow from one square to the next
Draws an arrow connecting two squares together. Can be used to represent
moves.
Parameters
----------
ax: plt.Axes
Axes cont... | 10,646 |
def move(*args, **kwargs):
    """
    The move command is used to change the positions of geometric objects.
    Returns: None
    """
    # NOTE(review): stub body — presumably an auto-generated signature for an
    # external command (the real work happens elsewhere); confirm before
    # relying on this to actually move anything.
    pass
def data_route(session, df, site_info, file_id, fname, initial_upload=True):
"""
:param session: SQLAlchemy database session instance
:param df: Pandas dataframe containing CODAR data
:param site_id: ID of CODAR site in hfrSites table
:param file_id: ID for the file metadata that was updated to hfr... | 10,648 |
def get_optimizer_config():
"""Gets configuration for optimizer."""
optimizer_config = configdict.ConfigDict()
# Learning rate scheduling. One of: ["fixed", "exponential_decay"]
optimizer_config.learning_rate_scheduling = "exponential_decay"
# Optimization algorithm. One of: ["SGD", "Adam", "RMSprop"].
opt... | 10,649 |
def flat2seq(x: Tensor, num_features: int) -> Tensor:
"""Reshapes tensor from flat format to sequence format.
Flat format: (batch, sequence x features)
Sequence format: (batch, sequence, features)
Args:
x (Tensor): a tensor in the flat format (batch, sequence x features).
num_features ... | 10,650 |
def run_results(results_data, time_column, pathway_column, table_letters, letters,
dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4,
original_transitions, simulation_transitions,
intervention_codes, target, individuals,
save_location, simulation_... | 10,651 |
def get_spatial_anchors_account(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSpatialAnchorsAccountResult:
"""
Get information about an Azure Spatial Anchors Accou... | 10,652 |
def test_agilent_3d():
""" 3D time agilent, pipe <-> agilent, pipe """
# prepare Agilent converter
vdic, vdata = ng.varian.read(os.path.join(DATA_DIR, "agilent_3d"))
uvdic = ng.varian.guess_udic(vdic, vdata)
vC = ng.convert.converter()
vC.from_varian(vdic, vdata, uvdic)
# prepare NMRPipe co... | 10,653 |
def format_query(str_sql):
    """Strips all newlines, excess whitespace, and spaces around commas.

    :param str_sql: raw SQL text, possibly multi-line
    :return: single-line SQL with single spaces and tight commas
    """
    no_newlines = str_sql.replace("\n", " ")
    # Collapse any whitespace run to one space, trim the ends.
    collapsed = re.sub(r"\s+", " ", no_newlines).strip()
    # Remove the spaces hugging each comma.
    return re.sub(r"(\s*,\s*)", ",", collapsed)
def make_ytick_labels(current_ticks, n, numstring = ""):
    """Format tick values as label strings, appending an optional suffix.

    Whole numbers render without a decimal part; everything else is shown
    with one decimal place.

    :param current_ticks: iterable of numeric tick values
    :param n: accepted for interface compatibility; not used here
    :param numstring: suffix appended to every label (e.g. a unit)
    :return: list of label strings
    """
    def label_for(value):
        if int(value) == value:
            return f"{int(value)}{numstring}"
        return f"{value:.1f}{numstring}"

    return [label_for(tick) for tick in current_ticks]
def check_token(token):
    """
    Returns `True` if *token* is a valid XML token, as defined by XML
    Schema Part 2.
    """
    # The empty string is trivially valid.
    if token == '':
        return True
    # No tabs, CR, or LF anywhere; internal whitespace limited to single
    # spaces between non-space characters.
    match = re.match(
        "[^\r\n\t ]?([^\r\n\t ]| [^\r\n\t ])*[^\r\n\t ]?$", token)
    return match is not None
def save_experiment_results(data_dir, prefix, run_time, travel_cost):
"""
An utility function to save experiment results with time info.
Parameters
----------
data_dir: path-like object
Path of data folder.
prefix: str
Name of the experiment.
run_time: array
List of ... | 10,657 |
def download_audio(url, dir_path):
"""
Extract audio track from YouTube video and save to given path.
:param url: YouTube video URL
:param dir_path: Path to save audio file
"""
opts = {
'format': 'bestaudio/best',
'forcefilename': True,
'outtmpl': str(dir_path),
... | 10,658 |
def generate_random_data(n=10):
    """Generate ``n`` uniform random values in [0, 1).

    :param n: number of samples to draw (default 10)
    :return: numpy array of shape (n,)
    """
    # Fix: the sample count was hard-coded to 10, silently ignoring ``n``.
    return rand(n)
def get_basenames(root, path, remove='.py'):
"""Get file basenames of a folder.
Args:
root (str): Root path
path (str): Path to folder
remove (str, optional): Defaults to '.py'. Part to remove from filename.
Returns:
list: list of names
"""
regex = re.compile(remove... | 10,660 |
def test_json_dumps():
    """ Implicitly tests DynamoEncoder """
    # TODO(review): placeholder — this asserts nothing. Add a real
    # round-trip through json.dumps(..., cls=DynamoEncoder) so the encoder
    # is actually exercised.
    assert True
def get_phoible_feature_list(var_to_index):
    """
    Function that takes a var_to_index object and return a list of Phoible segment features
    :param var_to_index: a dictionary mapping variable name to index(column) number in Phoible data
    :return : list of feature-column names (everything after the first 11
        keys — presumably metadata columns; confirm against the Phoible schema)
    """
    # dicts preserve insertion order, so slicing the key list skips the
    # leading non-feature columns.
    all_columns = list(var_to_index)
    return all_columns[11:]
def split_data(dataset):
"""Split pandas dataframe to data and labels."""
data_predictors = [
"Steps_taken",
"Minutes_sitting",
"Minutes_physical_activity",
"HR",
"BP",
]
X = dataset[data_predictors]
y = dataset.Health
x_train, x_test, y_train, y_test = ... | 10,663 |
def validate_max_incidents(max_incidents: str) -> None:
"""
Validates the value of max_incident parameter.
:params max_incidents: In fetch-incident maximum number of incidents to return.
:raises ValueError: if max incidents parameter is not a positive integer.
:return: None
"""
try:
... | 10,664 |
def test_reservoir_param_type(simulation):
""" Verify reservoir param."""
assert isinstance(simulation.res_param, dict) | 10,665 |
def multiple_ticker_tca_aggregated_with_results_example():
"""Example of how to do TCa analysis on multiple tickers with TCAResults
"""
tca_engine = TCAEngineImpl(version=tca_version)
# Run a TCA computation for multiple tickers, calculating slippage
tca_request = TCARequest(start_date=start_date,... | 10,666 |
def add_header(response):
    """
    Add headers to both force latest IE rendering engine or Chrome Frame.

    :param response: response object exposing a mutable ``headers`` mapping
    :return: the same response object, mutated in place
    """
    headers = response.headers
    headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
    return response
def recursive_isomorphism_counter(smp, matching, *,
unspec_cover, verbose, init_changed_cands, tmplt_equivalence=False,
world_equivalence=False):
"""
Recursive routine for solving subgraph isomorphism.
Parameters
----------
smp : MatchingProblem
A subgraph matching problem
... | 10,668 |
def test_zarr_to_anndata(benchmark, tmp_path):
"""Test loading anndata from zarr"""
_, setup_output = setup_anndata(fpath='spots_reduced.csv', out_dir=tmp_path)
ann_obj = setup_output['ann_obj']
tmp_out_path = setup_output['out_path']
ann_obj.write_zarr(tmp_out_path)
read_func = partial(anndata... | 10,669 |
def draw_lane_on_unwarped_frame(frame, left_line, right_line, trsf_mtx_inv):
""" Drawing of the unwarped lane lines and lane area to the current frame.
Args:
left_line: left Line instance
right_line: right Line instance
trsf_mtx_inv: inverse of the perspective transformation matri... | 10,670 |
def _init_buffer_file() -> str:
"""Returns file path to the temporary buffer file. Creates the
temp directory and temp buffer file.
"""
if not os.path.exists(".git"):
raise NotAGitRepoException(f"No .git folder found. {os.getcwd()} is not a git repo!")
file_path = os.path.join(".git", "MKCOM... | 10,671 |
def n_states_of_vec(l, nval):
""" Returns the amount of different states a vector of length 'l' can be
in, given that each index can be in 'nval' different configurations.
"""
if type(l) != int or type(nval) != int or l < 1 or nval < 1:
raise ValueError("Both arguments must be positive integ... | 10,672 |
def remove_items_from_dict(a_dict, bad_keys):
"""
Remove every item from a_dict whose key is in bad_keys.
:param a_dict: The dict to have keys removed from.
:param bad_keys: The keys to remove from a_dict.
:return: A copy of a_dict with the bad_keys items removed.
"""
new_dict = {}
for ... | 10,673 |
def writeObject(img_array, obj_array, bbox):
"""Writes depression objects to the original image.
Args:
img_array (np.array): The output image array.
obj_array (np.array): The numpy array containing depression objects.
bbox (list): The bounding box of the depression object.
Returns:... | 10,674 |
def create_info_message_files(msg=None, msg_details=None):
"""
Creates the _alt_msg.txt and _alt_msg_details.txt
files for population into the job status json.
:param msg: The short info message. Can be a list or a string.
Should be shorter than 35 characters.
:param msg_details: The message d... | 10,675 |
def process_model(current_val):
"""
:param current_val: model generated by sat solver, atom is satisfied if in modal.
:return tuple of sets comprising true and false atoms.
"""
true_atoms, false_atoms = set(), set()
for atom in current_val:
if current_val[atom]:
true_atoms.... | 10,676 |
def filter_strace_output(lines):
"""
a function to filter QEMU logs returning only the strace entries
Parameters
----------
lines : list
a list of strings representing the lines from a QEMU log/trace.
Returns
-------
list
a list of strings representing only the strace l... | 10,677 |
def export_gmf_xml(key, dest, sitecol, imts, ruptures, rlz,
investigation_time):
"""
:param key: output_type and export_type
:param dest: name of the exported file
:param sitecol: the full site collection
:param imts: the list of intensity measure types
:param ruptures: an ord... | 10,678 |
def track_state_change(entity_ids, from_state=None, to_state=None):
"""Decorator factory to track state changes for entity id."""
def track_state_change_decorator(action):
"""Decorator to track state changes."""
event.track_state_change(HASS, entity_ids,
functool... | 10,679 |
def phrase():
    """Generate and return random phrase."""
    # Wrap the generated text in the API's description model.
    text = random_phrase.make_random_text()
    return models.PhraseDescription(text=text)
def classify_tweets(text):
"""
classify tweets for tweets about car accidents and others
:param text: tweet text
:return: boolean, true if tweet is about car accident, false for others
"""
return text.startswith(u'בשעה') and (
(u'הולך רגל' in text or
u'הולכת רגל' in text... | 10,681 |
def _build_results(drift_type, raw_metrics):
"""Generate all results for queried time window or run id of some a datadriftdetector.
:param raw_metrics: origin data diff calculation results.
:return: a list of result dict.
"""
results = []
for metric in raw_metrics:
ep = _properties(met... | 10,682 |
def minima():
"""
This is a minima value
"""
v = pd.read_csv(
"src\kjpcjs.csv")
lang = ['Kotlin', 'Java', 'Python', 'C++', 'JavaScript']
mx = list()
for i in lang:
x = min(v[i])
mx.append(x)
for i in range(0, 5):
print(lang[i], "minimum gross is", mx[i])... | 10,683 |
def no_test_server_credentials():
"""
Helper function that returns true when TEST_INTEGRATION_*
credentials are undefined or empty.
"""
client_id = getattr(settings, 'TEST_INTEGRATION_CLIENT_ID', None)
username = getattr(settings, 'TEST_INTEGRATION_USERNAME', None)
password = getattr(setting... | 10,684 |
async def test_veltpvp_status(mcsrvstats_client: Client) -> None:
"""Checks veltpvp returns correct data if status and last played is set."""
f = open("tests/html/test_veltpvp_status.html")
html = f.read()
with aioresponses() as m:
m.get("https://www.veltpvp.com/u/xtreepvps", status=200, body=ht... | 10,685 |
def skip_spaces(st: ST) -> Tuple[ST, Any]:
    """
    Skip over leading whitespace from the current position.

    :param st: parser state as a (position, source) pair
    :return: the advanced state and ``None`` as the parsed value
    """
    index, source = st
    length = len(source)
    # Advance past every consecutive whitespace character.
    while index < length and source[index].isspace():
        index += 1
    return (index, source), None
def mixed_phone_list():
    """Return mixed phone number list."""
    # Fixture accessor: the list itself is a module-level constant defined
    # elsewhere in this file.
    return _MIXED_PHONE_LIST_
def bootstrap(config):
"""
Configure the existing account for subsequent deployer runs.
Create S3 buckets & folders, upload artifacts required by
infrastructure to them.
Args:
config: dictionary containing all variable settings required
to run terraform with
Returns:
... | 10,688 |
def get_report_summary(report):
"""
Retrieve the docstring summary content for the given report module.
:param report: The report module object
:returns: the first line of the docstring for the given report module
"""
summary = None
details = get_report_details(report)
if not details:
... | 10,689 |
def transform_toctree(doc: AstDoc, code: Code):
"""convert toctree to nested <ul> tag"""
headers = doc.headers()
def get_children(index, node):
for sub_index in range(index + 1, len(headers)):
child = headers[sub_index]
if child.header_level() == node.header_level() + 1:
... | 10,690 |
def load_dataset(dataset_identifier, train_portion='75%', test_portion='25%', partial=None):
"""
:param dataset_identifier:
:param train_portion:
:return: dataset with (image, label)
"""
# splits are not always supported
# split = ['train[:{0}]'.format(train_portion), 'test[{0}:]'.format(tes... | 10,691 |
def stop(ids: List[str]):
    """Stop one or more instances

    :param ids: instance ids to stop
    :return: a zero-argument callable that, when invoked, calls
        ``ec2.stop_instances`` with the given ids (deferred execution)
    """
    return functools.partial(ec2.stop_instances, InstanceIds=ids)
def add_page_to_index(index, url, content):
"""
Takes three inputs:
- index (dictionary)
- url (string)
- content (string)
Updates the index to include all of the word occurrences found in
the page content by adding the url to the word's associated url list.
"""
words = content.split... | 10,693 |
def search_names(word, archive=TAXDB_NAME, name="names.dmp", limit=None):
"""
Processes the names.dmp component of the taxdump.
"""
# Needs a taxdump to work.
if not os.path.isfile(archive):
utils.error("taxdump file not found (download and build it first)")
# Open stream into the tarf... | 10,694 |
def simple_parse(config_file):
"""
Do simple parsing and home-brewed type interference.
"""
config = ConfigObj(config_file, raise_errors=True)
config.walk(string_to_python_type)
# Now, parse input and output in the Step definition by hand.
_step_io_fix(config)
return(config) | 10,695 |
def deserialize_columns(headers, frames):
"""
Construct a list of Columns from a list of headers
and frames.
"""
columns = []
for meta in headers:
col_frame_count = meta["frame_count"]
col_typ = pickle.loads(meta["type-serialized"])
colobj = col_typ.deserialize(meta, fra... | 10,696 |
def server_siteone_socket_udp(parms):
""" A simple echo server """
server_host = parms['host']
server_port = parms['port']
server_protocol = parms['protocol']
data_payload = 2048
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind the socket to the p... | 10,697 |
def get_site_config(sites_path=None, site_path=None):
"""Returns `site_config.json` combined with `sites/common_site_config.json`.
`site_config` is a set of site wide settings like database name, password, email etc."""
config = {}
sites_path = sites_path or getattr(local, "sites_path", None)
site_path = site_pat... | 10,698 |
def words_with_joiner(joiner):
    """Pass through words unchanged, but add a separator between them."""
    def join_word(i, word, _):
        # The very first word gets no leading separator.
        if i == 0:
            return word
        return joiner + word

    return (NOSEP, join_word)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.