content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def needed_to_build_multi(deriv_outputs, existing=None, on_server=None):
"""
:param deriv_outputs: A mapping from derivations to sets of outputs.
:type deriv_outputs: ``dict`` of ``Derivation`` to ``set`` of ``str``
"""
if existing is None:
existing = {}
if on_server is None:
on... | 9,600 |
def iscode(c):
    """Return True if *c* could be lines of code, i.e. a list of strings.

    An empty list counts as code; a non-empty list is judged by its first
    element only (preserving the original heuristic). Anything that is not
    a list is rejected.
    """
    if not isinstance(c, list):
        return False
    # Empty list is accepted; otherwise sample the first element only.
    return not c or isinstance(c[0], str)
def get_comment_list(request, thread_id, endorsed, page, page_size, requested_fields=None):
"""
Return the list of comments in the given thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id of th... | 9,602 |
def files(name: str, dependencies=False, excludes=None) -> List[PackagePath]:
"""
List all files belonging to a distribution.
Arguments:
name:
The name of the distribution.
dependencies:
Recursively collect files of dependencies too.
excludes:
Dis... | 9,603 |
def ready_to_delete_data_node(name, has_executed, graph):
"""
Determines if a DataPlaceholderNode is ready to be deleted from the
cache.
Args:
name:
The name of the data node to check
has_executed: set
A set containing all operations that have been executed so fa... | 9,604 |
def search():
"""Use open ewather api to look up current weather conditions given a city/ city, country"""
global response
#Get API response
#URL and my api key....USE YOUR OWN API KEY!
url = 'https://api.openweathermap.org/data/2.5/weather'
api_key = '6da92ea5e09090fa9c8a08e08eb30284' #... | 9,605 |
def extract_hash_parts(repo):
    """Split the SHA-1 hex digest of *repo* into (first two chars, rest)."""
    digest = hashlib.sha1(repo.encode("utf-8")).hexdigest()
    prefix, remainder = digest[:2], digest[2:]
    return prefix, remainder
def _testComponentReferences():
    # NOTE: the docstring below is executed as a doctest — it checks that
    # Font.componentReferences is stable before and after a glyph lookup.
    # Do not edit the expected output without re-running the doctest suite.
    # NOTE(review): the expected reprs use Python-2-style set literals
    # (set(['C'])); under Python 3 these doctests would fail — confirm
    # which interpreter runs this suite.
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> font = Font(getTestFontPath())
    >>> font.componentReferences
    {'A': set(['C']), 'B': set(['C'])}
    >>> glyph = font["C"]
    >>> font.componentReferences
    {'A': set(['C']), 'B': set(['C'])}
    """
def create_cloud_mask(im_QA, satname, cloud_mask_issue):
"""
Creates a cloud mask using the information contained in the QA band.
KV WRL 2018
Arguments:
-----------
im_QA: np.array
Image containing the QA band
satname: string
short name for the satellite: ```'L5', 'L7', 'L8... | 9,608 |
def split_bucket(s3_key):
    """Split an s3 location string into (bucket_name, key).

    Falls back to (None, original string) when no bucket can be parsed.
    """
    parsed = re.match(r'(?:s3://)?([^/]+)/(.*)', s3_key, re.IGNORECASE)
    if parsed:
        return parsed.group(1), parsed.group(2)
    return None, s3_key
def contains_chinese(ustr):
    """Return True if *ustr* contains at least one CJK unified ideograph.

    (The original docstring claimed the function *removes* Chinese
    characters; it actually only tests for their presence.)

    Args:
        ustr: the string to check
    Returns:
        bool: True when any character falls in U+4E00..U+9FFF
    """
    return any('\u4e00' <= char <= '\u9fff' for char in ustr)
def create_publish_recipe_file(component_name, component_version, parsed_component_recipe):
"""
Creates a new recipe file(json or yaml) with anme `<component_name>-<component_version>.extension` in the component
recipes build directory.
This recipe is updated with the component version calculated and a... | 9,611 |
def MakeCdf(live):
"""Plot the CDF of pregnancy lengths for live births.
live: DataFrame for live births
"""
cdf = thinkstats2.Cdf(live.prglngth, label='prglngth')
thinkplot.Cdf(cdf)
thinkplot.Save('cumulative_prglngth_cdf',
title='Pregnancy length',
xla... | 9,612 |
def make_non_writable(location):
    """Make *location* non-writable, for test purposes.

    On POSIX the write bit is stripped from the current mode; elsewhere
    we fall back to making the location non-readable instead.
    """
    if not on_posix:
        make_non_readable(location)
        return
    mode = stat.S_IMODE(os.lstat(location).st_mode)
    os.chmod(location, mode & ~stat.S_IWRITE)
def payment_activity():
"""Request for extra-curricular activity"""
try:
req_json = request.get_json(force=True)
except TypeError:
return jsonify(message='Invalid json input'), 400
activity_info = req_json['activity']
student = req_json['student']
envelope_args = {
'sign... | 9,614 |
def gdc_to_dos_list_response(gdcr):
"""
Takes a GDC list response and converts it to GA4GH.
:param gdc:
:return:
"""
mres = {}
mres['data_objects'] = []
for id_ in gdcr.get('ids', []):
mres['data_objects'].append({'id': id_})
if len(gdcr.get('ids', [])) > 0:
mres['nex... | 9,615 |
def get_labels_by_db_and_omic_from_graph(graph):
"""Return labels by db and omic given a graph."""
db_subsets = defaultdict(set)
db_entites = defaultdict(dict)
entites_db = defaultdict(dict)
# entity_type_map = {'Gene':'genes', 'mirna_nodes':'mirna', 'Abundance':'metabolites', 'BiologicalProcess':'... | 9,616 |
def python_modules():
"""Determine if there are python modules in the cwd.
Returns:
list of python modules as strings
"""
ignored = ["setup.py", "conftest.py"]
py_modules = []
for file_ in os.listdir(os.path.abspath(os.curdir)):
if file_ in ignored or not os.path.isfile(file_)... | 9,617 |
def covid_API_england ():
"""Function retrieves date, hospital admissions, total deaths
and daily cases using government API"""
england_only = [
'areaType=nation',
'areaName=England'
]
dates_and_cases = {
"date": "date",
"newCasesByPublishDate": "newCasesByPublis... | 9,618 |
def test_should_bail_out(check, instance):
"""
backoff should give up after 3 attempts
"""
with mock.patch('datadog_checks.php_fpm.php_fpm.requests') as r:
attrs = {'raise_for_status.side_effect': FooException()}
r.get.side_effect = [
mock.MagicMock(status_code=503, **attrs),... | 9,619 |
def _complete_uninstall(
action : Optional[List[str]] = None,
**kw : Any
) -> List[str]:
"""
Override the default Meerschaum `complete_` function.
"""
if action is None:
action = []
options = {
'plugin': _complete_uninstall_plugins,
'plugins': _complete_un... | 9,620 |
def test_bucket():
    """Provide the bucket name shared across the whole test suite."""
    bucket_name = 'test_bucket'
    return bucket_name
def validate_args(args):
""" Validate all of the arguments parsed.
Args:
args (argparser.ArgumentParser) : Args parsed by the argument parser.
Returns:
args (CoreclrArguments) : Args parsed
Notes:
If the arguments are valid then return them all in a tuple. If not,
... | 9,622 |
def test_edit_product(login, navigator, service, threescale):
"""
Test:
- Create service via API
- Edit service via UI
- Assert that name is correct
- Assert that description is correct
"""
edit = navigator.navigate(ProductEditView, product=service)
edit.update("updat... | 9,623 |
def _path_restrict(path, repo):
"""Generate custom package restriction from a given path.
This drops the repo restriction (initial entry in path restrictions)
since runs can only be made against single repo targets so the extra
restriction is redundant and breaks several custom sources involving
ra... | 9,624 |
def us_send_code():
"""
Send code view.
This takes an identity (as configured in USER_IDENTITY_ATTRIBUTES)
and a method request to send a code.
"""
form_class = _security.us_signin_form
if request.is_json:
if request.content_length:
form = form_class(MultiDict(request.ge... | 9,625 |
def login_user(email):
    """Record *email* in the session to mark the user as logged in."""
    session.update(user=email)
def merge_texts(files, file_index, data_type):
""" merge the dataframes in your list """
dfs, filenames = get_dataframe_list(files, file_index, data_type)
# enumerate over the list, merge, and rename columns
try:
df = dfs[0]
# print(*[df_.columns for df_ in dfs],sep='\n')
for i, ... | 9,627 |
def create_dictionary(names, months, years, max_sustained_winds, areas_affected, updated_damages, deaths):
"""Create dictionary of hurricanes with hurricane name as the key and a dictionary of hurricane data as the value."""
hurricanes = dict()
num_hurricanes = len(names)
for i in range(num_hurricanes):
hur... | 9,628 |
def poison_weights_by_pretraining(
poison_train: str,
clean_train: str,
tgt_dir: str,
poison_eval: str = None,
epochs: int = 3,
L: float = 10.0,
ref_batches: int = 1,
label: int = 1,
seed: int = 0,
model_type: str = "bert",
model_name_or_path: str = "bert-base-uncased",
o... | 9,629 |
def cov(c):
    """Check code coverage: run pytest under coverage, then emit the
    terminal and HTML reports."""
    for command in (
        f"coverage run -m py.test {test_file}",
        f"coverage report -m {files}",
        f"coverage html {files}",
    ):
        c.run(command)
def test_create_pod():
"""Launch simple pod in DC/OS root marathon.
"""
_clear_pods()
client = marathon.create_client()
pod_id = "/pod-create"
pod_json = _pods_json()
pod_json["id"] = pod_id
client.add_pod(pod_json)
deployment_wait()
pod = client.show_pod(pod_id)
assert pod ... | 9,631 |
def create_variable_type(parent, nodeid, bname, datatype):
"""
Create a new variable type
args are nodeid, browsename and datatype
or idx, name and data type
"""
nodeid, qname = _parse_nodeid_qname(nodeid, bname)
if datatype and isinstance(datatype, int):
datatype = ua.NodeId(datatyp... | 9,632 |
def test_two_proteins_same_gene_in_gff3_2(): # ***Incomplete test
"""Test the two_proteins_same_gene_in_gff3_2 function in the paralogue_counter.py file.
"""
##########################
# Arrange.
sql_database = "sql_database"
prot_id_1 = "prot_id_1"
prot_id_2 = "prot_id_2"
############... | 9,633 |
def load_NWP(input_nc_path_decomp, input_path_velocities, start_time, n_timesteps):
"""Loads the decomposed NWP and velocity data from the netCDF files
Parameters
----------
input_nc_path_decomp: str
Path to the saved netCDF file containing the decomposed NWP data.
input_path_velocities: str
... | 9,634 |
def test_stable_config(tmp_path, config, defaultenv):
"""
A dumped, re-read and re-dumped config should match the dumped config.
Note: only dump vs. re-dump must be equal, as the original config file might
be different because of default values, whitespace, and quoting.
"""
# Set environment ... | 9,635 |
def test_state_distinguishability_yyd_density_matrices():
"""Global distinguishability of the YYD states should yield 1."""
psi0 = bell(0) * bell(0).conj().T
psi1 = bell(1) * bell(1).conj().T
psi2 = bell(2) * bell(2).conj().T
psi3 = bell(3) * bell(3).conj().T
states = [
np.kron(psi0, ps... | 9,636 |
def get_ts(fn, tc, scale=0):
"""Returns timestamps from a frame number and timecodes file or cfr fps
fn = frame number
tc = (timecodes list or Fraction(fps),tc_type)
scale default: 0 (ns)
examples: 3 (µs); 6 (ms); 9 (s)
"""
scale = 9 - scale
tc, tc_type = tc
if tc_type... | 9,637 |
def apply(script: str,
cube: str,
output: str,
params: str,
variables: str,
dask: str,
format: str,
dtype: str):
"""
Apply a function to data cubes.
The function is used to transform N chunks of equal shape to a new chunk of same shape.
... | 9,638 |
def init():
"""Initialize the global QWebSettings."""
cache_path = standarddir.cache()
data_path = standarddir.data()
QWebSettings.setIconDatabasePath(standarddir.cache())
QWebSettings.setOfflineWebApplicationCachePath(
os.path.join(cache_path, 'application-cache'))
QWebSettings.globalS... | 9,639 |
def adapt_array(array):
"""
Using the numpy.save function to save a binary version of the array,
and BytesIO to catch the stream of data and convert it into a BLOB.
:param numpy.array array: NumPy array to turn into a BLOB
:return: NumPy array as BLOB
:rtype: BLOB
"""
out = BytesIO()
... | 9,640 |
def main(input_path, output_path):
"""
The input path is where Python can find the folder.
The output path is where the generated html can find the folder.
"""
current_path = os.path.dirname(__file__)
file_path = os.path.relpath(input_path, current_path)
counter = -1
with open(file_pat... | 9,641 |
def write_json(file_path: str, contents: Dict[str, Any]) -> None:
"""Write contents as JSON to file_path.
Args:
file_path: Path to JSON file.
contents: Contents of json as dict.
Raises:
FileNotFoundError: if parent directory of file_path does not exist
"""
if not utils.is_... | 9,642 |
def make_queue(paths_to_image, labels, num_epochs=None, shuffle=True):
"""returns an Ops Tensor with queued image and label pair"""
images = tf.convert_to_tensor(paths_to_image, dtype=tf.string)
labels = tf.convert_to_tensor(labels, dtype=tf.uint8)
input_queue = tf.train.slice_input_producer(
... | 9,643 |
def display_import(request, import_id):
    """Render the detail page for a single RegisteredImport (404 if absent)."""
    imp = get_object_or_404(RegisteredImport, pk=import_id)
    return render(request, 'eats/edit/display_import.html', {'import': imp})
def test_ffmpeg_calls_check_call(mock_check_call):
"""
Should call check_call with the ffmpeg binary and supplied carguments when
capture_stdout is False.
"""
args = ["a", "b", "c"]
avtoolkit.video.ffmpeg(args, capture_stdout=False)
assert mock_check_call.called
assert mock_check_call.ca... | 9,645 |
def TransformContainerAnalysisData(image_name, occurrence_filter=None,
deployments=False):
"""Transforms the occurrence data from Container Analysis API."""
analysis_obj = container_analysis_data_util.ContainerAndAnalysisData(
image_name)
occs = FetchOccurrencesForResource... | 9,646 |
def generate_frames(
ds: "Dataset", reshape: bool = True
) -> Iterable["np.ndarray"]:
"""Yield a *Pixel Data* frame from `ds` as an :class:`~numpy.ndarray`.
.. versionadded:: 2.1
Parameters
----------
ds : pydicom.dataset.Dataset
The :class:`Dataset` containing an :dcm:`Image Pixel
... | 9,647 |
def setup_milp(model, target, remove_blocked=False, exclude_reaction_ids=set()):
"""
This function constructs the MILP.
exclude_reaction_ids takes a list of reaction ids that shouldn't be considered for heterologous addition
(i.e. spontaneous reactions and exchange reactions). These reactions are thus a... | 9,648 |
def bilinear_initializer(shape, dtype, partition_info):
"""
Bilinear initializer for deconvolution filters
"""
kernel = get_bilinear_kernel(shape[0], shape[1], shape[2])
broadcasted_kernel = np.repeat(kernel.reshape(shape[0], shape[1], shape[2], -1), repeats=shape[3], axis=3)
return broadcaste... | 9,649 |
def draw_stalker_scene_menu_item(self, context):
"""draws one scene menu item
"""
logger.debug('entity_id : %s' % self.stalker_entity_id)
logger.debug('entity_name : %s' % self.stalker_entity_name)
layout = self.layout
scene = Task.query.get(self.stalker_entity_id)
# Add Everything
... | 9,650 |
def pdf():
    """Demo version of the PDF report, opened directly in the browser —
    more convenient than downloading it every time."""
    report_path = './output.pdf'
    render_pdf(sample_payload_obj, report_path)
    upload_file(report_path)
    return send_file(report_path, attachment_filename='output.pdf')
def run_clear_db_es(app, arg_env, arg_skip_es=False):
"""
This function actually clears DB/ES. Takes a Pyramid app as well as two flags. _Use with care!_
For safety, this function will return without side-effect on any production system.
Also does additional checks based on arguments supplied:
If... | 9,652 |
def test_login_user_via_session(app):
"""Test the login-via-view function/hack."""
email = 'test@example.org'
password = '1234'
with app.app_context():
user = testutils.create_test_user(email, password)
with app.test_client() as client:
assert not testutils.client_authentica... | 9,653 |
def interp_logp_pressure(sounding, missing=-9999):
"""Interpolate pressure from heights.
Parameters
----------
sounding : dict
Sounding dictionary structure.
Notes
-----
This function is similar to the MR_INTP subroutine from GEMPAK.
"""
i = 0
ilev = -1
klev = -1
... | 9,654 |
def init_total_population():
    """Component 'init total population' (units: person).

    Original equation: init Infected asymptomatic + init Susceptible.
    """
    asymptomatic = init_infected_asymptomatic()
    susceptible = init_susceptible()
    return asymptomatic + susceptible
def test_dll_append(dll_fixture):
    """Three appends plus one push yield a doubly linked list of length 4."""
    for value in ('one', 'two', 'three'):
        dll_fixture.append(value)
    dll_fixture.push('zero')
    assert dll_fixture._len == 4
def one_hot(dim: int, idx: int):
    """Return a length-*dim* float vector with a 1 at position *idx*."""
    basis = np.zeros(dim)
    basis[idx] = 1.0
    return basis
def process_priors(prior_flat, initial_fit):
"""Process prior input array into fit object."""
if any(
[float(val) <= 0 for key, val in prior_flat.items() if key.endswith("sdev")]
):
raise ValueError("Standard deviations must be larger than zero.")
prior = {}
for key, val in initial_... | 9,658 |
def _sample_data(ice_lines, frac_to_plot):
"""
Get sample ice lines to plot
:param ice_lines: all ice lines
:param frac_to_plot: fraction to plot
:return: the sampled ice lines
"""
if frac_to_plot < 1.:
ice_plot_data = ice_lines.sample(int(ice_lines.shape[0] * frac_to_plot))
e... | 9,659 |
def fast_dot(M1, M2):
"""
Specialized interface to the numpy.dot function
This assumes that A and B are both 2D arrays (in practice)
When A or B are represented by 1D arrays, they are assumed to reprsent
diagonal arrays
This function then exploits that to provide faster multiplication
""... | 9,660 |
async def autoredeem(
bot: commands.Bot,
guild_id: int
) -> bool:
"""Iterates over the list of users who have
enabled autoredeem for this server, and if
one of them does redeem some of their credits
and alert the user."""
await bot.wait_until_ready()
conn = bot.db.conn
guild = bot.g... | 9,661 |
def get_ISO_369_3_from_string(term: str,
default: str = None,
strict: bool = False,
hdp_lkg: dict = None) -> str:
"""Convert an individual item to a ISO 369-3 language code, UPPERCASE
Args:
term (str): The input t... | 9,662 |
def append_local2global_config(name: str) -> None:
    """Append the named included configuration to the system configuration.

    Args:
        name (str): the requested configuration
    """
    config = get_config(name)
    __append_to_global_config(config, name)
def plot_heatmap(
data: DataFrame,
columns: Optional[Sequence[str]] = None,
droppable: bool = True,
sort: bool = True,
cmap: Optional[Sequence[str]] = None,
names: Optional[Sequence[str]] = None,
yaxis: bool = False,
xaxis: bool = True,
legend_kws:... | 9,664 |
def iadd_tftensor(left, right, scale=1):
"""This function performs an in-place addition. However, TensorFlow returns
a new object after a mathematical operation. This means that in-place here
only serves to avoid the creation of a TfTensor instance. We do not have
any control over the memory where the T... | 9,665 |
def bookmark_desc_cmd(query):
"""describe: desc [num.. OR url/tag substr..]."""
split_query = query[4:].strip().split()
if not split_query:
sys.stderr.write(BOOKMARK_HELP)
return False
bk_indices = find_bookmark_indices(split_query)
if bk_indices:
return describe_bookmark(bk... | 9,666 |
def encode(string_):
    """Encode a string as a single integer.

    Each character's ordinal becomes a base-256 "digit", least-significant
    first, so encode("ab") == ord("a") + ord("b") * 256. Non-string input
    is first converted with str().

    Note: characters with ord() >= 256 spill into neighbouring digits;
    this matches the original (obfuscated double-lambda) implementation
    exactly.
    """
    return sum(ord(ch) * 256 ** i for i, ch in enumerate(str(string_)))
def generate_file_prefix(bin_params):
    """Use the bin params to generate a file prefix.

    Values are appended to "bin_" with an alternating separator: "-" after
    even-indexed params, "_" after odd-indexed ones (same output as the
    original index-based loop, without quadratic string concatenation).
    """
    parts = ["bin_"]
    for j, param in enumerate(bin_params):
        separator = "-" if j % 2 == 0 else "_"
        parts.append(str(param) + separator)
    return "".join(parts)
def test_assert_constraint_contact_info_not_null():
"""
Check constraint that assures that at least mail phone or url contact info is present.
"""
with pytest.raises(IntegrityError):
JobOfferFactory.create(
remoteness=Remoteness.REMOTE,
location=None,
contact_... | 9,669 |
def test_Collection_build_landsat_c1_toa():
"""Test if the Landsat TOA (non RT) collections can be built"""
coll_obj = default_coll_obj(
collections=['LANDSAT/LC08/C01/T1_TOA', 'LANDSAT/LE07/C01/T1_TOA'])
output = utils.getinfo(coll_obj._build())
assert parse_scene_id(output) == SCENE_ID_LIST
... | 9,670 |
def main():
"""Main method
"""
args = parse_args(sys.argv[1:])
# Parse config file
config_file_loc = args.config_file
with open(str(config_file_loc), 'r') as file:
parsed_config_file = toml.loads(file.read())
# Fetch AWS params
ssm_store = EC2ParameterStore()
param_store_pa... | 9,671 |
def check_input_checkpoint(input_checkpoint):
    """Exit the process with an error message unless *input_checkpoint*
    is a valid checkpoint path or path prefix."""
    if saver_lib.checkpoint_exists(input_checkpoint):
        return
    print("Input checkpoint '{}' doesn't exist!".format(input_checkpoint))
    exit(-1)
def extract_features_from_html(html, depth, height):
"""Given an html text, extract the node based features
including the descendant and ancestor ones if depth and
height are respectively nonzero."""
root = etree.HTML(html.encode('utf-8')) # get the nodes, serve bytes, unicode fails if html has meta
... | 9,673 |
def __clean_field(amazon_dataset, option):
"""Cleanes the Text field from the datset """
clean = []
if option == 1:
for i in amazon_dataset['Text']:
clean.append(__one(i))
elif option == 2:
for i in amazon_dataset['Summary']:
clean.append(__one(i))
else:
... | 9,674 |
def write_bruker_search_path(ftype, destfile, sourcefile=None, sourcetext=None):
"""Will copy a file from sourcefile (out of the add_files directory) or
text to destfile in first directory of Bruker search path for
ftype = cpd, f1, gp, ... with checks for overwrite, identity, etc.
"""
if pp.run_flag... | 9,675 |
def _read_atom_line(line):
"""
COLUMNS DATATYPE FIELD DEFINITION
-------------------------------------------------------------------------------------
1 - 6 RecordName "ATOM "
7 - 11 Integer serial Atom serial number.
13 - 16 Atom ... | 9,676 |
def filter_job_build_by_result(job, *, result):
"""filter build by build results, avaliable results are:
'SUCCESS', 'UNSTABLE', 'FAILURE', 'NOT_BUILT', 'ABORTED'
see: https://javadoc.jenkins-ci.org/hudson/model/Result.html
"""
expect = ['SUCCESS', 'UNSTABLE', 'FAILURE', 'NOT_BUILT', 'ABORTED']
i... | 9,677 |
def semantic_parse_entity_sentence(sent: str) -> List[str]:
"""
@param sent: sentence to grab entities from
@return: noun chunks that we consider "entities" to work with
"""
doc = tnlp(sent)
ents_ke = textacy.ke.textrank(doc, normalize="lemma")
entities = [ent for ent, _ in ents_ke]
re... | 9,678 |
def extract_to_files(pkr_path, verbose=False):
"""
Extract data and image to .json and .png (if any) next to the .pkr
"""
title, buttons, png_data = parse_animschool_picker(pkr_path, verbose)
# Save to json
with open(pkr_path + '.json', 'w') as f:
json.dump([title, buttons], f, indent=4)... | 9,679 |
def readLog(jobpath):
"""
Reads log to determine disk/mem usage, runtime
For processing time, it will only grab the last execution/evict/terminated times.
And runTime supercedes evictTime (eg. an exec->evict combination will not be written if
a later exec-termination combination exists in the log)
... | 9,680 |
def check_table_files_load(i_df, dir_context):
"""Used for rules 0007 and 0009
:param i_df: An investigation DataFrame
:param dir_context: Path to where the investigation file is found
:return: None
"""
for i, study_df in enumerate(i_df['studies']):
study_filename = study_df.iloc[0]['St... | 9,681 |
def match_cam_time(events, frame_times):
"""
Helper function for mapping ephys events to camera times. For each event in events, we return the nearest
camera frame before the event.
Parameters
----------
events : 1D numpy array
Events of interest. Sampled at a higher rate than frame_... | 9,682 |
def _glibc_version_string_ctypes() -> Optional[str]:
"""
Fallback implementation of glibc_version_string using ctypes.
"""
try:
import ctypes
except ImportError:
return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is ... | 9,683 |
def encode_instructions(
stream: Sequence[Instruction],
func_pool: List[bytes],
string_pool: List[bytes],
) -> Tuple[bytearray, List[bytes], List[bytes]]:
"""
Encode the bytecode stream as a single `bytes` object that can be
written to file or kept in memory.
Parameters
----------
s... | 9,684 |
def get_random(selector):
    """Return one random game."""
    connection = MySQLFactory.get()
    return GameController.get_random(connection, selector)
def get_ssh_dispatcher(connection, context):
"""
:param Message context: The eliot message context to log.
:param connection: The SSH connection run commands on.
"""
@deferred_performer
def perform_run(dispatcher, intent):
context.bind(
message_type="flocker.provision.ssh:ru... | 9,686 |
def read_ac(path, cut_off, rnalen):
"""Read the RNA accessibility file and output its positions and values
The file should be a simple table with two columns:
The first column is the position and the second one is the value
'#' will be skipped
"""
access = []
with open(path) as f:
... | 9,687 |
def subtableD0(cxt: DecoderContext, fmt: Format):
    """Decode an ORI instruction (Format VI): mnemonic, operands, size 2."""
    decoded = FormatVI(fmt)
    operands = [
        Imm(decoded.imm16, width=16, signed=False),
        Reg(decoded.reg1),
        Reg(decoded.reg2),
    ]
    return MNEM.ORI, operands, 2
def format_date(unix_timestamp):
""" Return a standardized date format for use in the two1 library.
This function produces a localized datetime string that includes the UTC timezone offset. This offset is
computed as the difference between the local version of the timestamp (python's datatime.fromtimestamp... | 9,689 |
def _env_clear():
"""
clear old extract file, parsed file and combine file.
:return: null
"""
if os.path.isdir(BIN_FILE_PATH):
shutil.rmtree(BIN_FILE_PATH)
os.mkdir(BIN_FILE_PATH)
if os.path.isdir(PARSED_FILE_PATH):
shutil.rmtree(PARSED_FILE_PATH)
os.mkdir(PARSED_FILE_PAT... | 9,690 |
def or_(kb, goals, substitutions=dict(), depth=0, mask=None,
k_max=None, max_depth=1):
"""Base function of prover, called recursively.
Calls and_, which in turn calls or_, in order to recursively calculate scores for every possible proof in proof
tree.
Args:
kb: dict of facts / rules
... | 9,691 |
def print_term(thy, t):
"""More sophisticated printing function for terms. Handles printing
of operators.
Note we do not yet handle name collisions in lambda terms.
"""
def get_info_for_operator(t):
return thy.get_data("operator").get_info_for_fun(t.head)
def get_priority(t):
... | 9,692 |
def build_model():
    """Build and return a model instance."""
    model_factory = get_model()
    return model_factory()
def truncate_field_data(model, data):
"""Truncate all data fields for model by its ``max_length`` field
attributes.
:param model: Kind of data (A Django Model instance).
:param data: The data to truncate.
"""
fields = dict((field.name, field) for field in model._meta.fields)
return dict((n... | 9,694 |
def get_pools(web3):
"""Iterator over all pools. Returns tuples like (token_address, pool_address)."""
router = web3.eth.contract(address=_VETHER_ROUTER_ADDRESS, abi=router_vether_abi)
for index in range(router.functions.tokenCount().call()):
token = router.functions.getToken(index).call()
p... | 9,695 |
def plot_sn(filenames, sn_spectra, wave, idrfilenames, outfname):
"""Return a figure with the SN
Parameters
----------
fname : str
Output file name
"""
sn_max = sn_spectra.max()
day_exp_nums = [fname.split('_')[1:4] for fname in filenames]
phase_strings = [fname.split('_')[-2... | 9,696 |
def append_after(filename="", search_string="", new_string=""):
"""appends "new_string" after a line containing
"search_string" in "filename" """
with open(filename, 'r', encoding='utf-8') as f:
line_list = []
while True:
line = f.readline()
if line == "":
... | 9,697 |
def get_all_students(zip):
"""Returns student tuple for all zipped submissions found in the zip file."""
students = []
# creating all the student objects that we can zip files of
for filename in zip.namelist():
if not filename.endswith(".zip"):
continue
firstname, surname = s... | 9,698 |
def is_shipping_method_applicable_for_postal_code(
customer_shipping_address, method
) -> bool:
"""Return if shipping method is applicable with the postal code rules."""
results = check_shipping_method_for_postal_code(customer_shipping_address, method)
if not results:
return True
if all(
... | 9,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.