content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def sgrib_variable_crop(tmp_grib, nthreads_w, fp_out, logger):
"""
Take the small grib file from grib_to_small_grib and cut it down
to the variables we need
Args:
tmp_grib: File path to small grib2 file
nthreads_w: Number of threads for running wgrib2 commands
fp_out: Path... | 8,700 |
def cs_geo():
    """Build a 2-D geographic coordinate system in the WGS84 datum.

    Returns an initialized ``CSGeo`` instance with lat/lon horizontal
    coordinates and mean sea level as the vertical datum.
    """
    coordsys = CSGeo()
    inventory = coordsys.inventory
    inventory.datumHoriz = "WGS84"
    inventory.datumVert = "mean sea level"
    inventory.spaceDim = 2
    coordsys._configure()
    coordsys.initialize()
    return coordsys
def make_range(value):
    """Return ``range(value)``, i.e. the integers 0 .. value-1."""
    return range(0, value)
def run(actor, observer, content):
    """
    Shortcut to run an Onirim and return the result.
    Returns:
        True if win, False if lose, None if other exception thrown.
    """
    core = Core(actor, observer, content)
    flow = Flow(core)
    return flow.whole()
def sort_bedfile(infile, outfile, add_header: bool = True, sort_by_bedtools: bool = False):
"""
sort bed file
@2020.10.10 by Zhang Yiming: several modifications
1. if infile and outfile is same, use a temp file
2. add parameter to contol the bed header
3. using check_output to better handle the output from comma... | 8,704 |
def GetPoseBoneFCurveFromArmature(armatureObj, poseBoneName, data_path, parameterIndex):
"""
In Blender the FCurves are used to define the Key Frames.
In general, for a single object, there's one FCurve for each of
the following properties.
data_path, index
'location', 0 (.x... | 8,705 |
def get_document_instance(conf=None):
"""
Helper function to get a database Document model instance based on CLA configuration.
:param conf: Same as get_database_models().
:type conf: dict
:return: A Document model instance based on configuration specified.
:rtype: cla.models.model_interfaces.D... | 8,706 |
def test_execute_empty_resolved(_mock_call_insights):
"""Test the function execute."""
with open("tests/data/stack_aggregator_empty_resolved.json", "r") as f:
payload = json.load(f)
r = RecommendationTask()
out = r.execute(arguments=payload, persist=False)
assert out['recommendation'] == "... | 8,707 |
def test_apptuit_send_exception_repr():
"""
Test __repr__ for ApptuitSendException
"""
err = repr(ApptuitSendException(
"test", 400, 1, 1, [{"datapoint": "test", "error": "test_error"}]
))
assert_equals(err, "1 points failed with status: 400\ntest_error error occurred in the "
... | 8,708 |
def get_full_private_keys(gpg: gnupg.GPG) -> List[GPGKey]:
"""Get a list of private keys with a full private part.
GPG supports exporting only the subkeys for a given key, and in this case
a stub of the primary private key is also exported (the stub). This stub
cannot be used to do anything with the pr... | 8,709 |
def create_blueprint():
    """Create and return the health-check Blueprint."""
    bp = Blueprint('Health Check Blueprint', __name__)
    register_root = bp.route('/')
    register_root(healthcheck.healthcheck)
    return bp
def main() -> None:
    """Run Sub Manager through the CLI.

    Thin wrapper that delegates to ``submanager.cli.main()``; exists so
    the package can expose a single console-script entry point.
    """
    submanager.cli.main()
def show_Permissions(dx):
"""
Show where permissions are used in a specific application
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
"""
p = dx.get_permissions( [] )
for i in p:
print i, ":"
for j in p[i]:
show_Path(... | 8,712 |
def playable_card(card, fireworks, n_colors):
# if isinstance(card, pyhanabi.HanabiCard):
# card = {'color':colors[card.color],'rank':card.rank}
"""A card is playable if it can be placed on the fireworks pile."""
if (card.color == pyhanabi.HanabiCard.ColorType.kUnknownColor
and card().rank != pyh... | 8,713 |
def get_transformation_id(action):
""" Get the id of a transformation.
Parameters
----------
action: function
The transformation function
Returns
-------
int
The id of the action (-1 if not found)
"""
for index, trans in TRANSFORMATIONS.items():
if trans == ... | 8,714 |
def getString(t):
    """Return the text of term *t* as a Python string.

    Raises:
        InvalidTypeError: if *t* cannot be read as a string.
    """
    buf = c_char_p()
    if not PL_get_chars(t, byref(buf), REP_UTF8 | CVT_STRING):
        raise InvalidTypeError("string")
    return buf.value
def prep_data(filename, in_len, pred_len):
"""load data from the file and chunk it into windows of input"""
# Columns are
# 0:datetime, 1:temperature, 2:humidity, 3:pressure, 4:wind_direction, 5:wind_speed
data = np.genfromtxt(filename, delimiter=',', skip_header=1,
usecols=(1, ... | 8,716 |
def should_build_ib():
"""
Helper function that detects the system's IB support and returns if we
should build with IB support.
"""
ib_util_found = False
ib_lib_found = False
ib_header_found = False
try:
# If the command doesn't exist, we can directly return instead of
#... | 8,717 |
def run_reporter():
"""
Entry point to reporter service
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--host",
help="Reporter's host, defaults to localhost",
default="localhost",
action="store",
)
parser.add_argument(
"--port",
h... | 8,718 |
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
i... | 8,719 |
def run_fn(fn_args: tfx.components.FnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs.
"""
tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
train_dataset = _input_fn(
fn_args.train_files,
fn_args.data_ac... | 8,720 |
def _get_security_group_id(connection, security_group_name):
"""
Takes a security group name and
returns the ID. If the name cannot be
found, the name will be attempted
as an ID. The first group found by
this name or ID will be used.)
:param connection:
:param security_group_name:
:... | 8,721 |
def viz_preprocessing(df_path):
"""
Preprocess the aggregation csv into a good format for visualization
"""
df = pd.read_csv(df_path)
res = df.T
res = res.rename(columns=res.iloc[0]).drop(res.index[0])
res = res.astype("int64")
res.reset_index(inplace=True)
res["index"] = res["index"... | 8,722 |
def test_get_filenames_with_multiple_attachments_data(
signal_notification_service: SignalNotificationService,
) -> None:
"""Test getting filenames with multiple 'attachments' in data."""
data = {"attachments": ["test", "test2"]}
result = signal_notification_service.get_filenames(data)
assert resul... | 8,723 |
def build_encoded_broadcast_from_model(model_fn, encoder_fn):
"""Builds `StatefulBroadcastFn` for weights of model returned by `model_fn`.
This method creates a `SimpleEncoder` for every weight of model created by
`model_fn`, as returned by `encoder_fn`.
Args:
model_fn: A Python callable with no arguments... | 8,724 |
def adjacency(G, nodelist=None, weight="weight"):
    """Return the graph's adjacency matrix as a SciPy CSR sparse matrix.

    When *nodelist* is None, all nodes of *G* are used in G.nodes() order.
    """
    nodes = G.nodes() if nodelist is None else nodelist
    return nx.to_scipy_sparse_matrix(G, nodelist=nodes, weight=weight, format="csr")
def sample_movie(user, **params):
    """Create and return a movie owned by *user*.

    Any field in *params* overrides the sample defaults below.
    """
    base = {
        'title': 'A Walk to Remember',
        'duration': datetime.timedelta(hours=2, minutes=15),
        'price': 8.99,
    }
    return Movie.objects.create(user=user, **{**base, **params})
def set_edit():
    """Switch Blender to Edit Mode, ignoring failures.

    mode_set can raise (e.g. when there is no active object or the
    context is wrong); errors are suppressed deliberately so callers
    need not pre-check the Blender context.
    """
    try:
        bpy.ops.object.mode_set(mode="EDIT")
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt and
        # SystemExit; catch only ordinary exceptions.
        pass
def lines_diff(lines1, lines2):
    """Compare two sequences of lines.

    Returns:
        (is_diff, diffs): ``is_diff`` is True when any line was added or
        removed; ``diffs`` is the complete ndiff output (all lines,
        including unchanged ones).
    """
    changed = False
    delta = []
    for entry in difflib.ndiff(lines1, lines2):
        if not changed and entry[0] in ('+', '-'):
            changed = True
        delta.append(entry)
    return changed, delta
def parse_ipmi_hpm(output):
"""Parse the output of the hpm info retrieved with ipmitool"""
hrdw = []
line_pattern = re.compile(r'^\|[^0-9]*([0-9]+)\|[^a-zA-Z ]* ?([^\|]*)\|([^\|]*)\|([^\|]*)\|([^\|]*)\|')
for line in output:
match = line_pattern.match(line)
if match:
name = ... | 8,729 |
def export_trust_stores() -> None:
"""Export the content of the trust store of each supported platform to a PEM file at ./export.
"""
certs_repo = RootCertificatesRepository.get_default()
out_pem_folder = ROOT_PATH / 'export'
out_pem_folder.mkdir(exist_ok=True)
# Export each trust store as a PE... | 8,730 |
def visualize_model(model, num_images=8, figsize=(15,15)):
"""Show a grid of predictions"""
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure(figsize=figsize)
dataloader_val = dataloaders['val']
with torch.no_grad():
for i, (inputs, labels) in enumer... | 8,731 |
def get_genes_and_pathways(reactions, r_numbers, species):
"""Returns a CSV-formatted string with the list of genes and pathways where
the reaction(s) of 'species' appear.
:param reactions: list of reactions for species
:param r_numbers: RNumbers object
:param species: KEGG organism code
:retur... | 8,732 |
def set(key, value):
"""Sets the value for a key.
Sets the value of the specified configuration key in bob's global
configuration file.
\b
Arguments
---------
key : str
The key to set the value for.
value : str
The value of the key.
\b
Fails
-----
* If ... | 8,733 |
def datasetFiles(request):
"""
Return a list all dataset files in the datasets directory, by looking for files ending
with .h5 suffix. eg. ['/Users/jarnyc/BioPyramid/data/datasets/lanner.1.0.h5']
"""
# This is the dataset directory, set by the config file
datadir = request.registry.settings['biopyramid.model.data... | 8,734 |
def relate_ca(assessment, template):
"""Generates custom attribute list and relates it to Assessment objects
Args:
assessment (model instance): Assessment model
template: Assessment Temaplte instance (may be None)
"""
if not template:
return None
ca_definitions = all_models.CustomAttri... | 8,735 |
def getSenderNumberMgtURL(request):
"""
λ°μ λ²νΈ κ΄λ¦¬ νμ
URLμ λ°νν©λλ€.
- 보μμ μ±
μ λ°λΌ λ°νλ URLμ 30μ΄μ μ ν¨μκ°μ κ°μ΅λλ€.
- https://docs.popbill.com/fax/python/api#GetSenderNumberMgtURL
"""
try:
# νλΉνμ μ¬μ
μλ²νΈ
CorpNum = settings.testCorpNum
# νλΉνμ μμ΄λ
UserID = settings.testUserID
... | 8,736 |
def closest_match(match, specs, depth=0):
"""
Recursively iterates over type, group, label and overlay key,
finding the closest matching spec.
"""
new_specs = []
match_lengths = []
for i, spec in specs:
if spec[0] == match[0]:
new_specs.append((i, spec[1:]))
else:... | 8,737 |
def average(w, axis=-1):
"""Calculate average
Example:
>>> w1=Waveform([range(2), range(2)],array([[1.0, 3.0], [0.0, 5.0]]))
>>> average(w1)
Waveform(array([0, 1]), array([ 2. , 2.5]))
>>> w1=Waveform([range(2), range(2)],array([[1.0, 3.0], [0.0, 5.0]]), \
xlabels=['row'... | 8,738 |
def random():
"""Return a random parameter set for the model."""
total_thickness = 10**np.random.uniform(2, 4.7)
Nlayers = np.random.randint(2, 200)
d_spacing = total_thickness / Nlayers
thickness = d_spacing * np.random.uniform(0, 1)
length_head = thickness * np.random.uniform(0, 1)
length_... | 8,739 |
def InformationalBuilders(site_config, boards_dict, ge_build_config):
"""Create all informational builders.
We have a number of informational builders that are built, but whose output is
not directly used for anything other than reporting success or failure.
Args:
site_config: config_lib.SiteConfig to be ... | 8,740 |
def test_fva(ec_model_core, fva_targets):
    """Test that fva returns the expected results."""
    result = flux_variability_analysis(ec_model_core)
    spans = result.maximum - result.minimum
    # Exactly 38 reactions are expected to have a flux span above tolerance.
    assert (spans > 1e-3).sum() == 38
def extract_features_to_dict(image_dir, list_file):
"""extract features and save them with dictionary"""
label, img_list = load_image_list(image_dir, list_file)
ftr = feature
integer_label = label_list_to_int(label)
feature_dict = {'features': ftr,
'label': integer_label,
... | 8,742 |
def database_connection(
autocommit: bool = False,
) -> typing.Iterator[psycopg2.extensions.connection]:
"""Context manager for database transactions.
By default the transaction is commited when exiting the context
manager normally or rolled back in case of unhandled exception. But
since e.g. VACUU... | 8,743 |
def roi_heads_forward(
self,
features, # type: Dict[str, Tensor]
proposals, # type: List[Tensor]
image_shapes, # type: List[Tuple[int, int]]
targets=None, # type: Optional[List[Dict[str, Tensor]]]
):
# type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]
"""
Args:
featur... | 8,744 |
def capitalize(s):
    """Return a copy of *s* with only its first character capitalized.

    Delegates to ``str.capitalize``, which also lowercases the rest of
    the string.
    """
    return s.capitalize()
def S_tunnel_e0(self, mu, sig, Efl, Efr, Tl, Tr):
"""energy flux
Conduction band edge 0 at higher of the two
"""
a = mu-sig/2
b = mu+sig/2
kTl = sc.k*Tl
kTr = sc.k*Tr
Blr = (a/kTl+1)*np.exp(-a/kTl)-(b/kTl+1)*np.exp(-b/kTl)
Brl = (a/kTr+1)*np.exp(-a/kTr)-(b/kTr+1)*np.exp(-b/k... | 8,746 |
def to_camel_java(text, first_lower=True):
    """Return *text* in camelCase (or CamelCase when first_lower=False),
    escaping Java reserved keywords with a trailing underscore.
    """
    return to_camelcase(
        text,
        first_lower=first_lower,
        reserved_keywords=JAVA_KEYWORDS,
        suffix="_",
    )
def parse_resources(resource_name, resource_data, book_node, **auth_info):
""" Creates resource topics """
resource_data = resource_data or []
resource_str = "{}-{}".format(book_node.source_id, resource_name.replace(' ', '-').lower())
# Create resource topic
resource_node = nodes.TopicNode(source_i... | 8,748 |
def rectangle_field(N_1, N_2, B_1, B_2, H, D, r_b):
"""
Build a list of boreholes in a rectangular bore field configuration.
Parameters
----------
N_1 : int
Number of borehole in the x direction.
N_2 : int
Number of borehole in the y direction.
B_1 : float
Distance (... | 8,749 |
def mask_outputs(machine):
    """Erase outputs from each edge where they are zero.

    For every edge data dict, removes each key that is both a machine
    output and mapped to 0.
    """
    for u, v, d in machine.edges(data=True):
        # Collect the keys first: calling d.pop(k) while iterating `d`
        # raises "RuntimeError: dictionary changed size during iteration".
        zeroed = [k for k in d if k in machine.outputs and d[k] == 0]
        for k in zeroed:
            del d[k]
def convertHunit(conc, from_unit='H/10^6 Si', to_unit='ppm H2O', phase='Fo90',
printout=True):
"""
Convert hydrogen concentrations to/from H/10^6 Si and ppm H2O.
Based on Table 3 of Denis et al. 2013
"""
if phase == 'Fo90':
H_to_1_ppm = 16.35
elif phase == 'opx':
H_t... | 8,751 |
def run(file: str, expected: str) -> None:
    """Run the program on *file*; assert exit status 0 and *expected* output."""
    status, output = getstatusoutput(f'{RUN} {file}')
    assert status == 0
    assert output.rstrip() == expected
def countBasesInFasta(fastaFile):
"""
Given a fasta file, return a dict where the number of records and
the total number of bases are given by 'records' and 'bases' respectively.
"""
recordRE = re.compile(r'^>')
whiteSpaceRE = re.compile(r'\s+')
total_bases = 0
total_seqs = 0
with op... | 8,753 |
def load_mnist_denoising(path_raw_dataset, batch_size=1, mu=0., sigma=0.6, deterministic=True):
"""
1. Get the MNIST dataset via PyTorch built-in APIs.
2. Wrap it with customized wrapper with additive Gaussian noise processor
3. Build PyTorch data loader objects.
:param path_raw_dataset:
:param... | 8,754 |
def test_create_env_def_file_cwl():
"""testing create_env_def_file with cwl option and an input Env variable"""
envfilename = 'someenvfile'
runjson_dict = {'Job': {'App': {'language': 'cwl_v1',
'cwl_url': 'someurl',
'main_cwl': 'somecw... | 8,755 |
def help_systempowerlimiton(self, commands):
"""
limiton: Activates the powerlimit for a server, and
enables power throttling.
==================================================================
Usage:
set system power limiton -i {serverid}
... | 8,756 |
def make_triplet_freqs(sentence, triplet_freqs):
"""
ζεεγ3γ€η΅γ«γγ
"""
# Janomeγ§εθͺγ«εε²γγ
t = Tokenizer()
morphemes = [token.surface for token in t.tokenize(sentence)]
if len(morphemes) < 3:
return {}
# ηΉ°γθΏγ
for i in range(len(morphemes) - 2):
triplet = tuple(morphemes[i... | 8,757 |
def test_train_val_split(patient_id,
sub_dataset_ids,
cv_fold_number):
""" if cv_fold_number == 1:
if patient_id in sub_dataset_ids[-5:]: return 'test'
elif patient_id in sub_dataset_ids[-7:-5]: return 'validation'
else: return 'train'
... | 8,758 |
def f_rank(iterable, start=1):
"""Fractional ranking"""
last, fifo = None, []
for n, item in enumerate(iterable, start):
if item[0] != last:
if fifo:
mean = sum(f[0] for f in fifo) / len(fifo)
while fifo:
yield mean, fifo.pop(0)[1]
... | 8,759 |
def generate_data_design_config(
random_generator: np.random.Generator,
) -> Iterable[DataSetParameters]:
"""Generates the data design configuration for evaluating M3 strategy."""
keys = LEVELS.keys()
levels = [len(LEVELS[k]) for k in keys]
for i, sample in enumerate(
lhs(n=len(levels), samp... | 8,760 |
def get_convertible_info():
"""
D:\Trade\TDX\cjzq_tdx\T0002\hq_cache\speckzzdata.txt
:return:
"""
filename = '{}{}{}'.format(TDX_DIR, os.sep, 'T0002\\hq_cache\\speckzzdata.txt')
columns = [
'exchange', 'code', 'stock_code', 'convert_price', 'current_interest', 'list_amount', 'call_price'... | 8,761 |
def test_striplog_colour_plot():
"""
Tests mpl image of striplog with the ladder option.
"""
legend = Legend.builtin('NSDOE')
imgfile = "tutorial/M-MG-70_14.3_135.9.png"
striplog = Striplog.from_image(imgfile, 14.3, 135.9, legend=legend)
for iv in striplog:
iv.data['porosity'] = i... | 8,762 |
def fetch_data(
o_included: str,
flowcharts: dict,
o_metadata_file: str,
o_biom_file: str,
p_redbiom_context: str,
p_bloom_sequences: str,
p_reads_filter: int,
unique: bool,
update: bool,
dim: bool) -> pd.DataFrame:
"""
Parameters
... | 8,763 |
def searchArtist(artistName, session=models.session):
    """Search for an artist by name.

    Returns:
        models.ArtistSearch for *artistName* bound to *session*.
    """
    search = models.ArtistSearch(artistName, session)
    return search
def _ddnone():
    """Module-level default factory returning ``defaultdict(_none)``.

    Defined as a named function (rather than a lambda) so the resulting
    defaultdict can be pickled.
    """
    return defaultdict(_none)
def apply_join(query: Select, table: Table, join_table: Table, join: TableJoin):
"""
Performs a inner or outer join between two tables on a given query object.
TODO: enable multiple joins
:param query: A SQLAlchemy select object.
:param table: The Table we are joining from.
:param join_table: ... | 8,766 |
def filter_shapely(feature):
"""
feature1 = feature_extract(feature)
feature2 = filter_shapely(feature1)
"""
tmp = extract_Accumulation_entropy_list(feature)
tmp2=[]
for i in range(len(tmp)):
if i!=0:
tmp2.append(tmp[i]-tmp[i-1])
else:
tmp2.a... | 8,767 |
def stations_by_river(stations):
"""Returns a dictionary mapping river names (key)
to a list of stations (object)"""
rivers_stations_dict = {} # Create empty dictionary
for i in range(len(stations)): # Iterate through list of stations
# Data type checks
if type(stations[i]) is ... | 8,768 |
def getCurrentProfile():
    """
    Get the name of the current profile.

    Returns the payload built by ``__createJSON`` for the
    "GetCurrentProfile" request with no parameters.
    """
    return __createJSON("GetCurrentProfile", {})
def get_params_from_request(req: web.Request) -> QueryParams:
"""
This function need for convert query string to filter parameters.
"""
page = int(req.rel_url.query.get('page', '1'))
cursor = req.rel_url.query.get('cursor')
sort = req.rel_url.query.get('sort')
sort_dir = req.rel_url.query.ge... | 8,770 |
def norm_mem_interval(pt):
    """Normalize membership in interval.

    Rewrites the proof term's proposition by applying the automatic
    conversion to both operands of the binary operator inside its
    argument (arg_conv -> binop_conv -> auto_conv).
    """
    return pt.on_prop(arg_conv(binop_conv(auto.auto_conv())))
def create_figure():
    """Create a simple example figure: one subplot of sin(2*pi*t) on [0, 3)."""
    fig = Figure()
    axes = fig.add_subplot(111)
    xs = np.arange(0.0, 3.0, 0.01)
    ys = np.sin(2 * np.pi * xs)
    axes.plot(xs, ys)
    return fig
def train():
"""
Main script.
"""
args = get_args()
# Get context.
from nnabla.contrib.context import extension_context
extension_module = args.context
if args.context is None:
extension_module = 'cpu'
logger.info("Running in %s" % extension_module)
ctx = extension_cont... | 8,773 |
def rdf_reader(src):
    """Read an RDF mapping from a file.

    Args:
        src: RDF filename.
    Returns:
        The RDF mapping object built from every entry in the file.
    """
    entries = list(rdf_include(src))
    return RDF(*entries)
def get_batches_xy(x, y, batch_size):
"""
Generate inputs and targets in a batch-wise fashion for feed-dict
Args:
x: entire source sequence array
y: entire output sequence array
batch_size: batch size
Returns:
x_batch, y_batch, source_sentence_length, target_sentence_leng... | 8,775 |
def test_run_failure_filenotfounderror(tmp_path, sample_catalog_minimal):
"""Test failure of _run on RemoveCmd in specifying a nonexistent file."""
# Create a temporary catalog file with responsible-parties
content_type = FileContentType.JSON
catalog_def_dir, catalog_def_file = test_utils.prepare_trestl... | 8,776 |
def notImplementedYet():
    """
    Show a modal dialog telling the user this feature is not implemented.
    """
    dialog = wx.GenericMessageDialog(
        parent=wx.GetActiveWindow(),
        message=loc('popup.notimplemented.text'),
        caption=loc('popup.notimplemented.title'),
        style=wx.ICON_INFORMATION | wx.STAY_ON_TOP | wx.OK,
    )
    dialog.ShowModal()
def test_nf_calc(gain, nf_expected, enabled, setup_edfa, si):
""" compare the 2 amplifier models (polynomial and estimated from nf_min and max)
=> nf_model vs nf_poly_fit for boundary gain values: gain_min (and below) & gain_flatmax
same values are expected between the 2 models
=> unitary test for Edfa... | 8,778 |
def draw_signalData(Nsamp=1, alpha=__alpha, beta=__beta, **kwargs):
    """
    Draw Nsamp SNR samples from the signal distribution: a noncentral
    chi-squared whose noncentrality is drawn from a truncated Pareto.
    """
    noncentralities = __draw_truncatedPareto(Nsamp, alpha=alpha, beta=beta)
    samples = [ncx2.rvs(__noise_df, nc) for nc in noncentralities]
    return np.array(samples)
def choose_weighted_images_forced_distribution(num_images, images, nodes):
"""Returns a list of images to cache
Enforces the distribution of images to match the weighted distribution as
closely as possible. Factors in the current distribution of images cached
across nodes.
It is important to note... | 8,780 |
def predict_lumbar_ankles_model(data):
"""Generate lumbar + 2 ankles model predictions for data.
Args:
data (dict): all data matrices/lists for a single subject.
Returns:
labels (dict): columns include 'probas' (from model) and 'true'
(ground truth). One row for each fold.
... | 8,781 |
def breakOnEnter(func=None, *, debugger='pdb'):
"""
A function wrapper that causes debug mode to be entered when the
wrapped function is called.
Parameters
----------
func : The function to wrap.
debugger : The debugger used when debug mode is entered. This can
be either the debugg... | 8,782 |
def linear_search(iterable, item):
"""Returns the index of the item in the unsorted iterable.
Iterates through a collection, comparing each item to the target item, and
returns the index of the first item that is equal to the target item.
* O(n) time complexity
* O(1) space complexity
Args:
iterable:... | 8,783 |
def create_MD_tag(reference_seq, query_seq):
"""Create MD tag
Args:
reference_seq (str) : reference sequence of alignment
query_seq (str) : query bases of alignment
Returns:
md_tag(str) : md description of the alignment
"""
no_change = 0
md = []
for ref_base, query_ba... | 8,784 |
def HDFScopyMerge (src_dir, dst_file, overwrite=False, deleteSource=False):
"""
copyMerge() merges files from an HDFS directory to an HDFS files.
File names are sorted in alphabetical order for merge order.
Inspired by https://hadoop.apache.org/docs/r2.7.1/api/src-html/org/apache/hadoop/fs/FileUti... | 8,785 |
def get_diff(base, head=None):
"""Return a git diff between the base and head revision.
:type base: str
:type head: str | None
:rtype: list[str]
"""
if not head or head == 'HEAD':
head = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
cache = '/tmp/git-diff-cache-%s-%s... | 8,786 |
def split_last(dataframe, target_col, sort_col='date', cut=.9):
"""Splits the dataframe on sort_column at the given cut ratio, and splits
the target column
Args:
dataframe: dataframe to be cut
sort_col: column to be sorted on. Default='date'
cut: cut ratio for the train/eval sets
... | 8,787 |
def genparams_rst255(ctx: click.Context) -> None:
"""
Generate Ristretto255 system parameters.
For details about Ristretto255, refer to https://ristretto.group/
"""
from .ristretto_255 import create_ristretto_255_parameters
pvss = pvss_from_datadir(ctx.obj)
params = create_ristretto_255_pa... | 8,788 |
def assert_almost_equal(
    actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyt(1)"]
):
    """
    usage.scipy: 1
    """
    # NOTE(review): body is intentionally `...` — this appears to be a
    # generated type stub recording observed argument types for one
    # scipy call site; confirm against the stub-generation tooling.
    ...
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ig... | 8,790 |
def readCSV(associated_ipaddr, ipaddr, timestamp):
"""
Method that extracts observations from a CSV file.
Parameters:
associated_ipaddr (str): The name of the column that specifies IP addresses of VPN clients
ipaddr (str): The name of the column that specifies IP addresses of us... | 8,791 |
def interp2d(x, y, z, outshape, verbose=True, doplot=True):
"""
Parameters
----------
x, y : int
X and Y indices of `z`.
z : float
Values for given `x` and `y`.
outshape : tuple of int
Shape of 2D output array.
verbose : bool, optional
Print info to screen.... | 8,792 |
def setup_app(app_name=__name__, db_uri=None):
"""
Set up Flask application and database.
Args:
app_name: Name of the Flask application.
db_uri: Database URI for SQLAlchemy to connected to.
"""
global app, db
# Flask application
app = Flask(app_name)
# Application config... | 8,793 |
def run_cmd(cmd: Text, split: bool = True, shell=False, verbose: bool = True):
"""Run a system command and print output."""
print(f'CMD: {cmd}')
cmd = shlex.split(cmd) if split else [cmd]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=shell)
while True:
output = process.stdout... | 8,794 |
def upload_command():
    """
    Upload object into MWDB
    """
    # NOTE(review): body is intentionally empty — presumably a CLI
    # command/group placeholder whose docstring doubles as help text and
    # whose behavior is attached via decorators elsewhere; confirm
    # before changing the docstring wording.
    pass
def _nms_boxes(detections, nms_threshold):
"""Apply the Non-Maximum Suppression (NMS) algorithm on the bounding
boxes with their confidence scores and return an array with the
indexes of the bounding boxes we want to keep.
# Args
detections: Nx7 numpy arrays of
[[x, y, w, h, ... | 8,796 |
def test_list_registered_one_private(caplog, store_mock):
"""List registered with one private item in the response."""
caplog.set_level(logging.INFO, logger="charmcraft.commands")
store_response = [
Charm(name='charm', private=True, status='status'),
]
store_mock.list_registered_names.retur... | 8,797 |
def dht_get_key(data_key):
"""
Given a key (a hash of data), go fetch the data.
"""
dht_client = get_dht_client()
ret = dht_client.get(data_key)
if ret is not None:
if type(ret) == types.ListType:
ret = ret[0]
if type(ret) == types.DictType and ret.has_key("value")... | 8,798 |
def dumpJSON(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
bigint_as_string=False, sort_keys=False, item_sort_key=None,
... | 8,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.