content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def purchase_index(request):
    """Display the logged-in user's purchase history, newest first.

    :param request: Django HttpRequest; ``request.user.id`` selects whose
        history is shown.
    :return: rendered ``profile/histories/purchase_history.html`` response.
    """
    login_id = request.user.id
    # ``.filter()`` already operates on the full queryset, so the redundant
    # ``.all()`` call was dropped; '-date' puts the newest purchases first.
    context = {
        'histories': Purchase_history.objects.filter(acc_id=login_id).order_by('-date')
    }
    return render(request, 'profile/histories/purchase_history.html', context)
def solve_EEC(self):
"""Compute the parameters dict for the equivalent electrical circuit
cf "Advanced Electrical Drives, analysis, modeling, control"
Rik de doncker, Duco W.J. Pulle, Andre Veltman, Springer edition
<--- --->
-----R-----wsLqIq---- ... | 16,101 |
def plot_figure_legend(results_dir):
"""
Make a standalone legend
:return:
"""
from hips.plotting.layout import create_legend_figure
labels = ["SBM-LDS (Gibbs)", "HMM (Gibbs)", "Raw LDS (Gibbs)", "LNM-LDS (pMCMC)"]
fig = create_legend_figure(labels, colors[:4], size=(5.25,0.5),
... | 16,102 |
def submit(job):
"""Submit a job."""
# Change into the working directory and submit the job.
cmd = ["cd " + job["destdir"] + "\n", "sbatch " + job["subfile"]]
# Process the submit
try:
shellout = shellwrappers.sendtossh(job, cmd)
except exceptions.SSHError as inst:
if "violat... | 16,103 |
def ns_alarm_create(ctx, name, ns, vnf, vdu, metric, severity,
threshold_value, threshold_operator, statistic):
"""creates a new alarm for a NS instance"""
# TODO: Check how to validate threshold_value.
# Should it be an integer (1-100), percentage, or decimal (0.01-1.00)?
try:
... | 16,104 |
def is_active(seat):
    """Return True when *seat*'s map entry is "#", else False.

    NOTE(review): the previous docstring claimed the opposite polarity
    ("Return True if seat is empty"); in the usual seat-map encoding "#"
    marks an occupied seat, so this actually returns True for occupied
    seats — confirm against callers.  Positions missing from ``seat_map``
    default to "." and therefore yield False.
    """
    # ``x == "#"`` is already a bool; the old ``True if ... else False``
    # wrapper was redundant.
    return seat_map.get(seat, ".") == "#"
def calibrate_intensity_to_powder(peak_intensity: dict, powder_peak_intensity: dict,
powder_peak_label: List[str], image_numbers: List[int], powder_start: int = 1):
"""Calibrate peak intensity values to intensity measurements taken from a 'random' powder sample."""
corrected_pe... | 16,106 |
def obsrio_temperatures(
observatory: str,
input_factory: Optional[TimeseriesFactory] = None,
output_factory: Optional[TimeseriesFactory] = None,
realtime_interval: int = 600,
update_limit: int = 10,
):
"""Filter temperatures 1Hz miniseed (LK1-4) to 1 minute legacy (UK1-4)."""
starttime, end... | 16,107 |
def examine(path):
""" Look for forbidden tasks in a job-output.json file path """
data = json.load(open(path))
to_fix = False
for playbook in data:
if playbook['trusted']:
continue
for play in playbook['plays']:
for task in play['tasks']:
for hos... | 16,108 |
def determine_disjuct_modules_alternative(src_rep):
"""
Potentially get rid of determine_added_modules and get_modules_lst()
"""
findimports_output = subprocess.check_output(['findimports', src_rep])
findimports_output = findimports_output.decode('utf-8').splitlines()
custom_modules_lst = []
for i, elem in enu... | 16,109 |
def test_start_notasks(event_loop):
    """Starting a LoadLimitEvent with no registered tasks must raise.

    Verifies that ``start()`` raises :class:`NoEventTasksError` when the
    task set is empty, and that the event stays un-started both before and
    after the failed call.
    """
    event = LoadLimitEvent()
    # Precondition: a fresh event is not started and owns no tasks.
    assert not event.started
    assert len(event.tasks) == 0
    # Starting without tasks is an error ...
    with pytest.raises(NoEventTasksError):
        event.start(loop=event_loop)
    # ... and must not leave the event in a started state.
    assert not event.started | 16,110 |
def observe_simulation(star_error_model=None, progenitor_error_model=None,
selection_expr=None, output_file=None, overwrite=False,
seed=None, simulation_path=None, snapfile=None):
""" Observe simulation data and write the output to an HDF5 file """
if os.path.exist... | 16,111 |
def config_ask(default_message = True,
config_args = config_variables):
"""Formats user command line input for configuration details"""
if default_message:
print("Enter configuration parameters for the following variables... ")
config_dictionary = dict()
for v in config_ar... | 16,112 |
def parseAndRun(args):
"""interface used by Main program and py.test (arelle_test.py)
"""
try:
from arelle import webserver
hasWebServer = True
except ImportError:
hasWebServer = False
cntlr = CntlrCmdLine() # need controller for plug ins to be loaded
usage = "usage: %pr... | 16,113 |
def build_A(N):
"""
Build A based on the defined problem.
Args:
N -- (int) as defined above
Returns:
NumPy ndarray - A
"""
A = np.hstack( (np.eye(N), np.negative(np.eye(N))) )
A = np.vstack( (A, np.negative(np.hstack( (np.eye(N), np.eye(N)) ))) )
A = np.vstack( (A, np.h... | 16,114 |
def test_multiple_genbanks_multiple_cazymes(db_session, monkeypatch):
"""test adding protein to db when finding multiple identical CAZymes and GenBank accesisons."""
def mock_add_protein_to_db(*args, **kwargs):
return
monkeypatch.setattr(sql_interface, "add_data_to_protein_record", mock_add_protei... | 16,115 |
def gms_change_est2(T_cont, T_pert, q_cont, precip, level, lat,
lev_sfc=925., gamma=1.):
"""
Gross moist stability change estimate.
Near surface MSE difference between ITCZ and local latitude, neglecting
geopotential term and applying a thermodynamic scaling for the moisture
ter... | 16,116 |
def solid_polygon_info_(base_sides, printed=False):
"""Get information about a solid polygon from its side count."""
# Example: A rectangular solid (Each base has four sides) is made up of
# 12 edges, 8 vertices, 6 faces, and 12 triangles.
edges = base_sides * 3
vertices = base_sides * 2
faces =... | 16,117 |
def read(id=None):
"""
This function responds to a request for /api/people
with the complete lists of people
:return: sorted list of people
"""
# Create the list of people from our data
with client() as mcl:
# Database
ppldb = mcl.ppldb
# collection (kind of... | 16,118 |
def load_prism_theme():
    """Build the stylesheet tag for the PrismJS theme chosen in settings.

    Returns a safe HTML ``<link>`` string for the configured theme, or an
    empty string when no theme is configured.
    """
    theme = get_theme()
    if not theme:
        return ""
    tag = (
        f'<link href="{PRISM_PREFIX}{PRISM_VERSION}/themes/prism-{theme}'
        '.min.css" rel="stylesheet">'
    )
    return mark_safe(tag)
def get_root_name(depth):
    """Build the root name: the alphabet's null character repeated *depth* times."""
    null_char = Alphabet.get_null_character()
    return null_char * depth
def md5(fname):
    """
    Return the hexadecimal MD5 digest of the file at *fname*.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(fname, "rb") as handle:
        chunk = handle.read(4096)
        while chunk:
            digest.update(chunk)
            chunk = handle.read(4096)
    return digest.hexdigest()
def date2num(date_axis, units, calendar):
"""
A wrapper from ``netCDF4.date2num`` able to handle "years since" and "months since" units.
If time units are not "years since" or "months since" calls usual ``netcdftime.date2num``.
:param numpy.array date_axis: The date axis following units
:param str ... | 16,122 |
def generate_int_file_from_fit(
fitfn_zbt, fitfn_sp, fitfn_mp,
exp_list, mass_range,
std_io_map=STANDARD_IO_MAP,
metafitter_zbt=single_particle_firstp_zbt_metafit,
metafitter_sp=single_particle_firstp_metafit,
metafitter_mp=multi_particle_firstp_metafit,
dpath_sou... | 16,123 |
def _(txt):
    """Translate *txt* via gettext, preferring the "CurlyTx" domain.

    :param txt: message id to translate.
    :return: the "CurlyTx" domain translation, falling back to the default
        gettext domain when "CurlyTx" has no entry.
    """
    t = gettext.dgettext("CurlyTx", txt)
    # dgettext echoes the input back on a miss, so an unchanged string
    # means "no CurlyTx translation" -> fall back to the default domain.
    if t == txt:
        #print "[CurlyTx] fallback to default translation for", txt
        t = gettext.gettext(txt)
    return t | 16,124 |
def bell(num=1, delay=100):
"""Rings the bell num times using tk's bell command.
Inputs:
- num number of times to ring the bell
- delay delay (ms) between each ring
Note: always rings at least once, even if num < 1
"""
global _TkWdg
if not _TkWdg:
_TkWdg = tkinter.Frame()
... | 16,125 |
def output_node(ctx, difference, path, indentstr, indentnum):
"""Returns a tuple (parent, continuation) where
- parent is a PartialString representing the body of the node, including
its comments, visuals, unified_diff and headers for its children - but
not the bodies of the children
- continua... | 16,126 |
def split_tree_into_feature_groups(tree: TreeObsForRailEnv.Node, max_tree_depth: int) -> (
np.ndarray, np.ndarray, np.ndarray):
"""
This function splits the tree into three difference arrays of values
"""
data, distance, agent_data = _split_node_into_feature_groups(tree)
for direction in TreeObsFor... | 16,127 |
def _generate_training_batch(ground_truth_data, representation_function,
batch_size, num_points, random_state):
"""Sample a set of training samples based on a batch of ground-truth data.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Functi... | 16,128 |
def get_mnist_loaders(data_dir, b_sz, shuffle=True):
"""Helper function that deserializes MNIST data
and returns the relevant data loaders.
params:
data_dir: string - root directory where the data will be saved
b_sz: integer - the batch size
shuffle: boolean - whether... | 16,129 |
def run_example_interactive():
"""Example function
Running the exact same Example QuEST provides in the QuEST git repository
with the interactive python interface of PyQuEST-cffi
"""
print('PyQuEST-cffi tutorial based on QuEST tutorial')
print(' Basic 3 qubit circuit')
# creating envir... | 16,130 |
def atomic_coordinates_as_json(pk):
"""Get atomic coordinates from database."""
subset = models.Subset.objects.get(pk=pk)
vectors = models.NumericalValue.objects.filter(
datapoint__subset=subset).filter(
datapoint__symbols__isnull=True).order_by(
'datapoint_id', 'counter'... | 16,131 |
def additional_bases():
""""Manually added bases that cannot be retrieved from the REST API"""
return [
{
"facility_name": "Koltyr Northern Warpgate",
"facility_id": 400014,
"facility_type_id": 7,
"facility_type": "Warpgate"
},
{
... | 16,132 |
async def test_flow_non_encrypted_already_configured_abort(opp):
"""Test flow without encryption and existing config entry abortion."""
MockConfigEntry(
domain=DOMAIN,
unique_id="0.0.0.0",
data=MOCK_CONFIG_DATA,
).add_to_opp(opp)
result = await opp.config_entries.flow.async_ini... | 16,133 |
def write_junit_xml(name, message=None):
"""
Write a JUnit results XML file describing the outcome of a quality check.
"""
if message:
failure_element = JUNIT_XML_FAILURE_TEMPLATE.format(message=quoteattr(message))
else:
failure_element = ''
data = {
'failure_count': 1 if... | 16,134 |
def get_all_label_values(dataset_info):
"""Retrieves possible values for modeled labels from a `Seq2LabelDatasetInfo`.
Args:
dataset_info: a `Seq2LabelDatasetInfo` message.
Returns:
A dictionary mapping each label name to a tuple of its permissible values.
"""
return {
label_info.name: tuple(l... | 16,135 |
def load_input(file: str) -> ArrayLike:
"""Load the puzzle input and duplicate 5 times in each direction,
adding 1 to the array for each copy.
"""
input = puzzle_1.load_input(file)
input_1x5 = np.copy(input)
for _ in range(4):
input = np.clip(np.mod(input + 1, 10), a_min=1, a_max=Non... | 16,136 |
def _get_xvals(end, dx):
    """Return an integer numpy array of x-values spaced "dx" apart.

    The values are dx, 2*dx, ... taken from ``np.arange(0, end-1+dx, dx)``
    with the leading 0 dropped.  The final value equals ``end`` only when
    ``dx`` evenly divides ``end`` -- NOTE(review): confirm callers always
    pass dx-aligned ``end`` values, otherwise the array ends short of
    ``end``.

    Args:
        end (int): upper bound used to build the range.
        dx (int): step between consecutive x-values.
    """
    # arange starts at 0; the slice below drops that leading 0.
    arange = np.arange(0, end-1+dx, dx, dtype=int)
    xvals = arange[1:]
    return xvals | 16,137 |
def check_destroy_image_view(test, device, image_view, device_properties):
"""Checks the |index|'th vkDestroyImageView command call atom, including the
device handler value and the image view handler value.
"""
destroy_image_view = require(test.next_call_of("vkDestroyImageView"))
require_equal(devic... | 16,138 |
def top_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with h... | 16,139 |
def _write_batch_lmdb(db, batch):
"""
Write a batch to an LMDB database
"""
try:
with db.begin(write=True) as lmdb_txn:
for i, temp in enumerate(batch):
datum, _id = temp
key = str(_id)
lmdb_txn.put(key, datum.SerializeToString())
... | 16,140 |
def get_reference_shift( self, seqID ):
"""Get a ``reference_shift`` attached to a particular ``seqID``.
If none was provided, it will return **1** as default.
:param str seqID: |seqID_param|.
:type shift: Union[:class:`int`, :class:`list`]
:raises:
:TypeError: |indf_error|.
.. rubr... | 16,141 |
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be ove... | 16,142 |
def test_splitinfo_throws():
"""make sure bad behavior is caught"""
short_profile = dict(DEMO_SPLIT)
short_profile.pop('split_rate', None)
with pytest.raises(exceptions.InvalidSplitConfig):
split_obj = split_utils.SplitInfo(short_profile)
bad_split = dict(DEMO_SPLIT)
bad_split['split_ra... | 16,143 |
def load_and_resolve_feature_metadata(eval_saved_model_path: Text,
graph: tf.Graph):
"""Get feature data (feature columns, feature) from EvalSavedModel metadata.
Like load_feature_metadata, but additionally resolves the Tensors in the given
graph.
Args:
eval_saved_mod... | 16,144 |
def greater_than_or_eq(quant1, quant2):
    """Apply the ``>=`` operator to the two quantities and return the result."""
    comparison = quant1 >= quant2
    return comparison
def _download_smeagol_PWMset():
"""Function to download the curated set of motifs used in the SMEAGOL paper.
Returns:
df (pandas df): contains matrices
"""
download_dir = 'motifs/smeagol_datasets'
remote_paths = ['https://github.com/gruber-sciencelab/VirusHostInteractionAtlas/t... | 16,146 |
def test_peekleft_after_appendleft(deque_fixture):
    """peekleft() must return the value just pushed with appendleft().

    Uses the ``deque_fixture`` pytest fixture, which supplies a deque
    instance (its initial contents are defined where the fixture lives).
    """
    deque_fixture.appendleft(7)
    assert deque_fixture.peekleft() == 7 | 16,147 |
def pr_define_role(pe_id,
role=None,
role_type=None,
entity_type=None,
sub_type=None):
"""
Back-end method to define a new affiliates-role for a person entity
@param pe_id: the person entity ID
@param role: the role... | 16,148 |
def inherently_superior(df):
"""
Find rows in a dataframe with all values 'inherently superior',
meaning that all values for certain metrics are as high or higher
then for all other rows.
Parameters
----------
df : DataFrame
Pandas dataframe containing the columns to be compared... | 16,149 |
def LineColourArray():
"""Line colour options array"""
Colour = [
'Black',
'dimgrey',
'darkgrey',
'silver',
'lightgrey',
'maroon',
'darkred',
'firebrick',
'red',
'orangered',
'darkorange',
'orange',
... | 16,150 |
def os_to_maestral_error(exc, dbx_path=None, local_path=None):
"""
Gets the OSError and tries to add a reasonably informative error message.
.. note::
The following exception types should not typically be raised during syncing:
InterruptedError: Python will automatically retry on interrupt... | 16,151 |
def parse_ccu_sys_var(data: dict[str, Any]) -> tuple[str, Any]:
"""Helper to parse type of system variables of CCU."""
# pylint: disable=no-else-return
if data[ATTR_TYPE] == ATTR_HM_LOGIC:
return data[ATTR_NAME], data[ATTR_VALUE] == "true"
if data[ATTR_TYPE] == ATTR_HM_ALARM:
return data... | 16,152 |
def one_time_log_fixture(request, workspace) -> Single_Use_Log:
"""
Pytest Fixture for setting up a single use log file
At test conclusion, runs the cleanup to delete the single use text file
:return: Single_Use_Log class
"""
log_class = Single_Use_Log(workspace)
request.addfinalizer(log_cl... | 16,153 |
def details(request, path):
"""
Returns detailed information on the entity at path.
:param path: Path to the entity (namespaceName/.../.../.../)
:return: JSON Struct: {property1: value, property2: value, ...}
"""
item = CACHE.get(ENTITIES_DETAIL_CACHE_KEY)
# ENTITIES_DETAIL : {"namespaceName": {"name":""... | 16,154 |
def ca_get_container_capability_set(slot, h_container):
"""
Get the container capabilities of the given slot.
:param int slot: target slot number
:param int h_container: target container handle
:return: result code, {id: val} dict of capabilities (None if command failed)
"""
slot_id = CK_SL... | 16,155 |
def load_pyfunc(model_file):
    """
    Load a persisted Keras model and expose it through the PyFunc interface.

    :param model_file: Path to the saved Keras model file.
    :return: A ``_KerasModelWrapper`` wrapping the deserialized model.
    """
    # _load_model deserializes the Keras model; the wrapper adapts it to
    # the generic pyfunc calling convention.
    return _KerasModelWrapper(_load_model(model_file)) | 16,156 |
def business_days(start, stop):
    """
    Return business days between two datetimes (inclusive).

    :param start: datetime marking the start of the range; only its date
        component is used.
    :param stop: datetime marking the end of the range; only its date
        component is used.
    :return: whatever ``dt_business_days`` produces for the two dates --
        presumably a count of weekdays; confirm against its definition.
    """
    # Truncate both endpoints to dates before delegating.
    return dt_business_days(start.date(), stop.date()) | 16,157 |
def empty_nzb_document():
""" Creates xmldoc XML document for a NZB file. """
# http://stackoverflow.com/questions/1980380/how-to-render-a-doctype-with-pythons-xml-dom-minidom
imp = minidom.getDOMImplementation()
dt = imp.createDocumentType("nzb", "-//newzBin//DTD NZB 1.1//EN",
"http://... | 16,158 |
def get_output_directory(create_statistics=None, undersample=None, oversample=None):
"""
Determines the output directory given the balance of the dataset as well as columns.
Parameters
----------
create_statistics: bool
Whether the std, min and max columns have been created
undersample: ... | 16,159 |
def removePrefixes(word, prefixes):
"""
Attempts to remove the given prefixes from the given word.
Args:
word (string): Word to remove prefixes from.
prefixes (collections.Iterable or string): Prefixes to remove from given word.
Returns:
(string): Word with prefixes removed.
... | 16,160 |
def isSol(res):
"""
Check if the string is of the type ai bj ck
"""
if not res or res[0] != 'a' or res[-1] != 'c':
return False
l = 0
r = len(res)-1
while res[l] == "a":
l+=1
while res[r] == "c":
r-=1
if r-l+1 <= 0:
return False
... | 16,161 |
def test_fleurinpgen_with_parameters(aiida_profile, fixture_sandbox, generate_calc_job, fixture_code,
generate_structure): # file_regression
"""Test a default `FleurinputgenCalculation`."""
# Todo add (more) tests with full parameter possibilities, i.e econfig, los, ....
... | 16,162 |
def new_trip(direction, day, driver, time):
"""
Adds a new trip to the system.
:param direction: "Salita" or "Discesa".
:param day: A day spanning the whole work week ("Lunedì"-"Venerdì").
:param driver: The chat_id of the driver.
:param time: The time of departure.
:return:
"""
... | 16,163 |
def nixpkgs_python_configure(
name = "nixpkgs_python_toolchain",
python2_attribute_path = None,
python2_bin_path = "bin/python",
python3_attribute_path = "python3",
python3_bin_path = "bin/python",
repository = None,
repositories = {},
nix_file_deps = None... | 16,164 |
def _h1_to_dataframe(h1: Histogram1D) -> pandas.DataFrame:
    """Convert a 1-D histogram to a pandas DataFrame.

    The frame has one row per bin, with columns ``frequency`` (bin counts)
    and ``error`` (per-bin errors), indexed by the index that
    ``binning_to_index`` builds from the histogram's binning.
    """
    return pandas.DataFrame(
        {"frequency": h1.frequencies, "error": h1.errors},
        index=binning_to_index(h1.binning, name=h1.name),
    ) | 16,165 |
def fit_one_grain( gr, flt, pars):
"""
Uses scipy.optimize to fit a single grain
"""
args = flt, pars, gr
ub = np.linalg.inv(gr.ubi)
x0 = ub.ravel().copy()
xf, cov_v, info, mesg, ier = leastsq(
calc_teo_fit, x0, args, full_output=True)
ub = xf.copy()
ub.shape = 3, 3
ubi =... | 16,166 |
def wgt_area_sum(data, lat_wgt, lon_wgt):
"""wgt_area_sum() performas weighted area addition over a geographical area.
data: data of which last 2 dimensions are lat and lon. Strictly needs to be a masked array
lat_wgt: weights over latitude of area (usually cos(lat * pi/180))
lon_wgt: weights over long... | 16,167 |
def test_complain_about_missing_fields(tmp_path: Path, l1_ls8_folder: Path):
"""
It should complain immediately if I add a file without enough metadata to write the filename.
(and with a friendly error message)
"""
out = tmp_path / "out"
out.mkdir()
[blue_geotiff_path] = l1_ls8_folder.rgl... | 16,168 |
def get_files_from_path(path, recurse=False, full_path=True):
"""
Get Files_Path From Input Path
:param full_path: Full path flag
:param path: Input Path
:param recurse: Whether Recursive
:return: List of Files_Path
"""
files_path_list = []
if not os.path.exists(path):
return... | 16,169 |
def load(filename):
"""Load the labels and scores for Hits at K evaluation.
Loads labels and model predictions from files of the format:
Query \t Example \t Label \t Score
:param filename: Filename to load.
:return: list_of_list_of_labels, list_of_list_of_scores
"""
result_labels = []
re... | 16,170 |
def test_send_message_two_chat_ids(get_token: str, get_chat_id: int):
"""Отправка базового сообщения в два чата"""
test_name = inspect.currentframe().f_code.co_name
msg = f"test two chat_ids(2 msg to one chat id) send message. {test_name}"
two_tokens = [get_chat_id, get_chat_id]
client = Telegram(t... | 16,171 |
def expanding_sum(a, axis = 0, data = None, state = None):
"""
equivalent to pandas a.expanding().sum().
- works with np.arrays
- handles nan without forward filling.
- supports state parameters
:Parameters:
------------
a : array, pd.Series, pd.DataFrame or list/dict of these
... | 16,172 |
def accuracy(output, target, top_k=(1,)):
"""Calculate classification accuracy between output and target.
:param output: output of classification network
:type output: pytorch tensor
:param target: ground truth from dataset
:type target: pytorch tensor
:param top_k: top k of metric, k is an int... | 16,173 |
def read_configuration_from_file(path: str) -> Dict[str, Any]:
"""
Read the JSON file and return a dict.
:param path: path on file system
:return: raw, unchanged dict
"""
if os.path.isfile(path):
with open(path) as json_file:
return json.load(json_file)
else:
rais... | 16,174 |
def logwrap(
func: typing.Optional[typing.Callable] = None,
*,
log: logging.Logger = _log_wrap_shared.logger,
log_level: int = logging.DEBUG,
exc_level: int = logging.ERROR,
max_indent: int = 20,
spec: typing.Optional[typing.Callable] = None,
blacklisted_names: typing.Optional[typing.Lis... | 16,175 |
def sum_digits(number):
    """
    Return the sum of the decimal digits of *number*.

    Non-digit characters in the number's string form (e.g. a leading
    minus sign or a decimal point) are skipped, so negative numbers
    contribute the absolute value of each digit.
    """
    total = 0
    for char in str(number):
        if char.isdigit():
            total += int(char)
    return total
def label(input, structure=None, output=None):
"""Labels features in an array.
Args:
input (cupy.ndarray): The input array.
structure (array_like or None): A structuring element that defines
feature connections. ```structure``` must be centersymmetric. If
None, structure... | 16,177 |
def test_two_related_w_a_wout_c(clean_db, family_with_trials, capsys):
"""Test two related experiments with --all."""
orion.core.cli.main(["status", "--all"])
captured = capsys.readouterr().out
expected = """\
test_double_exp-v1
==================
id status
-------------... | 16,178 |
def get_experiment_fn(nnObj,data_dir, num_gpus,variable_strategy,use_distortion_for_training=True):
"""Returns an Experiment function.
Experiments perform training on several workers in parallel,
in other words experiments know how to invoke train and eval in a sensible
fashion for distributed training. Argume... | 16,179 |
def calc_psnr(tar_img, ref_img):
""" Compute the peak signal to noise ratio (PSNR) for an image.
Parameters
----------
tar_img : sitk
Test image.
ref_img : sitk
Ground-truth image.
Returns
-------
psnr : float
The PSNR metric.
References
----------
..... | 16,180 |
def celegans(path):
"""Load the neural network of the worm C. Elegans [@watts1998collective].
The neural network consists of around 300 neurons. Each connection
between neurons is associated with a weight (positive integer)
capturing the strength of the connection.
Args:
path: str.
Path to director... | 16,181 |
def glacier_wrap(
f: Callable[..., None],
enum_map: Dict[str, Dict[str, Any]],
) -> Callable[..., None]:
"""
Return the new function which is click-compatible
(has no enum signature arguments) from the arbitrary glacier compatible
function
"""
# Implemented the argument convert logic
... | 16,182 |
def _cluster_spec_to_device_list(cluster_spec, num_gpus_per_worker):
"""Returns a device list given a cluster spec."""
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
devices = []
for task_type in ("chief", "worker"):
for task_id in range(len(cluster_spec.as_dict().get(task_type, [])))... | 16,183 |
def group_by_time(df, col, by='day', fun='max', args=(), kwargs={}, index='categories'):
""" See <https://pandas.pydata.org/pandas-docs/stable/api.html#groupby>_ for the set of `fun` parameters
available. Examples are: 'count', 'max', 'min', 'median', etc
.. Tip:: Since Access inherits from Tim... | 16,184 |
def fetch(url, params=None, keepalive=False, requireValidCert=False,
debug=False):
"""
Fetches the desired @url using an HTTP GET request and appending and
@params provided in a dictionary.
If @keepalive is False, a fresh connection will be made for this request.
If @requireValidCert is True, then an exceptio... | 16,185 |
def param_rischDE(fa, fd, G, DE):
"""
Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)).
Given a derivation D in k(t), f in k(t), and G
= [G1, ..., Gm] in k(t)^m, return h = [h1, ..., hr] in k(t)^r and
a matrix A with m + r columns and entries in Const(k) such that
... | 16,186 |
def main(df: pyam.IamDataFrame) -> pyam.IamDataFrame:
"""Main function for validation and processing (for the ARIADNE-intern instance)"""
# load list of allowed scenario names
with open(path / "scenarios.yml", "r") as stream:
scenario_list = yaml.load(stream, Loader=yaml.FullLoader)
# validate... | 16,187 |
def test_upgrade_tz_noop(tz):
    """upgrade_tzinfo must pass non-shim, non-pytz zones through untouched.

    Identity (``is``) is asserted rather than equality: the zone object
    must be returned as-is, not copied or wrapped.
    """
    actual = pds_helpers.upgrade_tzinfo(tz)
    assert actual is tz | 16,188 |
def test_acq_func_set_acq_func_fails_wrong_acqfunc_name(ref_model_and_training_data):
"""
test that set_acq_func does not set acquisition function if wrong name chosen
"""
# load data and model
train_X = ref_model_and_training_data[0]
train_Y = ref_model_and_training_data[1]
# load pretrai... | 16,189 |
def huber_loss(x, delta=1.):
""" Standard Huber loss of parameter delta
https://en.wikipedia.org/wiki/Huber_loss
returns 0.5 * x^2 if |a| <= \delta
\delta * (|a| - 0.5 * \delta) o.w.
"""
if torch.abs(x) <= delta:
return 0.5 * (x ** 2)
else:
return delta * (torch.abs... | 16,190 |
def licenses_mapper(license, licenses, package): # NOQA
"""
Update package licensing and return package based on the `license` and
`licenses` values found in a package.
Licensing data structure has evolved over time and is a tad messy.
https://docs.npmjs.com/files/package.json#license
license(... | 16,191 |
def send_command(target, data):
"""sends a nudge api command"""
url = urljoin(settings.NUDGE_REMOTE_ADDRESS, target)
req = urllib2.Request(url, urllib.urlencode(data))
try:
return urllib2.urlopen(req)
except urllib2.HTTPError, e:
raise CommandException(
'An exception occu... | 16,192 |
def logistic_log_partial_ij(x_i, y_i, beta, j):
    """Partial derivative of the logistic log-likelihood.

    *i* indexes the data point (x_i, y_i); *j* indexes the coordinate of
    ``beta`` the derivative is taken with respect to.
    """
    predicted = logistic(dot(x_i, beta))
    residual = y_i - predicted
    return residual * x_i[j]
def expected_win(theirs, mine):
"""Compute the expected win rate of my strategy given theirs"""
assert abs(theirs.r + theirs.p + theirs.s - 1) < 0.001
assert abs(mine.r + mine.p + mine.s - 1) < 0.001
wins = theirs.r * mine.p + theirs.p * mine.s + theirs.s * mine.r
losses = theirs.r * mine.s + theirs... | 16,194 |
def rock_paper_scissors():
"""This function Handles the main operation of Rock, Paper, Scissors.
The User will input their choice against computer and win."""
player_points = 0
comp_points = 0
while player_points < 3 or comp_points < 3:
lst = ['rock', 'paper', 'scissors']
computer ... | 16,195 |
def get_first_where(data, compare):
"""
Gets first dictionary in list that fit to compare-dictionary.
:param data: List with dictionarys
:param compare: Dictionary with keys for comparison {'key';'expected value'}
:return: list with dictionarys that fit to compare
"""
l = get_all_where(data, compare)
i... | 16,196 |
def parse_megam_weights(s, features_count, explicit=True):
"""
Given the stdout output generated by ``megam`` when training a
model, return a ``numpy`` array containing the corresponding weight
vector. This function does not currently handle bias features.
"""
if numpy is None:
raise Va... | 16,197 |
def syntheticModeOn():
    """Turn synthetic mode on by setting the global flag to True.

    Delegates to ``setGlobalVariable`` to store the flag in the shared
    global-variable registry; returns nothing.
    """
    setGlobalVariable('syntheticModeFlag', True) | 16,198 |
def project_statistics(contributions):
"""Returns a dictionary containing statistics about all projects."""
projects = {}
for contribution in contributions:
# Don't count unreviewed contributions
if contribution["status"] == "unreviewed":
continue
project = contribution["... | 16,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.