| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 value: python) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
6,800 | google-research/batch-ppo | agents/scripts/utility.py | define_saver | def define_saver(exclude=None):
"""Create a saver for the variables we want to checkpoint.
Args:
exclude: List of regexes to match variable names to exclude.
Returns:
Saver object.
"""
variables = []
exclude = exclude or []
exclude = [re.compile(regex) for regex in exclude]
for variable in tf.... | python | def define_saver(exclude=None):
"""Create a saver for the variables we want to checkpoint.
Args:
exclude: List of regexes to match variable names to exclude.
Returns:
Saver object.
"""
variables = []
exclude = exclude or []
exclude = [re.compile(regex) for regex in exclude]
for variable in tf.... | [
"def",
"define_saver",
"(",
"exclude",
"=",
"None",
")",
":",
"variables",
"=",
"[",
"]",
"exclude",
"=",
"exclude",
"or",
"[",
"]",
"exclude",
"=",
"[",
"re",
".",
"compile",
"(",
"regex",
")",
"for",
"regex",
"in",
"exclude",
"]",
"for",
"variable"... | Create a saver for the variables we want to checkpoint.
Args:
exclude: List of regexes to match variable names to exclude.
Returns:
Saver object. | [
"Create",
"a",
"saver",
"for",
"the",
"variables",
"we",
"want",
"to",
"checkpoint",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L80-L97 |
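The `code` cell above is truncated at the variable loop. As a hedged sketch, not the repository's verbatim code, the regex-exclusion pattern visible in the preamble typically completes like this, assuming the TF1 API (`tf.global_variables`, `tf.train.Saver`); the `keep_checkpoint_every_n_hours` setting is an illustrative assumption:

```python
import re
import tensorflow as tf  # TF1-style API assumed


def define_saver_sketch(exclude=None):
    """Sketch: saver over all global variables not matching an exclusion regex."""
    exclude = [re.compile(regex) for regex in (exclude or [])]
    variables = [
        variable for variable in tf.global_variables()
        if not any(regex.match(variable.name) for regex in exclude)]
    # keep_checkpoint_every_n_hours is an assumed, illustrative setting.
    return tf.train.Saver(variables, keep_checkpoint_every_n_hours=5)
```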
6,801 | google-research/batch-ppo | agents/scripts/utility.py | initialize_variables | def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None):
"""Initialize or restore variables from a checkpoint if available.
Args:
sess: Session to initialize variables in.
saver: Saver to restore variables.
logdir: Directory to search for checkpoints.
checkpoint: Specify what ch... | python | def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None):
"""Initialize or restore variables from a checkpoint if available.
Args:
sess: Session to initialize variables in.
saver: Saver to restore variables.
logdir: Directory to search for checkpoints.
checkpoint: Specify what ch... | [
"def",
"initialize_variables",
"(",
"sess",
",",
"saver",
",",
"logdir",
",",
"checkpoint",
"=",
"None",
",",
"resume",
"=",
"None",
")",
":",
"sess",
".",
"run",
"(",
"tf",
".",
"group",
"(",
"tf",
".",
"local_variables_initializer",
"(",
")",
",",
"t... | Initialize or restore variables from a checkpoint if available.
Args:
sess: Session to initialize variables in.
saver: Saver to restore variables.
logdir: Directory to search for checkpoints.
checkpoint: Specify what checkpoint name to use; defaults to most recent.
resume: Whether to expect recov... | [
"Initialize",
"or",
"restore",
"variables",
"from",
"a",
"checkpoint",
"if",
"available",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L100-L129 |
6,802 | google-research/batch-ppo | agents/scripts/utility.py | save_config | def save_config(config, logdir=None):
"""Save a new configuration by name.
If a logging directory is specified, it will be created and the configuration
will be stored there. Otherwise, a log message will be printed.
Args:
config: Configuration object.
logdir: Location for writing summaries and checkp... | python | def save_config(config, logdir=None):
"""Save a new configuration by name.
If a logging directory is specified, it will be created and the configuration
will be stored there. Otherwise, a log message will be printed.
Args:
config: Configuration object.
logdir: Location for writing summaries and checkp... | [
"def",
"save_config",
"(",
"config",
",",
"logdir",
"=",
"None",
")",
":",
"if",
"logdir",
":",
"with",
"config",
".",
"unlocked",
":",
"config",
".",
"logdir",
"=",
"logdir",
"message",
"=",
"'Start a new run and write summaries and checkpoints to {}.'",
"tf",
... | Save a new configuration by name.
If a logging directory is specified, it will be created and the configuration
will be stored there. Otherwise, a log message will be printed.
Args:
config: Configuration object.
logdir: Location for writing summaries and checkpoints if specified.
Returns:
Configu... | [
"Save",
"a",
"new",
"configuration",
"by",
"name",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L132-L159 |
6,803 | google-research/batch-ppo | agents/scripts/utility.py | load_config | def load_config(logdir):
# pylint: disable=missing-raises-doc
"""Load a configuration from the log directory.
Args:
logdir: The logging directory containing the configuration file.
Raises:
IOError: The logging directory does not contain a configuration file.
Returns:
Configuration object.
"""... | python | def load_config(logdir):
# pylint: disable=missing-raises-doc
"""Load a configuration from the log directory.
Args:
logdir: The logging directory containing the configuration file.
Raises:
IOError: The logging directory does not contain a configuration file.
Returns:
Configuration object.
"""... | [
"def",
"load_config",
"(",
"logdir",
")",
":",
"# pylint: disable=missing-raises-doc",
"config_path",
"=",
"logdir",
"and",
"os",
".",
"path",
".",
"join",
"(",
"logdir",
",",
"'config.yaml'",
")",
"if",
"not",
"config_path",
"or",
"not",
"tf",
".",
"gfile",
... | Load a configuration from the log directory.
Args:
logdir: The logging directory containing the configuration file.
Raises:
IOError: The logging directory does not contain a configuration file.
Returns:
Configuration object. | [
"Load",
"a",
"configuration",
"from",
"the",
"log",
"directory",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L162-L185 |
6,804 | google-research/batch-ppo | agents/scripts/utility.py | set_up_logging | def set_up_logging():
"""Configure the TensorFlow logger."""
tf.logging.set_verbosity(tf.logging.INFO)
logging.getLogger('tensorflow').propagate = False | python | def set_up_logging():
"""Configure the TensorFlow logger."""
tf.logging.set_verbosity(tf.logging.INFO)
logging.getLogger('tensorflow').propagate = False | [
"def",
"set_up_logging",
"(",
")",
":",
"tf",
".",
"logging",
".",
"set_verbosity",
"(",
"tf",
".",
"logging",
".",
"INFO",
")",
"logging",
".",
"getLogger",
"(",
"'tensorflow'",
")",
".",
"propagate",
"=",
"False"
] | Configure the TensorFlow logger. | [
"Configure",
"the",
"TensorFlow",
"logger",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L188-L191 |
6,805 | google-research/batch-ppo | agents/scripts/visualize.py | _define_loop | def _define_loop(graph, eval_steps):
"""Create and configure an evaluation loop.
Args:
graph: Object providing graph elements via attributes.
eval_steps: Number of evaluation steps per epoch.
Returns:
Loop object.
"""
loop = tools.Loop(
None, graph.step, graph.should_log, graph.do_report, ... | python | def _define_loop(graph, eval_steps):
"""Create and configure an evaluation loop.
Args:
graph: Object providing graph elements via attributes.
eval_steps: Number of evaluation steps per epoch.
Returns:
Loop object.
"""
loop = tools.Loop(
None, graph.step, graph.should_log, graph.do_report, ... | [
"def",
"_define_loop",
"(",
"graph",
",",
"eval_steps",
")",
":",
"loop",
"=",
"tools",
".",
"Loop",
"(",
"None",
",",
"graph",
".",
"step",
",",
"graph",
".",
"should_log",
",",
"graph",
".",
"do_report",
",",
"graph",
".",
"force_reset",
")",
"loop",... | Create and configure an evaluation loop.
Args:
graph: Object providing graph elements via attributes.
eval_steps: Number of evaluation steps per epoch.
Returns:
Loop object. | [
"Create",
"and",
"configure",
"an",
"evaluation",
"loop",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/visualize.py#L74-L92 |
6,806 | google-research/batch-ppo | agents/scripts/visualize.py | visualize | def visualize(
logdir, outdir, num_agents, num_episodes, checkpoint=None,
env_processes=True):
"""Recover checkpoint and render videos from it.
Args:
logdir: Logging directory of the trained algorithm.
outdir: Directory to store rendered videos in.
num_agents: Number of environments to simulate... | python | def visualize(
logdir, outdir, num_agents, num_episodes, checkpoint=None,
env_processes=True):
"""Recover checkpoint and render videos from it.
Args:
logdir: Logging directory of the trained algorithm.
outdir: Directory to store rendered videos in.
num_agents: Number of environments to simulate... | [
"def",
"visualize",
"(",
"logdir",
",",
"outdir",
",",
"num_agents",
",",
"num_episodes",
",",
"checkpoint",
"=",
"None",
",",
"env_processes",
"=",
"True",
")",
":",
"config",
"=",
"utility",
".",
"load_config",
"(",
"logdir",
")",
"with",
"tf",
".",
"d... | Recover checkpoint and render videos from it.
Args:
logdir: Logging directory of the trained algorithm.
outdir: Directory to store rendered videos in.
num_agents: Number of environments to simulate in parallel.
num_episodes: Total number of episodes to simulate.
checkpoint: Checkpoint name to loa... | [
"Recover",
"checkpoint",
"and",
"render",
"videos",
"from",
"it",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/visualize.py#L95-L126 |
6,807 | google-research/batch-ppo | agents/scripts/visualize.py | main | def main(_):
"""Load a trained algorithm and render videos."""
utility.set_up_logging()
if not FLAGS.logdir or not FLAGS.outdir:
raise KeyError('You must specify logging and output directories.')
FLAGS.logdir = os.path.expanduser(FLAGS.logdir)
FLAGS.outdir = os.path.expanduser(FLAGS.outdir)
visualize(
... | python | def main(_):
"""Load a trained algorithm and render videos."""
utility.set_up_logging()
if not FLAGS.logdir or not FLAGS.outdir:
raise KeyError('You must specify logging and output directories.')
FLAGS.logdir = os.path.expanduser(FLAGS.logdir)
FLAGS.outdir = os.path.expanduser(FLAGS.outdir)
visualize(
... | [
"def",
"main",
"(",
"_",
")",
":",
"utility",
".",
"set_up_logging",
"(",
")",
"if",
"not",
"FLAGS",
".",
"logdir",
"or",
"not",
"FLAGS",
".",
"outdir",
":",
"raise",
"KeyError",
"(",
"'You must specify logging and outdirs directories.'",
")",
"FLAGS",
".",
... | Load a trained algorithm and render videos. | [
"Load",
"a",
"trained",
"algorithm",
"and",
"render",
"videos",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/visualize.py#L129-L138 |
6,808 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | reinit_nested_vars | def reinit_nested_vars(variables, indices=None):
"""Reset all variables in a nested tuple to zeros.
Args:
variables: Nested tuple or list of variables.
indices: Batch indices to reset, defaults to all.
Returns:
Operation.
"""
if isinstance(variables, (tuple, list)):
return tf.group(*[
... | python | def reinit_nested_vars(variables, indices=None):
"""Reset all variables in a nested tuple to zeros.
Args:
variables: Nested tuple or list of variables.
indices: Batch indices to reset, defaults to all.
Returns:
Operation.
"""
if isinstance(variables, (tuple, list)):
return tf.group(*[
... | [
"def",
"reinit_nested_vars",
"(",
"variables",
",",
"indices",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"variables",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"tf",
".",
"group",
"(",
"*",
"[",
"reinit_nested_vars",
"(",
"variable",... | Reset all variables in a nested tuple to zeros.
Args:
variables: Nested tuple or list of variables.
indices: Batch indices to reset, defaults to all.
Returns:
Operation. | [
"Reset",
"all",
"variables",
"in",
"a",
"nested",
"tuple",
"to",
"zeros",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L28-L45 |
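The visible tokens show the recursive `tf.group` pattern over nested tuples and lists; a minimal sketch of that recursion, with the `indices` branch written out as an assumption (resetting only selected batch rows via `tf.scatter_update`):

```python
import tensorflow as tf  # TF1-style API assumed


def reinit_nested_vars_sketch(variables, indices=None):
    # Recurse into tuples/lists and group the per-leaf reset operations.
    if isinstance(variables, (tuple, list)):
        return tf.group(*[
            reinit_nested_vars_sketch(variable, indices)
            for variable in variables])
    if indices is None:
        return variables.assign(tf.zeros_like(variables))
    # Assumed branch: zero out only the selected batch rows.
    zeros = tf.zeros(
        tf.concat([tf.shape(indices), tf.shape(variables)[1:]], 0),
        variables.dtype)
    return tf.scatter_update(variables, indices, zeros)
```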
6,809 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | assign_nested_vars | def assign_nested_vars(variables, tensors, indices=None):
"""Assign tensors to matching nested tuple of variables.
Args:
variables: Nested tuple or list of variables to update.
tensors: Nested tuple or list of tensors to assign.
indices: Batch indices to assign to; defaults to all.
Returns:
Opera... | python | def assign_nested_vars(variables, tensors, indices=None):
"""Assign tensors to matching nested tuple of variables.
Args:
variables: Nested tuple or list of variables to update.
tensors: Nested tuple or list of tensors to assign.
indices: Batch indices to assign to; defaults to all.
Returns:
Opera... | [
"def",
"assign_nested_vars",
"(",
"variables",
",",
"tensors",
",",
"indices",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"variables",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"tf",
".",
"group",
"(",
"*",
"[",
"assign_nested_vars",
... | Assign tensors to matching nested tuple of variables.
Args:
variables: Nested tuple or list of variables to update.
tensors: Nested tuple or list of tensors to assign.
indices: Batch indices to assign to; defaults to all.
Returns:
Operation. | [
"Assign",
"tensors",
"to",
"matching",
"nested",
"tuple",
"of",
"variables",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L48-L66 |
6,810 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | discounted_return | def discounted_return(reward, length, discount):
"""Discounted Monte-Carlo returns."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
return_ = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur + discount * agg,
tf.transpose(tf.reverse(... | python | def discounted_return(reward, length, discount):
"""Discounted Monte-Carlo returns."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
return_ = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur + discount * agg,
tf.transpose(tf.reverse(... | [
"def",
"discounted_return",
"(",
"reward",
",",
"length",
",",
"discount",
")",
":",
"timestep",
"=",
"tf",
".",
"range",
"(",
"reward",
".",
"shape",
"[",
"1",
"]",
".",
"value",
")",
"mask",
"=",
"tf",
".",
"cast",
"(",
"timestep",
"[",
"None",
"... | Discounted Monte-Carlo returns. | [
"Discounted",
"Monte",
"-",
"Carlo",
"returns",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L69-L77 |
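The TensorFlow code is truncated, but the computation the docstring names is standard; a NumPy sketch of masked discounted Monte-Carlo returns over a `[batch, time]` reward tensor:

```python
import numpy as np


def discounted_return_np(reward, length, discount):
    """Sketch: G_t = r_t + discount * G_{t+1}, masked beyond episode length."""
    batch, time = reward.shape
    mask = np.arange(time)[None, :] < length[:, None]
    reward = reward * mask
    return_ = np.zeros_like(reward)
    running = np.zeros(batch, dtype=reward.dtype)
    for t in reversed(range(time)):
        running = reward[:, t] + discount * running
        return_[:, t] = running
    return return_
```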
6,811 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | fixed_step_return | def fixed_step_return(reward, value, length, discount, window):
"""N-step discounted return."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
return_ = tf.zeros_like(reward)
for _ in range(window):
return_ += reward
reward = discount * tf.co... | python | def fixed_step_return(reward, value, length, discount, window):
"""N-step discounted return."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
return_ = tf.zeros_like(reward)
for _ in range(window):
return_ += reward
reward = discount * tf.co... | [
"def",
"fixed_step_return",
"(",
"reward",
",",
"value",
",",
"length",
",",
"discount",
",",
"window",
")",
":",
"timestep",
"=",
"tf",
".",
"range",
"(",
"reward",
".",
"shape",
"[",
"1",
"]",
".",
"value",
")",
"mask",
"=",
"tf",
".",
"cast",
"(... | N-step discounted return. | [
"N",
"-",
"step",
"discounted",
"return",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L80-L91 |
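The visible loop accumulates the reward and then re-discounts a shifted copy; a NumPy sketch of the N-step return it implements, with the final bootstrap from `value` written out as an assumption consistent with that pattern:

```python
import numpy as np


def fixed_step_return_np(reward, value, length, discount, window):
    """Sketch: sum `window` discounted rewards, then bootstrap from the
    value estimate `window` steps ahead (zero-padded past the sequence)."""
    time = reward.shape[1]
    mask = np.arange(time)[None, :] < length[:, None]
    reward = reward * mask
    return_ = np.zeros_like(reward)
    for _ in range(window):
        return_ += reward
        # Shift rewards one step toward the present and discount them.
        reward = discount * np.concatenate(
            [reward[:, 1:], np.zeros_like(reward[:, -1:])], 1)
    # Assumed bootstrap term, following the visible shift-and-discount pattern.
    bootstrap = discount ** window * np.concatenate(
        [value[:, window:], np.zeros_like(value[:, :window])], 1)
    return (return_ + bootstrap) * mask
```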
6,812 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | lambda_return | def lambda_return(reward, value, length, discount, lambda_):
"""TD-lambda returns."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
sequence = mask * reward + discount * value * (1 - lambda_)
discount = mask * discount * lambda_
sequence = tf.stac... | python | def lambda_return(reward, value, length, discount, lambda_):
"""TD-lambda returns."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
sequence = mask * reward + discount * value * (1 - lambda_)
discount = mask * discount * lambda_
sequence = tf.stac... | [
"def",
"lambda_return",
"(",
"reward",
",",
"value",
",",
"length",
",",
"discount",
",",
"lambda_",
")",
":",
"timestep",
"=",
"tf",
".",
"range",
"(",
"reward",
".",
"shape",
"[",
"1",
"]",
".",
"value",
")",
"mask",
"=",
"tf",
".",
"cast",
"(",
... | TD-lambda returns. | [
"TD",
"-",
"lambda",
"returns",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L94-L105 |
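The repository's version folds the same recursion through `tf.scan`; a NumPy sketch of TD(lambda) returns, where the exact time-alignment of the bootstrap value is an assumption (here `next_value[:, t]` stands for V at step t+1):

```python
import numpy as np


def lambda_return_np(reward, next_value, length, discount, lambda_):
    """Sketch: G_t = r_t + discount * ((1-lambda) * V_{t+1} + lambda * G_{t+1})."""
    time = reward.shape[1]
    mask = np.arange(time)[None, :] < length[:, None]
    reward = reward * mask
    next_value = next_value * mask
    return_ = np.zeros_like(reward)
    running = np.zeros(reward.shape[0], dtype=reward.dtype)
    for t in reversed(range(time)):
        running = reward[:, t] + discount * (
            (1 - lambda_) * next_value[:, t] + lambda_ * running)
        return_[:, t] = running
    return return_ * mask
```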
6,813 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | lambda_advantage | def lambda_advantage(reward, value, length, discount, gae_lambda):
"""Generalized Advantage Estimation."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
next_value = tf.concat([value[:, 1:], tf.zeros_like(value[:, -1:])], 1)
delta = reward + discoun... | python | def lambda_advantage(reward, value, length, discount, gae_lambda):
"""Generalized Advantage Estimation."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
next_value = tf.concat([value[:, 1:], tf.zeros_like(value[:, -1:])], 1)
delta = reward + discoun... | [
"def",
"lambda_advantage",
"(",
"reward",
",",
"value",
",",
"length",
",",
"discount",
",",
"gae_lambda",
")",
":",
"timestep",
"=",
"tf",
".",
"range",
"(",
"reward",
".",
"shape",
"[",
"1",
"]",
".",
"value",
")",
"mask",
"=",
"tf",
".",
"cast",
... | Generalized Advantage Estimation. | [
"Generalized",
"Advantage",
"Estimation",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L108-L118 |
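The visible code builds `next_value` by shifting the value sequence and computes TD errors from it; a NumPy sketch of Generalized Advantage Estimation following that structure:

```python
import numpy as np


def lambda_advantage_np(reward, value, length, discount, gae_lambda):
    """Sketch of GAE: discounted, lambda-decayed sum of TD errors
    delta_t = r_t + discount * V_{t+1} - V_t."""
    time = reward.shape[1]
    mask = np.arange(time)[None, :] < length[:, None]
    next_value = np.concatenate([value[:, 1:], np.zeros_like(value[:, -1:])], 1)
    delta = (reward + discount * next_value - value) * mask
    advantage = np.zeros_like(reward)
    running = np.zeros(reward.shape[0], dtype=reward.dtype)
    for t in reversed(range(time)):
        running = delta[:, t] + discount * gae_lambda * running
        advantage[:, t] = running
    return advantage
```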
6,814 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | available_gpus | def available_gpus():
"""List of GPU device names detected by TensorFlow."""
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU'] | python | def available_gpus():
"""List of GPU device names detected by TensorFlow."""
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU'] | [
"def",
"available_gpus",
"(",
")",
":",
"local_device_protos",
"=",
"device_lib",
".",
"list_local_devices",
"(",
")",
"return",
"[",
"x",
".",
"name",
"for",
"x",
"in",
"local_device_protos",
"if",
"x",
".",
"device_type",
"==",
"'GPU'",
"]"
] | List of GPU device names detected by TensorFlow. | [
"List",
"of",
"GPU",
"device",
"names",
"detected",
"by",
"TensorFlow",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L121-L124 |
6,815 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | gradient_summaries | def gradient_summaries(grad_vars, groups=None, scope='gradients'):
"""Create histogram summaries of the gradient.
Summaries can be grouped via regexes matching variable names.
Args:
grad_vars: List of (gradient, variable) tuples as returned by optimizers.
groups: Mapping of name to regex for grouping s... | python | def gradient_summaries(grad_vars, groups=None, scope='gradients'):
"""Create histogram summaries of the gradient.
Summaries can be grouped via regexes matching variable names.
Args:
grad_vars: List of (gradient, variable) tuples as returned by optimizers.
groups: Mapping of name to regex for grouping s... | [
"def",
"gradient_summaries",
"(",
"grad_vars",
",",
"groups",
"=",
"None",
",",
"scope",
"=",
"'gradients'",
")",
":",
"groups",
"=",
"groups",
"or",
"{",
"r'all'",
":",
"r'.*'",
"}",
"grouped",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"... | Create histogram summaries of the gradient.
Summaries can be grouped via regexes matching variable names.
Args:
grad_vars: List of (gradient, variable) tuples as returned by optimizers.
groups: Mapping of name to regex for grouping summaries.
scope: Name scope for this operation.
Returns:
Summ... | [
"Create",
"histogram",
"summaries",
"of",
"the",
"gradient",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L127-L157 |
6,816 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | variable_summaries | def variable_summaries(vars_, groups=None, scope='weights'):
"""Create histogram summaries for the provided variables.
Summaries can be grouped via regexes matching variable names.
Args:
vars_: List of variables to summarize.
groups: Mapping of name to regex for grouping summaries.
scope: Name scop... | python | def variable_summaries(vars_, groups=None, scope='weights'):
"""Create histogram summaries for the provided variables.
Summaries can be grouped via regexes matching variable names.
Args:
vars_: List of variables to summarize.
groups: Mapping of name to regex for grouping summaries.
scope: Name scop... | [
"def",
"variable_summaries",
"(",
"vars_",
",",
"groups",
"=",
"None",
",",
"scope",
"=",
"'weights'",
")",
":",
"groups",
"=",
"groups",
"or",
"{",
"r'all'",
":",
"r'.*'",
"}",
"grouped",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
... | Create histogram summaries for the provided variables.
Summaries can be grouped via regexes matching variable names.
Args:
vars_: List of variables to summarize.
groups: Mapping of name to regex for grouping summaries.
scope: Name scope for this operation.
Returns:
Summary tensor. | [
"Create",
"histogram",
"summaries",
"for",
"the",
"provided",
"variables",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L160-L189 |
6,817 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | set_dimension | def set_dimension(tensor, axis, value):
"""Set the length of a tensor along the specified dimension.
Args:
tensor: Tensor to define shape of.
axis: Dimension to set the static shape for.
value: Integer holding the length.
Raises:
ValueError: When the tensor already has a different length specifi... | python | def set_dimension(tensor, axis, value):
"""Set the length of a tensor along the specified dimension.
Args:
tensor: Tensor to define shape of.
axis: Dimension to set the static shape for.
value: Integer holding the length.
Raises:
ValueError: When the tensor already has a different length specifi... | [
"def",
"set_dimension",
"(",
"tensor",
",",
"axis",
",",
"value",
")",
":",
"shape",
"=",
"tensor",
".",
"shape",
".",
"as_list",
"(",
")",
"if",
"shape",
"[",
"axis",
"]",
"not",
"in",
"(",
"value",
",",
"None",
")",
":",
"message",
"=",
"'Cannot ... | Set the length of a tensor along the specified dimension.
Args:
tensor: Tensor to define shape of.
axis: Dimension to set the static shape for.
value: Integer holding the length.
Raises:
ValueError: When the tensor already has a different length specified. | [
"Set",
"the",
"length",
"of",
"a",
"tensor",
"along",
"the",
"specified",
"dimension",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L192-L208 |
6,818 | google-research/batch-ppo | agents/scripts/configs.py | default | def default():
"""Default configuration for PPO."""
# General
algorithm = algorithms.PPO
num_agents = 30
eval_episodes = 30
use_gpu = False
# Environment
normalize_ranges = True
# Network
network = networks.feed_forward_gaussian
weight_summaries = dict(
all=r'.*', policy=r'.*/policy/.*', val... | python | def default():
"""Default configuration for PPO."""
# General
algorithm = algorithms.PPO
num_agents = 30
eval_episodes = 30
use_gpu = False
# Environment
normalize_ranges = True
# Network
network = networks.feed_forward_gaussian
weight_summaries = dict(
all=r'.*', policy=r'.*/policy/.*', val... | [
"def",
"default",
"(",
")",
":",
"# General",
"algorithm",
"=",
"algorithms",
".",
"PPO",
"num_agents",
"=",
"30",
"eval_episodes",
"=",
"30",
"use_gpu",
"=",
"False",
"# Environment",
"normalize_ranges",
"=",
"True",
"# Network",
"network",
"=",
"networks",
"... | Default configuration for PPO. | [
"Default",
"configuration",
"for",
"PPO",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L29-L57 |
6,819 | google-research/batch-ppo | agents/scripts/configs.py | pendulum | def pendulum():
"""Configuration for the pendulum classic control task."""
locals().update(default())
# Environment
env = 'Pendulum-v0'
max_length = 200
steps = 1e6 # 1M
# Optimization
batch_size = 20
chunk_length = 50
return locals() | python | def pendulum():
"""Configuration for the pendulum classic control task."""
locals().update(default())
# Environment
env = 'Pendulum-v0'
max_length = 200
steps = 1e6 # 1M
# Optimization
batch_size = 20
chunk_length = 50
return locals() | [
"def",
"pendulum",
"(",
")",
":",
"locals",
"(",
")",
".",
"update",
"(",
"default",
"(",
")",
")",
"# Environment",
"env",
"=",
"'Pendulum-v0'",
"max_length",
"=",
"200",
"steps",
"=",
"1e6",
"# 1M",
"# Optimization",
"batch_size",
"=",
"20",
"chunk_lengt... | Configuration for the pendulum classic control task. | [
"Configuration",
"for",
"the",
"pendulum",
"classic",
"control",
"task",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L60-L70 |
6,820 | google-research/batch-ppo | agents/scripts/configs.py | cartpole | def cartpole():
"""Configuration for the cart pole classic control task."""
locals().update(default())
# Environment
env = 'CartPole-v1'
max_length = 500
steps = 2e5 # 200k
normalize_ranges = False # The env reports wrong ranges.
# Network
network = networks.feed_forward_categorical
return locals(... | python | def cartpole():
"""Configuration for the cart pole classic control task."""
locals().update(default())
# Environment
env = 'CartPole-v1'
max_length = 500
steps = 2e5 # 200k
normalize_ranges = False # The env reports wrong ranges.
# Network
network = networks.feed_forward_categorical
return locals(... | [
"def",
"cartpole",
"(",
")",
":",
"locals",
"(",
")",
".",
"update",
"(",
"default",
"(",
")",
")",
"# Environment",
"env",
"=",
"'CartPole-v1'",
"max_length",
"=",
"500",
"steps",
"=",
"2e5",
"# 200k",
"normalize_ranges",
"=",
"False",
"# The env reports wr... | Configuration for the cart pole classic control task. | [
"Configuration",
"for",
"the",
"cart",
"pole",
"classic",
"control",
"task",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L73-L83 |
6,821 | google-research/batch-ppo | agents/scripts/configs.py | reacher | def reacher():
"""Configuration for MuJoCo's reacher task."""
locals().update(default())
# Environment
env = 'Reacher-v2'
max_length = 1000
steps = 5e6 # 5M
discount = 0.985
update_every = 60
return locals() | python | def reacher():
"""Configuration for MuJoCo's reacher task."""
locals().update(default())
# Environment
env = 'Reacher-v2'
max_length = 1000
steps = 5e6 # 5M
discount = 0.985
update_every = 60
return locals() | [
"def",
"reacher",
"(",
")",
":",
"locals",
"(",
")",
".",
"update",
"(",
"default",
"(",
")",
")",
"# Environment",
"env",
"=",
"'Reacher-v2'",
"max_length",
"=",
"1000",
"steps",
"=",
"5e6",
"# 5M",
"discount",
"=",
"0.985",
"update_every",
"=",
"60",
... | Configuration for MuJoCo's reacher task. | [
"Configuration",
"for",
"MuJoCo",
"s",
"reacher",
"task",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L86-L95 |
6,822 | google-research/batch-ppo | agents/scripts/configs.py | bullet_ant | def bullet_ant():
"""Configuration for PyBullet's ant task."""
locals().update(default())
# Environment
import pybullet_envs # noqa pylint: disable=unused-import
env = 'AntBulletEnv-v0'
max_length = 1000
steps = 3e7 # 30M
update_every = 60
return locals() | python | def bullet_ant():
"""Configuration for PyBullet's ant task."""
locals().update(default())
# Environment
import pybullet_envs # noqa pylint: disable=unused-import
env = 'AntBulletEnv-v0'
max_length = 1000
steps = 3e7 # 30M
update_every = 60
return locals() | [
"def",
"bullet_ant",
"(",
")",
":",
"locals",
"(",
")",
".",
"update",
"(",
"default",
"(",
")",
")",
"# Environment",
"import",
"pybullet_envs",
"# noqa pylint: disable=unused-import",
"env",
"=",
"'AntBulletEnv-v0'",
"max_length",
"=",
"1000",
"steps",
"=",
"3... | Configuration for PyBullet's ant task. | [
"Configuration",
"for",
"PyBullet",
"s",
"ant",
"task",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L151-L160 |
6,823 | google-research/batch-ppo | agents/tools/batch_env.py | BatchEnv.step | def step(self, actions):
"""Forward a batch of actions to the wrapped environments.
Args:
actions: Batched action to apply to the environment.
Raises:
ValueError: Invalid actions.
Returns:
Batch of observations, rewards, and done flags.
"""
for index, (env, action) in enumer... | python | def step(self, actions):
"""Forward a batch of actions to the wrapped environments.
Args:
actions: Batched action to apply to the environment.
Raises:
ValueError: Invalid actions.
Returns:
Batch of observations, rewards, and done flags.
"""
for index, (env, action) in enumer... | [
"def",
"step",
"(",
"self",
",",
"actions",
")",
":",
"for",
"index",
",",
"(",
"env",
",",
"action",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"self",
".",
"_envs",
",",
"actions",
")",
")",
":",
"if",
"not",
"env",
".",
"action_space",
".",
"con... | Forward a batch of actions to the wrapped environments.
Args:
actions: Batched action to apply to the environment.
Raises:
ValueError: Invalid actions.
Returns:
Batch of observations, rewards, and done flags. | [
"Forward",
"a",
"batch",
"of",
"actions",
"to",
"the",
"wrapped",
"environments",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/batch_env.py#L69-L99 |
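A hedged, in-process sketch of the batched step the docstring describes: validate every action against its environment's action space, step all environments, and stack the transitions (the real class also supports non-blocking steps for external processes):

```python
import numpy as np


def batch_step_sketch(envs, actions):
    """Sketch: step a list of Gym-style environments in lockstep."""
    for index, (env, action) in enumerate(zip(envs, actions)):
        if not env.action_space.contains(action):
            message = 'Invalid action for batch index {}: {}'
            raise ValueError(message.format(index, action))
    transitions = [env.step(action) for env, action in zip(envs, actions)]
    observs, rewards, dones, infos = zip(*transitions)
    return np.stack(observs), np.stack(rewards), np.stack(dones), tuple(infos)
```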
6,824 | google-research/batch-ppo | agents/tools/wrappers.py | ExternalProcess.call | def call(self, name, *args, **kwargs):
"""Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
Promise object that block... | python | def call(self, name, *args, **kwargs):
"""Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
Promise object that block... | [
"def",
"call",
"(",
"self",
",",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"payload",
"=",
"name",
",",
"args",
",",
"kwargs",
"self",
".",
"_conn",
".",
"send",
"(",
"(",
"self",
".",
"_CALL",
",",
"payload",
")",
")",
"retu... | Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
Promise object that blocks and provides the return value when called. | [
"Asynchronously",
"call",
"a",
"method",
"of",
"the",
"external",
"environment",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L363-L376 |
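A minimal sketch of the call/promise pattern the docstring describes, assuming one end of a `multiprocessing.Pipe()` connected to the worker: `call` sends a `(name, args, kwargs)` payload and returns a callable that blocks on the reply when invoked.

```python
_CALL = 1  # assumed message tag, mirroring the _CALL constant in the source


class RemoteCallerSketch(object):
    """Sketch: asynchronous method calls over a pipe, resolved on demand."""

    def __init__(self, conn):
        self._conn = conn  # one end of a multiprocessing.Pipe()

    def call(self, name, *args, **kwargs):
        payload = name, args, kwargs
        self._conn.send((_CALL, payload))
        return self._receive  # a promise: calling it blocks for the result

    def _receive(self):
        message, payload = self._conn.recv()
        return payload
```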
6,825 | google-research/batch-ppo | agents/tools/wrappers.py | ExternalProcess.close | def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join() | python | def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join() | [
"def",
"close",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_conn",
".",
"send",
"(",
"(",
"self",
".",
"_CLOSE",
",",
"None",
")",
")",
"self",
".",
"_conn",
".",
"close",
"(",
")",
"except",
"IOError",
":",
"# The connection was already closed."... | Send a close message to the external process and join it. | [
"Send",
"a",
"close",
"message",
"to",
"the",
"external",
"process",
"and",
"join",
"it",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L378-L386 |
6,826 | google-research/batch-ppo | agents/tools/wrappers.py | ExternalProcess.step | def step(self, action, blocking=True):
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple.
"""
promise = self.... | python | def step(self, action, blocking=True):
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple.
"""
promise = self.... | [
"def",
"step",
"(",
"self",
",",
"action",
",",
"blocking",
"=",
"True",
")",
":",
"promise",
"=",
"self",
".",
"call",
"(",
"'step'",
",",
"action",
")",
"if",
"blocking",
":",
"return",
"promise",
"(",
")",
"else",
":",
"return",
"promise"
] | Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple. | [
"Step",
"the",
"environment",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L388-L403 |
6,827 | google-research/batch-ppo | agents/tools/wrappers.py | ExternalProcess._receive | def _receive(self):
"""Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The received message is of an unknown type.
Returns:
Payload object of the message.
"""
message, payload = sel... | python | def _receive(self):
"""Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The received message is of an unknown type.
Returns:
Payload object of the message.
"""
message, payload = sel... | [
"def",
"_receive",
"(",
"self",
")",
":",
"message",
",",
"payload",
"=",
"self",
".",
"_conn",
".",
"recv",
"(",
")",
"# Re-raise exceptions in the main process.",
"if",
"message",
"==",
"self",
".",
"_EXCEPTION",
":",
"stacktrace",
"=",
"payload",
"raise",
... | Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The received message is of an unknown type.
Returns:
Payload object of the message. | [
"Wait",
"for",
"a",
"message",
"from",
"the",
"worker",
"process",
"and",
"return",
"its",
"payload",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L421-L438 |
6,828 | google-research/batch-ppo | agents/tools/wrappers.py | ExternalProcess._worker | def _worker(self, constructor, conn):
"""The process waits for actions and sends back environment results.
Args:
constructor: Constructor for the OpenAI Gym environment.
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type.
... | python | def _worker(self, constructor, conn):
"""The process waits for actions and sends back environment results.
Args:
constructor: Constructor for the OpenAI Gym environment.
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type.
... | [
"def",
"_worker",
"(",
"self",
",",
"constructor",
",",
"conn",
")",
":",
"try",
":",
"env",
"=",
"constructor",
"(",
")",
"while",
"True",
":",
"try",
":",
"# Only block for short times to have keyboard exceptions be raised.",
"if",
"not",
"conn",
".",
"poll",
... | The process waits for actions and sends back environment results.
Args:
constructor: Constructor for the OpenAI Gym environment.
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type. | [
"The",
"process",
"waits",
"for",
"actions",
"and",
"sends",
"back",
"environment",
"results",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L440-L478 |
6,829 | google-research/batch-ppo | agents/tools/wrappers.py | ConvertTo32Bit.step | def step(self, action):
"""Forward action to the wrapped environment.
Args:
action: Action to apply to the environment.
Raises:
ValueError: Invalid action.
Returns:
Converted observation, converted reward, done flag, and info object.
"""
observ, reward, done, info = self._en... | python | def step(self, action):
"""Forward action to the wrapped environment.
Args:
action: Action to apply to the environment.
Raises:
ValueError: Invalid action.
Returns:
Converted observation, converted reward, done flag, and info object.
"""
observ, reward, done, info = self._en... | [
"def",
"step",
"(",
"self",
",",
"action",
")",
":",
"observ",
",",
"reward",
",",
"done",
",",
"info",
"=",
"self",
".",
"_env",
".",
"step",
"(",
"action",
")",
"observ",
"=",
"self",
".",
"_convert_observ",
"(",
"observ",
")",
"reward",
"=",
"se... | Forward action to the wrapped environment.
Args:
action: Action to apply to the environment.
Raises:
ValueError: Invalid action.
Returns:
Converted observation, converted reward, done flag, and info object. | [
"Forward",
"action",
"to",
"the",
"wrapped",
"environment",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L503-L518 |
6,830 | google-research/batch-ppo | agents/tools/wrappers.py | ConvertTo32Bit._convert_observ | def _convert_observ(self, observ):
"""Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type.
"""
if not np.isfinite(observ).all():
raise ValueError... | python | def _convert_observ(self, observ):
"""Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type.
"""
if not np.isfinite(observ).all():
raise ValueError... | [
"def",
"_convert_observ",
"(",
"self",
",",
"observ",
")",
":",
"if",
"not",
"np",
".",
"isfinite",
"(",
"observ",
")",
".",
"all",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Infinite observation encountered.'",
")",
"if",
"observ",
".",
"dtype",
"==",
... | Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type. | [
"Convert",
"the",
"observation",
"to",
"32",
"bits",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L530-L548 |
6,831 | google-research/batch-ppo | agents/tools/wrappers.py | ConvertTo32Bit._convert_reward | def _convert_reward(self, reward):
"""Convert the reward to 32 bits.
Args:
reward: Numpy reward.
Raises:
ValueError: Rewards contain infinite values.
Returns:
Numpy reward with 32-bit data type.
"""
if not np.isfinite(reward).all():
raise ValueError('Infinite reward en... | python | def _convert_reward(self, reward):
"""Convert the reward to 32 bits.
Args:
reward: Numpy reward.
Raises:
ValueError: Rewards contain infinite values.
Returns:
Numpy reward with 32-bit data type.
"""
if not np.isfinite(reward).all():
raise ValueError('Infinite reward en... | [
"def",
"_convert_reward",
"(",
"self",
",",
"reward",
")",
":",
"if",
"not",
"np",
".",
"isfinite",
"(",
"reward",
")",
".",
"all",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Infinite reward encountered.'",
")",
"return",
"np",
".",
"array",
"(",
"rewa... | Convert the reward to 32 bits.
Args:
reward: Numpy reward.
Raises:
ValueError: Rewards contain infinite values.
Returns:
Numpy reward with 32-bit data type. | [
"Convert",
"the",
"reward",
"to",
"32",
"bits",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L550-L564 |
6,832 | google-research/batch-ppo | agents/tools/streaming_mean.py | StreamingMean.value | def value(self):
"""The current value of the mean."""
return self._sum / tf.cast(self._count, self._dtype) | python | def value(self):
"""The current value of the mean."""
return self._sum / tf.cast(self._count, self._dtype) | [
"def",
"value",
"(",
"self",
")",
":",
"return",
"self",
".",
"_sum",
"/",
"tf",
".",
"cast",
"(",
"self",
".",
"_count",
",",
"self",
".",
"_dtype",
")"
] | The current value of the mean. | [
"The",
"current",
"value",
"of",
"the",
"mean",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/streaming_mean.py#L42-L44 |
6,833 | google-research/batch-ppo | agents/tools/streaming_mean.py | StreamingMean.submit | def submit(self, value):
"""Submit a single or batch tensor to refine the streaming mean."""
# Add a batch dimension if necessary.
if value.shape.ndims == self._sum.shape.ndims:
value = value[None, ...]
return tf.group(
self._sum.assign_add(tf.reduce_sum(value, 0)),
self._count.ass... | python | def submit(self, value):
"""Submit a single or batch tensor to refine the streaming mean."""
# Add a batch dimension if necessary.
if value.shape.ndims == self._sum.shape.ndims:
value = value[None, ...]
return tf.group(
self._sum.assign_add(tf.reduce_sum(value, 0)),
self._count.ass... | [
"def",
"submit",
"(",
"self",
",",
"value",
")",
":",
"# Add a batch dimension if necessary.",
"if",
"value",
".",
"shape",
".",
"ndims",
"==",
"self",
".",
"_sum",
".",
"shape",
".",
"ndims",
":",
"value",
"=",
"value",
"[",
"None",
",",
"...",
"]",
"... | Submit a single or batch tensor to refine the streaming mean. | [
"Submit",
"a",
"single",
"or",
"batch",
"tensor",
"to",
"refine",
"the",
"streaming",
"mean",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/streaming_mean.py#L51-L58 |
6,834 | google-research/batch-ppo | agents/tools/streaming_mean.py | StreamingMean.clear | def clear(self):
"""Return the mean estimate and reset the streaming statistics."""
value = self._sum / tf.cast(self._count, self._dtype)
with tf.control_dependencies([value]):
reset_value = self._sum.assign(tf.zeros_like(self._sum))
reset_count = self._count.assign(0)
with tf.control_depend... | python | def clear(self):
"""Return the mean estimate and reset the streaming statistics."""
value = self._sum / tf.cast(self._count, self._dtype)
with tf.control_dependencies([value]):
reset_value = self._sum.assign(tf.zeros_like(self._sum))
reset_count = self._count.assign(0)
with tf.control_depend... | [
"def",
"clear",
"(",
"self",
")",
":",
"value",
"=",
"self",
".",
"_sum",
"/",
"tf",
".",
"cast",
"(",
"self",
".",
"_count",
",",
"self",
".",
"_dtype",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"value",
"]",
")",
":",
"reset_value... | Return the mean estimate and reset the streaming statistics. | [
"Return",
"the",
"mean",
"estimate",
"and",
"reset",
"the",
"streaming",
"statistics",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/streaming_mean.py#L60-L67 |
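A plain-NumPy sketch of the same interface, useful for checking the semantics: `submit` accepts single values or batches, `value` reads the running mean, and `clear` returns the mean before resetting the statistics.

```python
import numpy as np


class StreamingMeanSketch(object):
    """Sketch of StreamingMean without the TensorFlow graph machinery."""

    def __init__(self, shape, dtype=np.float64):
        self._sum = np.zeros(shape, dtype)
        self._count = 0

    @property
    def value(self):
        return self._sum / self._count

    def submit(self, value):
        value = np.asarray(value)
        if value.ndim == self._sum.ndim:  # add a batch dimension if needed
            value = value[None, ...]
        self._sum = self._sum + value.sum(0)
        self._count += len(value)

    def clear(self):
        value = self._sum / self._count
        self._sum = np.zeros_like(self._sum)
        self._count = 0
        return value
```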
6,835 | google-research/batch-ppo | agents/tools/nested.py | zip_ | def zip_(*structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc
"""Combine corresponding elements in multiple nested structure to tuples.
The nested structures can consist of any combination of lists, tuples, and
dicts. All provided structures must have the same nesting.
Args:
*... | python | def zip_(*structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc
"""Combine corresponding elements in multiple nested structure to tuples.
The nested structures can consist of any combination of lists, tuples, and
dicts. All provided structures must have the same nesting.
Args:
*... | [
"def",
"zip_",
"(",
"*",
"structures",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=differing-param-doc,missing-param-doc",
"# Named keyword arguments are not allowed after *args in Python 2.",
"flatten",
"=",
"kwargs",
".",
"pop",
"(",
"'flatten'",
",",
"False",
... | Combine corresponding elements in multiple nested structures to tuples.
The nested structures can consist of any combination of lists, tuples, and
dicts. All provided structures must have the same nesting.
Args:
*structures: Nested structures.
flatten: Whether to flatten the resulting structure into a tu... | [
"Combine",
"corresponding",
"elements",
"in",
"multiple",
"nested",
"structure",
"to",
"tuples",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/nested.py#L29-L50 |
6,836 | google-research/batch-ppo | agents/tools/nested.py | map_ | def map_(function, *structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc
"""Apply a function to every element in a nested structure.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The neste... | python | def map_(function, *structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc
"""Apply a function to every element in a nested structure.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The neste... | [
"def",
"map_",
"(",
"function",
",",
"*",
"structures",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=differing-param-doc,missing-param-doc",
"# Named keyword arguments are not allowed after *args in Python 2.",
"flatten",
"=",
"kwargs",
".",
"pop",
"(",
"'flatten'"... | Apply a function to every element in a nested structure.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The nested
structure can consist of any combination of lists, tuples, and dicts.
Args:
function: The funct... | [
"Apply",
"a",
"function",
"to",
"every",
"element",
"in",
"a",
"nested",
"structure",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/nested.py#L53-L98 |
6,837 | google-research/batch-ppo | agents/tools/nested.py | flatten_ | def flatten_(structure):
"""Combine all leaves of a nested structure into a tuple.
The nested structure can consist of any combination of tuples, lists, and
dicts. Dictionary keys will be discarded but values will be ordered by the
sorting of the keys.
Args:
structure: Nested structure.
Returns:
Fla... | python | def flatten_(structure):
"""Combine all leaves of a nested structure into a tuple.
The nested structure can consist of any combination of tuples, lists, and
dicts. Dictionary keys will be discarded but values will be ordered by the
sorting of the keys.
Args:
structure: Nested structure.
Returns:
Fla... | [
"def",
"flatten_",
"(",
"structure",
")",
":",
"if",
"isinstance",
"(",
"structure",
",",
"dict",
")",
":",
"if",
"structure",
":",
"structure",
"=",
"zip",
"(",
"*",
"sorted",
"(",
"structure",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",... | Combine all leaves of a nested structure into a tuple.
The nested structure can consist of any combination of tuples, lists, and
dicts. Dictionary keys will be discarded but values will be ordered by the
sorting of the keys.
Args:
structure: Nested structure.
Returns:
Flat tuple. | [
"Combine",
"all",
"leaves",
"of",
"a",
"nested",
"structure",
"into",
"a",
"tuple",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/nested.py#L101-L125 |
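A pure-Python sketch of `flatten_`, matching the documented behavior (dict values ordered by sorted key, then recursive flattening into a flat tuple):

```python
def flatten_sketch(structure):
    """Sketch: collapse nested tuples/lists/dicts into a flat tuple."""
    if isinstance(structure, dict):
        # Discard keys; order values by the sorted keys.
        structure = tuple(value for _, value in sorted(structure.items()))
    if isinstance(structure, (tuple, list)):
        result = ()
        for element in structure:
            result += flatten_sketch(element)
        return result
    return (structure,)


assert flatten_sketch({'b': [1, 2], 'a': (3,)}) == (3, 1, 2)
```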
6,838 | google-research/batch-ppo | agents/tools/nested.py | filter_ | def filter_(predicate, *structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc, too-many-branches
"""Select elements of a nested structure based on a predicate function.
If multiple structures are provided as input, their structure must match and
the function will be applied to correspo... | python | def filter_(predicate, *structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc, too-many-branches
"""Select elements of a nested structure based on a predicate function.
If multiple structures are provided as input, their structure must match and
the function will be applied to correspo... | [
"def",
"filter_",
"(",
"predicate",
",",
"*",
"structures",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=differing-param-doc,missing-param-doc, too-many-branches",
"# Named keyword arguments are not allowed after *args in Python 2.",
"flatten",
"=",
"kwargs",
".",
"pop... | Select elements of a nested structure based on a predicate function.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The nested
structure can consist of any combination of lists, tuples, and dicts.
Args:
predica... | [
"Select",
"elements",
"of",
"a",
"nested",
"structure",
"based",
"on",
"a",
"predicate",
"function",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/nested.py#L128-L192 |
6,839 | google-research/batch-ppo | agents/tools/loop.py | Loop.add_phase | def add_phase(
self, name, done, score, summary, steps,
report_every=None, log_every=None, checkpoint_every=None, feed=None):
"""Add a phase to the loop protocol.
If the model breaks long computation into multiple steps, the done tensor
indicates whether the current score should be added to the... | python | def add_phase(
self, name, done, score, summary, steps,
report_every=None, log_every=None, checkpoint_every=None, feed=None):
"""Add a phase to the loop protocol.
If the model breaks long computation into multiple steps, the done tensor
indicates whether the current score should be added to the... | [
"def",
"add_phase",
"(",
"self",
",",
"name",
",",
"done",
",",
"score",
",",
"summary",
",",
"steps",
",",
"report_every",
"=",
"None",
",",
"log_every",
"=",
"None",
",",
"checkpoint_every",
"=",
"None",
",",
"feed",
"=",
"None",
")",
":",
"done",
... | Add a phase to the loop protocol.
If the model breaks long computation into multiple steps, the done tensor
indicates whether the current score should be added to the mean counter.
For example, in reinforcement learning we only have a valid score at the
end of the episode.
Score and done tensors c... | [
"Add",
"a",
"phase",
"to",
"the",
"loop",
"protocol",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L66-L106 |
6,840 | google-research/batch-ppo | agents/tools/loop.py | Loop.run | def run(self, sess, saver, max_step=None):
"""Run the loop schedule for a specified number of steps.
Call the operation of the current phase until the global step reaches the
specified maximum step. Phases are repeated over and over in the order they
were added.
Args:
sess: Session to use to... | python | def run(self, sess, saver, max_step=None):
"""Run the loop schedule for a specified number of steps.
Call the operation of the current phase until the global step reaches the
specified maximum step. Phases are repeated over and over in the order they
were added.
Args:
sess: Session to use to... | [
"def",
"run",
"(",
"self",
",",
"sess",
",",
"saver",
",",
"max_step",
"=",
"None",
")",
":",
"global_step",
"=",
"sess",
".",
"run",
"(",
"self",
".",
"_step",
")",
"steps_made",
"=",
"1",
"while",
"True",
":",
"if",
"max_step",
"and",
"global_step"... | Run the loop schedule for a specified number of steps.
Call the operation of the current phase until the global step reaches the
specified maximum step. Phases are repeated over and over in the order they
were added.
Args:
sess: Session to use to run the phase operation.
saver: Saver used ... | [
"Run",
"the",
"loop",
"schedule",
"for",
"a",
"specified",
"number",
"of",
"steps",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L108-L152 |
6,841 | google-research/batch-ppo | agents/tools/loop.py | Loop._is_every_steps | def _is_every_steps(self, phase_step, batch, every):
"""Determine whether a periodic event should happen at this step.
Args:
phase_step: The incrementing step.
batch: The number of steps progressed at once.
every: The interval of the period.
Returns:
Boolean of whether the event sh... | python | def _is_every_steps(self, phase_step, batch, every):
"""Determine whether a periodic event should happen at this step.
Args:
phase_step: The incrementing step.
batch: The number of steps progressed at once.
every: The interval of the period.
Returns:
Boolean of whether the event sh... | [
"def",
"_is_every_steps",
"(",
"self",
",",
"phase_step",
",",
"batch",
",",
"every",
")",
":",
"if",
"not",
"every",
":",
"return",
"False",
"covered_steps",
"=",
"range",
"(",
"phase_step",
",",
"phase_step",
"+",
"batch",
")",
"return",
"any",
"(",
"(... | Determine whether a periodic event should happen at this step.
Args:
phase_step: The incrementing step.
batch: The number of steps progressed at once.
every: The interval of the period.
Returns:
Boolean of whether the event should happen. | [
"Determine",
"whether",
"a",
"periodic",
"event",
"should",
"happen",
"at",
"this",
"step",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L154-L168 |
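The tokens show the range of covered steps; a sketch with the crossing test written out (the exact off-by-one convention is an assumption, since the comparison is truncated in the cell):

```python
def is_every_steps_sketch(phase_step, batch, every):
    """Sketch: does [phase_step, phase_step + batch) cross a multiple of
    `every`? Steps advance `batch` at a time, so a single equality won't do."""
    if not every:
        return False
    covered_steps = range(phase_step, phase_step + batch)
    return any((step + 1) % every == 0 for step in covered_steps)
```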
6,842 | google-research/batch-ppo | agents/tools/loop.py | Loop._find_current_phase | def _find_current_phase(self, global_step):
"""Determine the current phase based on the global step.
This ensures continuing the correct phase after restoring checkpoints.
Args:
global_step: The global number of steps performed across all phases.
Returns:
Tuple of phase object, epoch numbe... | python | def _find_current_phase(self, global_step):
"""Determine the current phase based on the global step.
This ensures continuing the correct phase after restoring checkpoints.
Args:
global_step: The global number of steps performed across all phases.
Returns:
Tuple of phase object, epoch numbe... | [
"def",
"_find_current_phase",
"(",
"self",
",",
"global_step",
")",
":",
"epoch_size",
"=",
"sum",
"(",
"phase",
".",
"steps",
"for",
"phase",
"in",
"self",
".",
"_phases",
")",
"epoch",
"=",
"int",
"(",
"global_step",
"//",
"epoch_size",
")",
"steps_in",
... | Determine the current phase based on the global step.
This ensures continuing the correct phase after restoring checkpoints.
Args:
global_step: The global number of steps performed across all phases.
Returns:
Tuple of phase object, epoch number, and phase steps within the epoch. | [
"Determine",
"the",
"current",
"phase",
"based",
"on",
"the",
"global",
"step",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L170-L187 |
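The phase lookup above is plain modular arithmetic over one epoch of the schedule. A sketch, assuming (based on the visible tokens) that the remainder is walked through the phases in order; `phases` is any list of objects with a `steps` attribute:

    def find_current_phase(phases, global_step):
        # Phases repeat in order, so the schedule is periodic in epoch_size.
        epoch_size = sum(phase.steps for phase in phases)
        epoch = int(global_step // epoch_size)
        steps_in = global_step % epoch_size
        for phase in phases:
            if steps_in < phase.steps:
                return phase, epoch, steps_in
            steps_in -= phase.steps

    # Example: with a 200-step train phase and a 50-step eval phase,
    # global step 430 is 180 steps into the train phase of epoch 1.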
6,843 | google-research/batch-ppo | agents/tools/loop.py | Loop._define_step | def _define_step(self, done, score, summary):
"""Combine operations of a phase.
Keeps track of the mean score and when to report it.
Args:
done: Tensor indicating whether current score can be used.
score: Tensor holding the current, possibly intermediate, score.
summary: Tensor holding s... | python | def _define_step(self, done, score, summary):
"""Combine operations of a phase.
Keeps track of the mean score and when to report it.
Args:
done: Tensor indicating whether current score can be used.
score: Tensor holding the current, possibly intermediate, score.
summary: Tensor holding s... | [
"def",
"_define_step",
"(",
"self",
",",
"done",
",",
"score",
",",
"summary",
")",
":",
"if",
"done",
".",
"shape",
".",
"ndims",
"==",
"0",
":",
"done",
"=",
"done",
"[",
"None",
"]",
"if",
"score",
".",
"shape",
".",
"ndims",
"==",
"0",
":",
... | Combine operations of a phase.
Keeps track of the mean score and when to report it.
Args:
done: Tensor indicating whether current score can be used.
score: Tensor holding the current, possibly intermediate, score.
summary: Tensor holding summary string to write if not an empty string.
R... | [
"Combine",
"operations",
"of",
"a",
"phase",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L189-L217 |
6,844 | google-research/batch-ppo | agents/tools/loop.py | Loop._store_checkpoint | def _store_checkpoint(self, sess, saver, global_step):
"""Store a checkpoint if a log directory was provided to the constructor.
The directory will be created if needed.
Args:
sess: Session containing variables to store.
saver: Saver used for checkpointing.
global_step: Step number of th... | python | def _store_checkpoint(self, sess, saver, global_step):
"""Store a checkpoint if a log directory was provided to the constructor.
The directory will be created if needed.
Args:
sess: Session containing variables to store.
saver: Saver used for checkpointing.
global_step: Step number of th... | [
"def",
"_store_checkpoint",
"(",
"self",
",",
"sess",
",",
"saver",
",",
"global_step",
")",
":",
"if",
"not",
"self",
".",
"_logdir",
"or",
"not",
"saver",
":",
"return",
"tf",
".",
"gfile",
".",
"MakeDirs",
"(",
"self",
".",
"_logdir",
")",
"filename... | Store a checkpoint if a log directory was provided to the constructor.
The directory will be created if needed.
Args:
sess: Session containing variables to store.
saver: Saver used for checkpointing.
global_step: Step number of the checkpoint name. | [
"Store",
"a",
"checkpoint",
"if",
"a",
"log",
"directory",
"was",
"provided",
"to",
"the",
"constructor",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L219-L233 |
6,845 | google-research/batch-ppo | agents/scripts/train.py | _define_loop | def _define_loop(graph, logdir, train_steps, eval_steps):
"""Create and configure a training loop with training and evaluation phases.
Args:
graph: Object providing graph elements via attributes.
logdir: Log directory for storing checkpoints and summaries.
train_steps: Number of training steps per epoc... | python | def _define_loop(graph, logdir, train_steps, eval_steps):
"""Create and configure a training loop with training and evaluation phases.
Args:
graph: Object providing graph elements via attributes.
logdir: Log directory for storing checkpoints and summaries.
train_steps: Number of training steps per epoc... | [
"def",
"_define_loop",
"(",
"graph",
",",
"logdir",
",",
"train_steps",
",",
"eval_steps",
")",
":",
"loop",
"=",
"tools",
".",
"Loop",
"(",
"logdir",
",",
"graph",
".",
"step",
",",
"graph",
".",
"should_log",
",",
"graph",
".",
"do_report",
",",
"gra... | Create and configure a training loop with training and evaluation phases.
Args:
graph: Object providing graph elements via attributes.
logdir: Log directory for storing checkpoints and summaries.
train_steps: Number of training steps per epoch.
eval_steps: Number of evaluation steps per epoch.
Ret... | [
"Create",
"and",
"configure",
"a",
"training",
"loop",
"with",
"training",
"and",
"evaluation",
"phases",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/train.py#L70-L97 |
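The record above wires one training phase and one evaluation phase that alternate every epoch. A pure-Python sketch of that control flow; the Phase and run_loop names are illustrative stand-ins, not the library's API:

    class Phase(object):
        def __init__(self, name, steps, op):
            self.name, self.steps, self.op = name, steps, op

    def run_loop(phases, max_step):
        # Repeat the phases in order until the step budget is exhausted.
        step = 0
        while step < max_step:
            for phase in phases:
                for _ in range(phase.steps):
                    phase.op(step)
                    step += 1
                    if step >= max_step:
                        return

    run_loop([Phase('train', 200, lambda s: None),
              Phase('eval', 25, lambda s: None)], max_step=1000)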
6,846 | google-research/batch-ppo | agents/scripts/train.py | train | def train(config, env_processes):
"""Training and evaluation entry point yielding scores.
Resolves some configuration attributes, creates environments, graph, and
training loop. By default, assigns all operations to the CPU.
Args:
config: Object providing configurations via attributes.
env_processes: ... | python | def train(config, env_processes):
"""Training and evaluation entry point yielding scores.
Resolves some configuration attributes, creates environments, graph, and
training loop. By default, assigns all operations to the CPU.
Args:
config: Object providing configurations via attributes.
env_processes: ... | [
"def",
"train",
"(",
"config",
",",
"env_processes",
")",
":",
"tf",
".",
"reset_default_graph",
"(",
")",
"if",
"config",
".",
"update_every",
"%",
"config",
".",
"num_agents",
":",
"tf",
".",
"logging",
".",
"warn",
"(",
"'Number of agents should divide epis... | Training and evaluation entry point yielding scores.
Resolves some configuration attributes, creates environments, graph, and
training loop. By default, assigns all operations to the CPU.
Args:
config: Object providing configurations via attributes.
env_processes: Whether to step environments in separat... | [
"Training",
"and",
"evaluation",
"entry",
"point",
"yielding",
"scores",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/train.py#L100-L138 |
6,847 | google-research/batch-ppo | agents/scripts/train.py | main | def main(_):
"""Create or load configuration and launch the trainer."""
utility.set_up_logging()
if not FLAGS.config:
raise KeyError('You must specify a configuration.')
logdir = FLAGS.logdir and os.path.expanduser(os.path.join(
FLAGS.logdir, '{}-{}'.format(FLAGS.timestamp, FLAGS.config)))
try:
... | python | def main(_):
"""Create or load configuration and launch the trainer."""
utility.set_up_logging()
if not FLAGS.config:
raise KeyError('You must specify a configuration.')
logdir = FLAGS.logdir and os.path.expanduser(os.path.join(
FLAGS.logdir, '{}-{}'.format(FLAGS.timestamp, FLAGS.config)))
try:
... | [
"def",
"main",
"(",
"_",
")",
":",
"utility",
".",
"set_up_logging",
"(",
")",
"if",
"not",
"FLAGS",
".",
"config",
":",
"raise",
"KeyError",
"(",
"'You must specify a configuration.'",
")",
"logdir",
"=",
"FLAGS",
".",
"logdir",
"and",
"os",
".",
"path",
... | Create or load configuration and launch the trainer. | [
"Create",
"or",
"load",
"configuration",
"and",
"launch",
"the",
"trainer",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/train.py#L141-L154 |
6,848 | google-research/batch-ppo | agents/parts/iterate_sequences.py | iterate_sequences | def iterate_sequences(
consumer_fn, output_template, sequences, length, chunk_length=None,
batch_size=None, num_epochs=1, padding_value=0):
"""Iterate over batches of chunks of sequences for multiple epochs.
The batch dimension of the length tensor must be set because it is used to
infer buffer sizes.
... | python | def iterate_sequences(
consumer_fn, output_template, sequences, length, chunk_length=None,
batch_size=None, num_epochs=1, padding_value=0):
"""Iterate over batches of chunks of sequences for multiple epochs.
The batch dimension of the length tensor must be set because it is used to
infer buffer sizes.
... | [
"def",
"iterate_sequences",
"(",
"consumer_fn",
",",
"output_template",
",",
"sequences",
",",
"length",
",",
"chunk_length",
"=",
"None",
",",
"batch_size",
"=",
"None",
",",
"num_epochs",
"=",
"1",
",",
"padding_value",
"=",
"0",
")",
":",
"if",
"not",
"... | Iterate over batches of chunks of sequences for multiple epochs.
The batch dimension of the length tensor must be set because it is used to
infer buffer sizes.
Args:
consumer_fn: Function creating the operation to process the data.
output_template: Nested tensors of same shape and dtype as outputs.
... | [
"Iterate",
"over",
"batches",
"of",
"chunks",
"of",
"sequences",
"for",
"multiple",
"epochs",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/iterate_sequences.py#L26-L74 |
6,849 | google-research/batch-ppo | agents/parts/iterate_sequences.py | chunk_sequence | def chunk_sequence(sequence, chunk_length=200, padding_value=0):
"""Split a nested dict of sequence tensors into a batch of chunks.
This function does not expect a batch of sequences, but a single sequence. A
`length` key is added if it did not exist already.
Args:
sequence: Nested dict of tensors with ti... | python | def chunk_sequence(sequence, chunk_length=200, padding_value=0):
"""Split a nested dict of sequence tensors into a batch of chunks.
This function does not expect a batch of sequences, but a single sequence. A
`length` key is added if it did not exist already.
Args:
sequence: Nested dict of tensors with ti... | [
"def",
"chunk_sequence",
"(",
"sequence",
",",
"chunk_length",
"=",
"200",
",",
"padding_value",
"=",
"0",
")",
":",
"if",
"'length'",
"in",
"sequence",
":",
"length",
"=",
"sequence",
".",
"pop",
"(",
"'length'",
")",
"else",
":",
"length",
"=",
"tf",
... | Split a nested dict of sequence tensors into a batch of chunks.
This function does not expect a batch of sequences, but a single sequence. A
`length` key is added if it did not exist already.
Args:
sequence: Nested dict of tensors with time dimension.
chunk_length: Size of chunks the sequence will be sp... | [
"Split",
"a",
"nested",
"dict",
"of",
"sequence",
"tensors",
"into",
"a",
"batch",
"of",
"chunks",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/iterate_sequences.py#L77-L110 |
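chunk_sequence pads the time axis up to a multiple of the chunk length and folds it into a batch of chunks, tracking how many frames in each chunk are valid. A numpy sketch of the same transformation for a flat sequence (the original operates on nested dicts of tensors):

    import numpy as np

    def chunk_sequence(sequence, chunk_length=200, padding_value=0):
        sequence = np.asarray(sequence)
        length = len(sequence)
        num_chunks = -(-length // chunk_length)  # ceiling division
        padded = np.full(num_chunks * chunk_length, padding_value,
                         dtype=sequence.dtype)
        padded[:length] = sequence
        chunks = padded.reshape(num_chunks, chunk_length)
        chunk_lengths = np.minimum(
            chunk_length, length - chunk_length * np.arange(num_chunks))
        return chunks, chunk_lengths

    chunks, lengths = chunk_sequence(np.arange(5), chunk_length=2)
    # chunks -> [[0, 1], [2, 3], [4, 0]], lengths -> [2, 2, 1]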
6,850 | google-research/batch-ppo | agents/parts/iterate_sequences.py | remove_padding | def remove_padding(sequence):
"""Selects the used frames of a sequence, up to its length.
This function does not expect a batch of sequences, but a single sequence.
The sequence must be a dict with `length` key, which will be removed from the
result.
Args:
sequence: Nested dict of tensors with time dimensi... | python | def remove_padding(sequence):
"""Selects the used frames of a sequence, up to its length.
This function does not expect a batch of sequences, but a single sequence.
The sequence must be a dict with `length` key, which will be removed from the
result.
Args:
sequence: Nested dict of tensors with time dimensi... | [
"def",
"remove_padding",
"(",
"sequence",
")",
":",
"length",
"=",
"sequence",
".",
"pop",
"(",
"'length'",
")",
"sequence",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"tensor",
":",
"tensor",
"[",
":",
"length",
"]",
",",
"sequence",
")",... | Selects the used frames of a sequence, up to its length.
This function does not expect a batch of sequences, but a single sequence.
The sequence must be a dict with `length` key, which will be removed from the
result.
Args:
sequence: Nested dict of tensors with time dimension.
Returns:
Nested dict of ... | [
"Selects",
"the",
"used",
"frames",
"of",
"a",
"sequence",
"up",
"to",
"its",
"length",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/iterate_sequences.py#L113-L128 |
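remove_padding is the inverse convenience: slice every tensor down to the valid frames and drop the `length` entry. A plain-dict sketch of the idea:

    def remove_padding(sequence):
        # `length` marks the number of valid frames; every other entry is
        # sliced down to that many frames along the time axis.
        length = sequence.pop('length')
        return {key: value[:length] for key, value in sequence.items()}

    batch = {'observ': list(range(10)), 'reward': list(range(10)), 'length': 6}
    trimmed = remove_padding(dict(batch))  # each value now has 6 elements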
6,851 | google-research/batch-ppo | agents/parts/normalize.py | StreamingNormalize.transform | def transform(self, value):
"""Normalize a single or batch tensor.
Applies the activated transformations in the constructor using current
estimates of mean and variance.
Args:
value: Batch or single value tensor.
Returns:
Normalized batch or single value tensor.
"""
with tf.na... | python | def transform(self, value):
"""Normalize a single or batch tensor.
Applies the activated transformations in the constructor using current
estimates of mean and variance.
Args:
value: Batch or single value tensor.
Returns:
Normalized batch or single value tensor.
"""
with tf.na... | [
"def",
"transform",
"(",
"self",
",",
"value",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"self",
".",
"_name",
"+",
"'/transform'",
")",
":",
"no_batch_dim",
"=",
"value",
".",
"shape",
".",
"ndims",
"==",
"self",
".",
"_mean",
".",
"shape",
".... | Normalize a single or batch tensor.
Applies the activated transformations in the constructor using current
estimates of mean and variance.
Args:
value: Batch or single value tensor.
Returns:
Normalized batch or single value tensor. | [
"Normalize",
"a",
"single",
"or",
"batch",
"tensor",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L50-L79 |
6,852 | google-research/batch-ppo | agents/parts/normalize.py | StreamingNormalize.update | def update(self, value):
"""Update the mean and variance estimates.
Args:
value: Batch or single value tensor.
Returns:
Summary tensor.
"""
with tf.name_scope(self._name + '/update'):
if value.shape.ndims == self._mean.shape.ndims:
# Add a batch dimension if necessary.
... | python | def update(self, value):
"""Update the mean and variance estimates.
Args:
value: Batch or single value tensor.
Returns:
Summary tensor.
"""
with tf.name_scope(self._name + '/update'):
if value.shape.ndims == self._mean.shape.ndims:
# Add a batch dimension if necessary.
... | [
"def",
"update",
"(",
"self",
",",
"value",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"self",
".",
"_name",
"+",
"'/update'",
")",
":",
"if",
"value",
".",
"shape",
".",
"ndims",
"==",
"self",
".",
"_mean",
".",
"shape",
".",
"ndims",
":",
... | Update the mean and variance estimates.
Args:
value: Batch or single value tensor.
Returns:
Summary tensor. | [
"Update",
"the",
"mean",
"and",
"variance",
"estimates",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L81-L108 |
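transform and update together implement streaming normalization: update folds new samples into running estimates, transform centers and scales by them. A numpy sketch of one standard way to maintain these statistics (Welford's online algorithm; the truncated snippets do not confirm this exact recurrence):

    import numpy as np

    class StreamingNormalize(object):
        def __init__(self, shape):
            self.count = 0
            self.mean = np.zeros(shape)
            self.var_sum = np.zeros(shape)

        def update(self, value):
            # Numerically stable streaming mean and sum of squared deviations.
            value = np.atleast_2d(value)  # add a batch dimension if necessary
            for sample in value:
                self.count += 1
                delta = sample - self.mean
                self.mean += delta / self.count
                self.var_sum += delta * (sample - self.mean)

        def transform(self, value):
            # Center and scale by the current estimates.
            std = np.sqrt(self.var_sum / max(self.count - 1, 1))
            return (value - self.mean) / (std + 1e-8)

    norm = StreamingNormalize(shape=(3,))
    norm.update(np.random.randn(100, 3))
    print(norm.transform(np.zeros(3)))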
6,853 | google-research/batch-ppo | agents/parts/normalize.py | StreamingNormalize.reset | def reset(self):
"""Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation.
"""
with tf.name_scope(self._name + '/reset'):
return tf.group(
self._count.assign(0),
self._mean.assign(tf.zeros_like(self._mean)),
self... | python | def reset(self):
"""Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation.
"""
with tf.name_scope(self._name + '/reset'):
return tf.group(
self._count.assign(0),
self._mean.assign(tf.zeros_like(self._mean)),
self... | [
"def",
"reset",
"(",
"self",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"self",
".",
"_name",
"+",
"'/reset'",
")",
":",
"return",
"tf",
".",
"group",
"(",
"self",
".",
"_count",
".",
"assign",
"(",
"0",
")",
",",
"self",
".",
"_mean",
".",
... | Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation. | [
"Reset",
"the",
"estimates",
"of",
"mean",
"and",
"variance",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L110-L122 |
6,854 | google-research/batch-ppo | agents/parts/normalize.py | StreamingNormalize.summary | def summary(self):
"""Summary string of mean and standard deviation.
Returns:
Summary tensor.
"""
with tf.name_scope(self._name + '/summary'):
mean_summary = tf.cond(
self._count > 0, lambda: self._summary('mean', self._mean), str)
std_summary = tf.cond(
self._coun... | python | def summary(self):
"""Summary string of mean and standard deviation.
Returns:
Summary tensor.
"""
with tf.name_scope(self._name + '/summary'):
mean_summary = tf.cond(
self._count > 0, lambda: self._summary('mean', self._mean), str)
std_summary = tf.cond(
self._coun... | [
"def",
"summary",
"(",
"self",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"self",
".",
"_name",
"+",
"'/summary'",
")",
":",
"mean_summary",
"=",
"tf",
".",
"cond",
"(",
"self",
".",
"_count",
">",
"0",
",",
"lambda",
":",
"self",
".",
"_summa... | Summary string of mean and standard deviation.
Returns:
Summary tensor. | [
"Summary",
"string",
"of",
"mean",
"and",
"standard",
"deviation",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L124-L135 |
6,855 | google-research/batch-ppo | agents/parts/normalize.py | StreamingNormalize._std | def _std(self):
"""Computes the current estimate of the standard deviation.
Note that the standard deviation is not defined until at least two samples
were seen.
Returns:
Tensor of current variance.
"""
variance = tf.cond(
self._count > 1,
lambda: self._var_sum / tf.cast(... | python | def _std(self):
"""Computes the current estimate of the standard deviation.
Note that the standard deviation is not defined until at least two samples
were seen.
Returns:
Tensor of current variance.
"""
variance = tf.cond(
self._count > 1,
lambda: self._var_sum / tf.cast(... | [
"def",
"_std",
"(",
"self",
")",
":",
"variance",
"=",
"tf",
".",
"cond",
"(",
"self",
".",
"_count",
">",
"1",
",",
"lambda",
":",
"self",
".",
"_var_sum",
"/",
"tf",
".",
"cast",
"(",
"self",
".",
"_count",
"-",
"1",
",",
"tf",
".",
"float32"... | Computes the current estimate of the standard deviation.
Note that the standard deviation is not defined until at least two samples
were seen.
Returns:
Tensor of current variance. | [
"Computes",
"the",
"current",
"estimate",
"of",
"the",
"standard",
"deviation",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L137-L153 |
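The tokens show the variance computed as `_var_sum / (count - 1)`, the Bessel-corrected sample variance, which is why the docstring warns that it is undefined before two samples. A quick numeric check of that convention:

    import numpy as np

    samples = np.array([2.0, 4.0, 9.0])
    var_sum = ((samples - samples.mean()) ** 2).sum()
    # Dividing by count - 1 reproduces numpy's unbiased variance.
    assert np.isclose(var_sum / (len(samples) - 1), samples.var(ddof=1))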
6,856 | google-research/batch-ppo | agents/parts/normalize.py | StreamingNormalize._summary | def _summary(self, name, tensor):
"""Create a scalar or histogram summary matching the rank of the tensor.
Args:
name: Name for the summary.
tensor: Tensor to summarize.
Returns:
Summary tensor.
"""
if tensor.shape.ndims == 0:
return tf.summary.scalar(name, tensor)
else... | python | def _summary(self, name, tensor):
"""Create a scalar or histogram summary matching the rank of the tensor.
Args:
name: Name for the summary.
tensor: Tensor to summarize.
Returns:
Summary tensor.
"""
if tensor.shape.ndims == 0:
return tf.summary.scalar(name, tensor)
else... | [
"def",
"_summary",
"(",
"self",
",",
"name",
",",
"tensor",
")",
":",
"if",
"tensor",
".",
"shape",
".",
"ndims",
"==",
"0",
":",
"return",
"tf",
".",
"summary",
".",
"scalar",
"(",
"name",
",",
"tensor",
")",
"else",
":",
"return",
"tf",
".",
"s... | Create a scalar or histogram summary matching the rank of the tensor.
Args:
name: Name for the summary.
tensor: Tensor to summarize.
Returns:
Summary tensor. | [
"Create",
"a",
"scalar",
"or",
"histogram",
"summary",
"matching",
"the",
"rank",
"of",
"the",
"tensor",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L155-L168 |
6,857 | google-research/batch-ppo | agents/parts/memory.py | EpisodeMemory.length | def length(self, rows=None):
"""Tensor holding the current length of episodes.
Args:
rows: Episodes to select length from, defaults to all.
Returns:
Batch tensor of sequence lengths.
"""
rows = tf.range(self._capacity) if rows is None else rows
return tf.gather(self._length, rows) | python | def length(self, rows=None):
"""Tensor holding the current length of episodes.
Args:
rows: Episodes to select length from, defaults to all.
Returns:
Batch tensor of sequence lengths.
"""
rows = tf.range(self._capacity) if rows is None else rows
return tf.gather(self._length, rows) | [
"def",
"length",
"(",
"self",
",",
"rows",
"=",
"None",
")",
":",
"rows",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"_capacity",
")",
"if",
"rows",
"is",
"None",
"else",
"rows",
"return",
"tf",
".",
"gather",
"(",
"self",
".",
"_length",
",",
"r... | Tensor holding the current length of episodes.
Args:
rows: Episodes to select length from, defaults to all.
Returns:
Batch tensor of sequence lengths. | [
"Tensor",
"holding",
"the",
"current",
"length",
"of",
"episodes",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L52-L62 |
6,858 | google-research/batch-ppo | agents/parts/memory.py | EpisodeMemory.append | def append(self, transitions, rows=None):
"""Append a batch of transitions to rows of the memory.
Args:
transitions: Tuple of transition quantities with batch dimension.
rows: Episodes to append to, defaults to all.
Returns:
Operation.
"""
rows = tf.range(self._capacity) if rows ... | python | def append(self, transitions, rows=None):
"""Append a batch of transitions to rows of the memory.
Args:
transitions: Tuple of transition quantities with batch dimension.
rows: Episodes to append to, defaults to all.
Returns:
Operation.
"""
rows = tf.range(self._capacity) if rows ... | [
"def",
"append",
"(",
"self",
",",
"transitions",
",",
"rows",
"=",
"None",
")",
":",
"rows",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"_capacity",
")",
"if",
"rows",
"is",
"None",
"else",
"rows",
"assert",
"rows",
".",
"shape",
".",
"ndims",
"==... | Append a batch of transitions to rows of the memory.
Args:
transitions: Tuple of transition quantities with batch dimension.
rows: Episodes to append to, defaults to all.
Returns:
Operation. | [
"Append",
"a",
"batch",
"of",
"transitions",
"to",
"rows",
"of",
"the",
"memory",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L64-L92 |
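append scatters one transition per selected row into that row's next free timestep. A numpy sketch of the indexing; the buffer fields and shapes here are illustrative stand-ins for the tensors in the record:

    import numpy as np

    class EpisodeMemory(object):
        def __init__(self, capacity, max_length, observ_dim):
            self.length = np.zeros(capacity, dtype=int)
            self.observ = np.zeros((capacity, max_length, observ_dim))
            self.reward = np.zeros((capacity, max_length))

        def append(self, observ, reward, rows):
            # Write at each row's current length, then advance the counter.
            steps = self.length[rows]
            self.observ[rows, steps] = observ
            self.reward[rows, steps] = reward
            self.length[rows] += 1

        def clear(self, rows):
            # Only lengths are reset; stale frames are overwritten later.
            self.length[rows] = 0

    memory = EpisodeMemory(capacity=4, max_length=100, observ_dim=3)
    memory.append(np.ones((2, 3)), np.array([1.0, 0.5]), rows=np.array([0, 2]))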
6,859 | google-research/batch-ppo | agents/parts/memory.py | EpisodeMemory.replace | def replace(self, episodes, length, rows=None):
"""Replace full episodes.
Args:
episodes: Tuple of transition quantities with batch and time dimensions.
length: Batch of sequence lengths.
rows: Episodes to replace, defaults to all.
Returns:
Operation.
"""
rows = tf.range(se... | python | def replace(self, episodes, length, rows=None):
"""Replace full episodes.
Args:
episodes: Tuple of transition quantities with batch and time dimensions.
length: Batch of sequence lengths.
rows: Episodes to replace, defaults to all.
Returns:
Operation.
"""
rows = tf.range(se... | [
"def",
"replace",
"(",
"self",
",",
"episodes",
",",
"length",
",",
"rows",
"=",
"None",
")",
":",
"rows",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"_capacity",
")",
"if",
"rows",
"is",
"None",
"else",
"rows",
"assert",
"rows",
".",
"shape",
".",... | Replace full episodes.
Args:
episodes: Tuple of transition quantities with batch and time dimensions.
length: Batch of sequence lengths.
rows: Episodes to replace, defaults to all.
Returns:
Operation. | [
"Replace",
"full",
"episodes",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L94-L117 |
6,860 | google-research/batch-ppo | agents/parts/memory.py | EpisodeMemory.data | def data(self, rows=None):
"""Access a batch of episodes from the memory.
Padding elements after the length of each episode are unspecified and might
contain old data.
Args:
rows: Episodes to select, defaults to all.
Returns:
Tuple containing a tuple of transition quantities with batc... | python | def data(self, rows=None):
"""Access a batch of episodes from the memory.
Padding elements after the length of each episode are unspecified and might
contain old data.
Args:
rows: Episodes to select, defaults to all.
Returns:
Tuple containing a tuple of transition quantities with batc... | [
"def",
"data",
"(",
"self",
",",
"rows",
"=",
"None",
")",
":",
"rows",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"_capacity",
")",
"if",
"rows",
"is",
"None",
"else",
"rows",
"assert",
"rows",
".",
"shape",
".",
"ndims",
"==",
"1",
"episode",
"... | Access a batch of episodes from the memory.
Padding elements after the length of each episode are unspecified and might
contain old data.
Args:
rows: Episodes to select, defaults to all.
Returns:
Tuple containing a tuple of transition quantities with batch and time
dimensions, and a... | [
"Access",
"a",
"batch",
"of",
"episodes",
"from",
"the",
"memory",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L119-L136 |
6,861 | google-research/batch-ppo | agents/parts/memory.py | EpisodeMemory.clear | def clear(self, rows=None):
"""Reset episodes in the memory.
Internally, this only sets their lengths to zero. The memory entries will
be overridden by future calls to append() or replace().
Args:
rows: Episodes to clear, defaults to all.
Returns:
Operation.
"""
rows = tf.rang... | python | def clear(self, rows=None):
"""Reset episodes in the memory.
Internally, this only sets their lengths to zero. The memory entries will
be overridden by future calls to append() or replace().
Args:
rows: Episodes to clear, defaults to all.
Returns:
Operation.
"""
rows = tf.rang... | [
"def",
"clear",
"(",
"self",
",",
"rows",
"=",
"None",
")",
":",
"rows",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"_capacity",
")",
"if",
"rows",
"is",
"None",
"else",
"rows",
"assert",
"rows",
".",
"shape",
".",
"ndims",
"==",
"1",
"return",
"... | Reset episodes in the memory.
Internally, this only sets their lengths to zero. The memory entries will
be overridden by future calls to append() or replace().
Args:
rows: Episodes to clear, defaults to all.
Returns:
Operation. | [
"Reset",
"episodes",
"in",
"the",
"memory",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L138-L152 |
6,862 | google-research/batch-ppo | agents/tools/in_graph_env.py | InGraphEnv._parse_shape | def _parse_shape(self, space):
"""Get a tensor shape from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
Shape tuple.
"""
if isinstance(space, gym.spaces.Discrete):
return ()
if isinstance(s... | python | def _parse_shape(self, space):
"""Get a tensor shape from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
Shape tuple.
"""
if isinstance(space, gym.spaces.Discrete):
return ()
if isinstance(s... | [
"def",
"_parse_shape",
"(",
"self",
",",
"space",
")",
":",
"if",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Discrete",
")",
":",
"return",
"(",
")",
"if",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Box",
")",
":... | Get a tensor shape from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
Shape tuple. | [
"Get",
"a",
"tensor",
"shape",
"from",
"a",
"OpenAI",
"Gym",
"space",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/in_graph_env.py#L134-L150 |
6,863 | google-research/batch-ppo | agents/tools/in_graph_env.py | InGraphEnv._parse_dtype | def _parse_dtype(self, space):
"""Get a tensor dtype from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
TensorFlow data type.
"""
if isinstance(space, gym.spaces.Discrete):
return tf.int32
... | python | def _parse_dtype(self, space):
"""Get a tensor dtype from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
TensorFlow data type.
"""
if isinstance(space, gym.spaces.Discrete):
return tf.int32
... | [
"def",
"_parse_dtype",
"(",
"self",
",",
"space",
")",
":",
"if",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Discrete",
")",
":",
"return",
"tf",
".",
"int32",
"if",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Box",... | Get a tensor dtype from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
TensorFlow data type. | [
"Get",
"a",
"tensor",
"dtype",
"from",
"a",
"OpenAI",
"Gym",
"space",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/in_graph_env.py#L152-L168 |
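Both _parse_shape and _parse_dtype dispatch on the Gym space type: Discrete spaces map to scalar integer actions, Box spaces carry an explicit shape and float values. A standalone sketch (numpy dtypes stand in for the TensorFlow ones so the example is self-contained):

    import gym
    import numpy as np

    def parse_shape(space):
        if isinstance(space, gym.spaces.Discrete):
            return ()  # a discrete action is a scalar index
        if isinstance(space, gym.spaces.Box):
            return tuple(space.shape)
        raise NotImplementedError("Unsupported space '{}'".format(space))

    def parse_dtype(space):
        if isinstance(space, gym.spaces.Discrete):
            return np.int32
        if isinstance(space, gym.spaces.Box):
            return np.float32
        raise NotImplementedError("Unsupported space '{}'".format(space))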
6,864 | google-research/batch-ppo | agents/algorithms/ppo/ppo.py | PPO.begin_episode | def begin_episode(self, agent_indices):
"""Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('begin_episode/'):
if self._last_state is None:
reset_state = tf.no_op()... | python | def begin_episode(self, agent_indices):
"""Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('begin_episode/'):
if self._last_state is None:
reset_state = tf.no_op()... | [
"def",
"begin_episode",
"(",
"self",
",",
"agent_indices",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'begin_episode/'",
")",
":",
"if",
"self",
".",
"_last_state",
"is",
"None",
":",
"reset_state",
"=",
"tf",
".",
"no_op",
"(",
")",
"else",
":",
... | Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor. | [
"Reset",
"the",
"recurrent",
"states",
"and",
"stored",
"episode",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L81-L98 |
6,865 | google-research/batch-ppo | agents/algorithms/ppo/ppo.py | PPO.perform | def perform(self, agent_indices, observ):
"""Compute batch of actions and a summary for a batch of observation.
Args:
agent_indices: Tensor containing current batch indices.
observ: Tensor of a batch of observations for all agents.
Returns:
Tuple of action batch tensor and summary tensor... | python | def perform(self, agent_indices, observ):
"""Compute batch of actions and a summary for a batch of observation.
Args:
agent_indices: Tensor containing current batch indices.
observ: Tensor of a batch of observations for all agents.
Returns:
Tuple of action batch tensor and summary tensor... | [
"def",
"perform",
"(",
"self",
",",
"agent_indices",
",",
"observ",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'perform/'",
")",
":",
"observ",
"=",
"self",
".",
"_observ_filter",
".",
"transform",
"(",
"observ",
")",
"if",
"self",
".",
"_last_stat... | Compute batch of actions and a summary for a batch of observation.
Args:
agent_indices: Tensor containing current batch indices.
observ: Tensor of a batch of observations for all agents.
Returns:
Tuple of action batch tensor and summary tensor. | [
"Compute",
"batch",
"of",
"actions",
"and",
"a",
"summary",
"for",
"a",
"batch",
"of",
"observation",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L100-L144 |
6,866 | google-research/batch-ppo | agents/algorithms/ppo/ppo.py | PPO.experience | def experience(
self, agent_indices, observ, action, reward, unused_done, unused_nextob):
"""Process the transition tuple of the current step.
When training, add the current transition tuple to the memory and update
the streaming statistics for observations and rewards. A summary string is
return... | python | def experience(
self, agent_indices, observ, action, reward, unused_done, unused_nextob):
"""Process the transition tuple of the current step.
When training, add the current transition tuple to the memory and update
the streaming statistics for observations and rewards. A summary string is
return... | [
"def",
"experience",
"(",
"self",
",",
"agent_indices",
",",
"observ",
",",
"action",
",",
"reward",
",",
"unused_done",
",",
"unused_nextob",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'experience/'",
")",
":",
"return",
"tf",
".",
"cond",
"(",
"s... | Process the transition tuple of the current step.
When training, add the current transition tuple to the memory and update
the streaming statistics for observations and rewards. A summary string is
returned if requested at this step.
Args:
agent_indices: Tensor containing current batch indices.
... | [
"Process",
"the",
"transition",
"tuple",
"of",
"the",
"current",
"step",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L146-L170 |
6,867 | google-research/batch-ppo | agents/algorithms/ppo/ppo.py | PPO.end_episode | def end_episode(self, agent_indices):
"""Add episodes to the memory and perform update steps if memory is full.
During training, add the collected episodes of the batch indices that
finished their episode to the memory. If the memory is full, train on it,
and then clear the memory. A summary string is ... | python | def end_episode(self, agent_indices):
"""Add episodes to the memory and perform update steps if memory is full.
During training, add the collected episodes of the batch indices that
finished their episode to the memory. If the memory is full, train on it,
and then clear the memory. A summary string is ... | [
"def",
"end_episode",
"(",
"self",
",",
"agent_indices",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'end_episode/'",
")",
":",
"return",
"tf",
".",
"cond",
"(",
"self",
".",
"_is_training",
",",
"lambda",
":",
"self",
".",
"_define_end_episode",
"(",... | Add episodes to the memory and perform update steps if memory is full.
During training, add the collected episodes of the batch indices that
finished their episode to the memory. If the memory is full, train on it,
and then clear the memory. A summary string is returned if requested at
this step.
... | [
"Add",
"episodes",
"to",
"the",
"memory",
"and",
"perform",
"update",
"steps",
"if",
"memory",
"is",
"full",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L199-L216 |
6,868 | google-research/batch-ppo | agents/algorithms/ppo/ppo.py | PPO._initialize_policy | def _initialize_policy(self):
"""Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distributio... | python | def _initialize_policy(self):
"""Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distributio... | [
"def",
"_initialize_policy",
"(",
"self",
")",
":",
"with",
"tf",
".",
"device",
"(",
"'/gpu:0'",
"if",
"self",
".",
"_use_gpu",
"else",
"'/cpu:0'",
")",
":",
"network",
"=",
"functools",
".",
"partial",
"(",
"self",
".",
"_config",
".",
"network",
",",
... | Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distribution.
Returns:
Parameters of ... | [
"Initialize",
"the",
"policy",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L218-L250 |
6,869 | google-research/batch-ppo | agents/algorithms/ppo/ppo.py | PPO._initialize_memory | def _initialize_memory(self, policy_params):
"""Initialize temporary and permanent memory.
Args:
policy_params: Nested tuple of policy parameters with all dimensions set.
Initializes the attributes `self._current_episodes`,
`self._finished_episodes`, and `self._num_finished_episodes`. The episod... | python | def _initialize_memory(self, policy_params):
"""Initialize temporary and permanent memory.
Args:
policy_params: Nested tuple of policy parameters with all dimensions set.
Initializes the attributes `self._current_episodes`,
`self._finished_episodes`, and `self._num_finished_episodes`. The episod... | [
"def",
"_initialize_memory",
"(",
"self",
",",
"policy_params",
")",
":",
"# We store observation, action, policy parameters, and reward.",
"template",
"=",
"(",
"self",
".",
"_batch_env",
".",
"observ",
"[",
"0",
"]",
",",
"self",
".",
"_batch_env",
".",
"action",
... | Initialize temporary and permanent memory.
Args:
policy_params: Nested tuple of policy parameters with all dimensions set.
Initializes the attributes `self._current_episodes`,
`self._finished_episodes`, and `self._num_finished_episodes`. The episodes
memory serves to collect multiple episodes in... | [
"Initialize",
"temporary",
"and",
"permanent",
"memory",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L252-L275 |
6,870 | google-research/batch-ppo | agents/algorithms/ppo/ppo.py | PPO._training | def _training(self):
"""Perform multiple training iterations of both policy and value baseline.
Training on the episodes collected in the memory. Reset the memory
afterwards. Always returns a summary string.
Returns:
Summary tensor.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu... | python | def _training(self):
"""Perform multiple training iterations of both policy and value baseline.
Training on the episodes collected in the memory. Reset the memory
afterwards. Always returns a summary string.
Returns:
Summary tensor.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu... | [
"def",
"_training",
"(",
"self",
")",
":",
"with",
"tf",
".",
"device",
"(",
"'/gpu:0'",
"if",
"self",
".",
"_use_gpu",
"else",
"'/cpu:0'",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'training'",
")",
":",
"assert_full",
"=",
"tf",
".",
"assert_e... | Perform multiple training iterations of both policy and value baseline.
Training on the episodes collected in the memory. Reset the memory
afterwards. Always returns a summary string.
Returns:
Summary tensor. | [
"Perform",
"multiple",
"training",
"iterations",
"of",
"both",
"policy",
"and",
"value",
"baseline",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L294-L332 |
6,871 | google-research/batch-ppo | agents/algorithms/ppo/ppo.py | PPO._perform_update_steps | def _perform_update_steps(
self, observ, action, old_policy_params, reward, length):
"""Perform multiple update steps of value function and policy.
The advantage is computed once at the beginning and shared across
iterations. We need to decide for the summary of one iteration, and thus
choose the... | python | def _perform_update_steps(
self, observ, action, old_policy_params, reward, length):
"""Perform multiple update steps of value function and policy.
The advantage is computed once at the beginning and shared across
iterations. We need to decide for the summary of one iteration, and thus
choose the... | [
"def",
"_perform_update_steps",
"(",
"self",
",",
"observ",
",",
"action",
",",
"old_policy_params",
",",
"reward",
",",
"length",
")",
":",
"return_",
"=",
"utility",
".",
"discounted_return",
"(",
"reward",
",",
"length",
",",
"self",
".",
"_config",
".",
... | Perform multiple update steps of value function and policy.
The advantage is computed once at the beginning and shared across
iterations. We need to decide for the summary of one iteration, and thus
choose the one after half of the iterations.
Args:
observ: Sequences of observations.
actio... | [
"Perform",
"multiple",
"update",
"steps",
"of",
"value",
"function",
"and",
"policy",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L334-L380 |
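_perform_update_steps starts from utility.discounted_return(reward, length, ...), the usual backward recursion R_t = r_t + discount * R_{t+1} evaluated only over the valid prefix of each padded sequence. A numpy sketch of the quantity:

    import numpy as np

    def discounted_return(reward, length, discount):
        # Backward scan per sequence; frames beyond `length` stay zero.
        batch, _ = reward.shape
        return_ = np.zeros_like(reward)
        for b in range(batch):
            acc = 0.0
            for t in reversed(range(length[b])):
                acc = reward[b, t] + discount * acc
                return_[b, t] = acc
        return return_

    rewards = np.array([[1.0, 1.0, 1.0, 0.0]])
    print(discounted_return(rewards, np.array([3]), discount=0.9))
    # [[2.71, 1.9, 1.0, 0.0]]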
6,872 | google-research/batch-ppo | agents/algorithms/ppo/ppo.py | PPO._update_step | def _update_step(self, sequence):
"""Compute the current combined loss and perform a gradient update step.
The sequences must be a dict containing the keys `length` and `sequence`,
where the latter is a tuple containing observations, actions, parameters of
the behavioral policy, rewards, and advantages... | python | def _update_step(self, sequence):
"""Compute the current combined loss and perform a gradient update step.
The sequences must be a dict containing the keys `length` and `sequence`,
where the latter is a tuple containing observations, actions, parameters of
the behavioral policy, rewards, and advantages... | [
"def",
"_update_step",
"(",
"self",
",",
"sequence",
")",
":",
"observ",
",",
"action",
",",
"old_policy_params",
",",
"reward",
",",
"advantage",
"=",
"sequence",
"[",
"'sequence'",
"]",
"length",
"=",
"sequence",
"[",
"'length'",
"]",
"old_policy",
"=",
... | Compute the current combined loss and perform a gradient update step.
The sequences must be a dict containing the keys `length` and `sequence`,
where the latter is a tuple containing observations, actions, parameters of
the behavioral policy, rewards, and advantages.
Args:
sequence: Sequences of... | [
"Compute",
"the",
"current",
"combined",
"loss",
"and",
"perform",
"a",
"gradient",
"update",
"step",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L382-L415 |
6,873 | google-research/batch-ppo | agents/algorithms/ppo/ppo.py | PPO._value_loss | def _value_loss(self, observ, reward, length):
"""Compute the loss function for the value baseline.
The value loss is the difference between empirical and approximated returns
over the collected episodes. Returns the loss tensor and a summary string.
Args:
observ: Sequences of observations.
... | python | def _value_loss(self, observ, reward, length):
"""Compute the loss function for the value baseline.
The value loss is the difference between empirical and approximated returns
over the collected episodes. Returns the loss tensor and a summary string.
Args:
observ: Sequences of observations.
... | [
"def",
"_value_loss",
"(",
"self",
",",
"observ",
",",
"reward",
",",
"length",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'value_loss'",
")",
":",
"value",
"=",
"self",
".",
"_network",
"(",
"observ",
",",
"length",
")",
".",
"value",
"return_",... | Compute the loss function for the value baseline.
The value loss is the difference between empirical and approximated returns
over the collected episodes. Returns the loss tensor and a summary string.
Args:
observ: Sequences of observations.
reward: Sequences of reward.
length: Batch of s... | [
"Compute",
"the",
"loss",
"function",
"for",
"the",
"value",
"baseline",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L417-L441 |
6,874 | google-research/batch-ppo | agents/algorithms/ppo/ppo.py | PPO._policy_loss | def _policy_loss(
self, old_policy, policy, action, advantage, length):
"""Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
policy at the beginning of training.
2. The second term is a KL penalty between the pol... | python | def _policy_loss(
self, old_policy, policy, action, advantage, length):
"""Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
policy at the beginning of training.
2. The second term is a KL penalty between the pol... | [
"def",
"_policy_loss",
"(",
"self",
",",
"old_policy",
",",
"policy",
",",
"action",
",",
"advantage",
",",
"length",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'policy_loss'",
")",
":",
"kl",
"=",
"tf",
".",
"contrib",
".",
"distributions",
".",
... | Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
policy at the beginning of training.
2. The second term is a KL penalty between the policy at the beginning of
training and the current policy.
3. Additionally, ... | [
"Compute",
"the",
"policy",
"loss",
"composed",
"of",
"multiple",
"components",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L443-L503 |
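The docstring lists three loss components: an importance-sampled surrogate, a KL penalty against the behavioral policy, and an additional term punishing large violations of the KL target. A numpy sketch of that combination; the cutoff constants and exact weighting are assumptions, since the record's snippet is truncated before they appear:

    import numpy as np

    def policy_loss(logp_old, logp_new, kl, advantage, penalty,
                    target_kl=0.01, cutoff_factor=2.0, cutoff_coef=1000.0):
        # 1. Importance-sampled policy gradient surrogate.
        surrogate = -np.exp(logp_new - logp_old) * advantage
        # 2. KL penalty keeping the new policy near the behavioral one.
        kl_penalty = penalty * kl
        # 3. Quadratic punishment once KL exceeds the cutoff.
        cutoff = cutoff_factor * target_kl
        kl_cutoff = cutoff_coef * np.maximum(kl - cutoff, 0.0) ** 2
        return (surrogate + kl_penalty + kl_cutoff).mean()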
6,875 | google-research/batch-ppo | agents/algorithms/ppo/ppo.py | PPO._adjust_penalty | def _adjust_penalty(self, observ, old_policy_params, length):
"""Adjust the KL policy between the behavioral and current policy.
Compute how much the policy actually changed during the multiple
update steps. Adjust the penalty strength for the next training phase if we
overshot or undershot the target ... | python | def _adjust_penalty(self, observ, old_policy_params, length):
"""Adjust the KL policy between the behavioral and current policy.
Compute how much the policy actually changed during the multiple
update steps. Adjust the penalty strength for the next training phase if we
overshot or undershot the target ... | [
"def",
"_adjust_penalty",
"(",
"self",
",",
"observ",
",",
"old_policy_params",
",",
"length",
")",
":",
"old_policy",
"=",
"self",
".",
"_policy_type",
"(",
"*",
"*",
"old_policy_params",
")",
"with",
"tf",
".",
"name_scope",
"(",
"'adjust_penalty'",
")",
"... | Adjust the KL policy between the behavioral and current policy.
Compute how much the policy actually changed during the multiple
update steps. Adjust the penalty strength for the next training phase if we
overshot or undershot the target divergence too much.
Args:
observ: Sequences of observatio... | [
"Adjust",
"the",
"KL",
"policy",
"between",
"the",
"behavioral",
"and",
"current",
"policy",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L505-L544 |
6,876 | google-research/batch-ppo | agents/algorithms/ppo/ppo.py | PPO._mask | def _mask(self, tensor, length, padding_value=0):
"""Set padding elements of a batch of sequences to a constant.
Useful for setting padding elements to zero before summing along the time
dimension, or for preventing infinite results in padding elements.
Args:
tensor: Tensor of sequences.
l... | python | def _mask(self, tensor, length, padding_value=0):
"""Set padding elements of a batch of sequences to a constant.
Useful for setting padding elements to zero before summing along the time
dimension, or for preventing infinite results in padding elements.
Args:
tensor: Tensor of sequences.
l... | [
"def",
"_mask",
"(",
"self",
",",
"tensor",
",",
"length",
",",
"padding_value",
"=",
"0",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'mask'",
")",
":",
"range_",
"=",
"tf",
".",
"range",
"(",
"tensor",
".",
"shape",
"[",
"1",
"]",
".",
"va... | Set padding elements of a batch of sequences to a constant.
Useful for setting padding elements to zero before summing along the time
dimension, or for preventing infinite results in padding elements.
Args:
tensor: Tensor of sequences.
length: Batch of sequence lengths.
padding_value: Va... | [
"Set",
"padding",
"elements",
"of",
"a",
"batch",
"of",
"sequences",
"to",
"a",
"constant",
"."
] | 3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L546-L568 |
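_mask compares a time-step range against each sequence length and overwrites the padding frames with a constant. The same comparison in numpy:

    import numpy as np

    def mask(tensor, length, padding_value=0):
        # True where the frame index is below the sequence's length.
        time_steps = tensor.shape[1]
        valid = np.arange(time_steps)[None, :] < np.asarray(length)[:, None]
        return np.where(valid, tensor, padding_value)

    rewards = np.arange(1.0, 9.0).reshape(2, 4)
    print(mask(rewards, [3, 2]))
    # [[1. 2. 3. 0.]
    #  [5. 6. 0. 0.]]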
6,877 | celery/cell | cell/workflow/entities.py | Server.main | def main(self, *args, **kwargs):
"""Implement the actor main loop by waiting forever for messages."""
self.start(*args, **kwargs)
try:
while 1:
body, message = yield self.receive()
handler = self.get_handler(message)
handler(body, messa... | python | def main(self, *args, **kwargs):
"""Implement the actor main loop by waiting forever for messages."""
self.start(*args, **kwargs)
try:
while 1:
body, message = yield self.receive()
handler = self.get_handler(message)
handler(body, messa... | [
"def",
"main",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"start",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"while",
"1",
":",
"body",
",",
"message",
"=",
"yield",
"self",
".",
"receive",
... | Implement the actor main loop by waiting forever for messages. | [
"Implement",
"the",
"actor",
"main",
"loop",
"by",
"waiting",
"forever",
"for",
"messages",
"."
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/workflow/entities.py#L73-L82 |
6,878 | celery/cell | cell/actors.py | Actor.send | def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
... | python | def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
... | [
"def",
"send",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"to",
"=",
"None",
",",
"nowait",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"to",
"is",
"None",
":",
"to",
"=",
"self",
".",
"routing_key",
"r",
"=",
"se... | Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply. | [
"Call",
"method",
"on",
"agent",
"listening",
"to",
"routing_key",
"."
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L259-L275 |
6,879 | celery/cell | cell/actors.py | Actor.throw | def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
... | python | def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
... | [
"def",
"throw",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"nowait",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"self",
".",
"call_or_cast",
"(",
"method",
",",
"args",
",",
"type",
"=",
"ACTOR_TYPE",
".",
"RR",
... | Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply. | [
"Call",
"method",
"on",
"one",
"of",
"the",
"agents",
"in",
"round",
"robin",
"."
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L277-L290 |
6,880 | celery/cell | cell/actors.py | Actor.scatter | def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the ... | python | def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the ... | [
"def",
"scatter",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"nowait",
"=",
"False",
",",
"timeout",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"timeout",
"=",
"timeout",
"if",
"timeout",
"is",
"not",
"None",
"else",
"self",
... | Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default... | [
"Broadcast",
"method",
"to",
"all",
"agents",
"."
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L292-L320 |
6,881 | celery/cell | cell/actors.py | Actor.call_or_cast | def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword no... | python | def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword no... | [
"def",
"call_or_cast",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"nowait",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"(",
"nowait",
"and",
"self",
".",
"cast",
"or",
"self",
".",
"call",
")",
"(",
"method",
","... | Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available ... | [
"Apply",
"remote",
"method",
"asynchronously",
"or",
"synchronously",
"depending",
"on",
"the",
"value",
"of",
"nowait",
"."
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L322-L347 |
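The visible body of call_or_cast uses the old `nowait and self.cast or self.call` idiom to pick a bound method and then apply it. A sketch of the same dispatch with the modern conditional expression; the stub methods stand in for the real kombu-backed ones:

    class Actor(object):
        def call(self, method, args, **kwargs):
            print('calling %s and waiting for the reply' % method)

        def cast(self, method, args, **kwargs):
            print('casting %s without waiting' % method)

        def call_or_cast(self, method, args={}, nowait=False, **kwargs):
            return (self.cast if nowait else self.call)(method, args, **kwargs)

    Actor().call_or_cast('ping', nowait=True)  # fire-and-forget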
6,882 | celery/cell | cell/actors.py | Actor.cast | def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_ret... | python | def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_ret... | [
"def",
"cast",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"declare",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"retry_policy",
"=",
"None",
",",
"type",
"=",
"None",
",",
"exchange",
"=",
"None",
",",
"*",
"*",
"props",
")",
... | Send message to actor. Discarding replies. | [
"Send",
"message",
"to",
"actor",
".",
"Discarding",
"replies",
"."
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L398-L420 |
6,883 | celery/cell | cell/actors.py | Actor.handle_call | def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r) | python | def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r) | [
"def",
"handle_call",
"(",
"self",
",",
"body",
",",
"message",
")",
":",
"try",
":",
"r",
"=",
"self",
".",
"_DISPATCH",
"(",
"body",
",",
"ticket",
"=",
"message",
".",
"properties",
"[",
"'reply_to'",
"]",
")",
"except",
"self",
".",
"Next",
":",
... | Handle call message. | [
"Handle",
"call",
"message",
"."
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L434-L442 |
6,884 | celery/cell | cell/actors.py | Actor._on_message | def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
... | python | def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
... | [
"def",
"_on_message",
"(",
"self",
",",
"body",
",",
"message",
")",
":",
"if",
"message",
".",
"properties",
".",
"get",
"(",
"'reply_to'",
")",
":",
"handler",
"=",
"self",
".",
"handle_call",
"else",
":",
"handler",
"=",
"self",
".",
"handle_cast",
... | What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply. | [
"What",
"to",
"do",
"when",
"a",
"message",
"is",
"received",
"."
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L460-L489 |
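The dispatch rule in `_on_message` is simple: a message carrying a `reply_to` property is routed to the call handler (which must send a reply), everything else goes to the cast handler, and the message is acknowledged afterwards. A self-contained sketch of that rule, with a stand-in `Message` class rather than a real kombu message:

```python
# Stand-in message object; kombu messages expose .properties and .ack() too,
# but this keeps the sketch runnable without a broker.
class Message(object):
    def __init__(self, properties):
        self.properties = properties
        self.acked = False

    def ack(self):
        self.acked = True

def handle_call(body, message):
    print('call -> will reply to', message.properties['reply_to'])

def handle_cast(body, message):
    print('cast -> no reply expected')

def on_message(body, message):
    # Pick the handler based on whether the sender expects a reply.
    if message.properties.get('reply_to'):
        handler = handle_call
    else:
        handler = handle_cast
    handler(body, message)
    message.ack()   # acknowledge after handling, as a consumer callback would

on_message({'method': 'ping'}, Message({'reply_to': 'amq.gen-1'}))
on_message({'method': 'ping'}, Message({}))
```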
6,885 | celery/cell | cell/bin/base.py | Command.parse_options | def parse_options(self, prog_name, arguments):
"""Parse the available options."""
# Don't want to load configuration to just print the version,
# so we handle --version manually here.
if '--version' in arguments:
self.exit_status(self.version, fh=sys.stdout)
parser = ... | python | def parse_options(self, prog_name, arguments):
"""Parse the available options."""
# Don't want to load configuration to just print the version,
# so we handle --version manually here.
if '--version' in arguments:
self.exit_status(self.version, fh=sys.stdout)
parser = ... | [
"def",
"parse_options",
"(",
"self",
",",
"prog_name",
",",
"arguments",
")",
":",
"# Don't want to load configuration to just print the version,",
"# so we handle --version manually here.",
"if",
"'--version'",
"in",
"arguments",
":",
"self",
".",
"exit_status",
"(",
"self... | Parse the available options. | [
"Parse",
"the",
"available",
"options",
"."
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/bin/base.py#L67-L75 |
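The point of `parse_options` handling `--version` by hand is that printing the version should never require loading configuration or building the full parser. A hedged sketch of that pattern using the standard library (the `VERSION` string, `make_parser`, and the `--loglevel` option are illustrative, not the cell CLI):

```python
# Early --version check before any configuration or parser setup.
import sys
from argparse import ArgumentParser

VERSION = '0.0.3'   # hypothetical version string

def make_parser(prog_name):
    parser = ArgumentParser(prog=prog_name)
    parser.add_argument('--loglevel', default='INFO')
    return parser

def parse_options(prog_name, arguments):
    if '--version' in arguments:
        print(VERSION)
        sys.exit(0)                      # exit before any config is touched
    parser = make_parser(prog_name)
    return parser.parse_args(arguments)

print(parse_options('cell', ['--loglevel', 'DEBUG']))
```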
6,886 | celery/cell | cell/results.py | AsyncResult.get | def get(self, **kwargs):
"What kind of arguments should be pass here"
kwargs.setdefault('limit', 1)
return self._first(self.gather(**kwargs)) | python | def get(self, **kwargs):
"What kind of arguments should be pass here"
kwargs.setdefault('limit', 1)
return self._first(self.gather(**kwargs)) | [
"def",
"get",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'limit'",
",",
"1",
")",
"return",
"self",
".",
"_first",
"(",
"self",
".",
"gather",
"(",
"*",
"*",
"kwargs",
")",
")"
] | What kind of arguments should be pass here | [
"What",
"kind",
"of",
"arguments",
"should",
"be",
"pass",
"here"
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/results.py#L30-L33 |
6,887 | celery/cell | cell/results.py | AsyncResult._gather | def _gather(self, *args, **kwargs):
"""Generator over the results
"""
propagate = kwargs.pop('propagate', True)
return (self.to_python(reply, propagate=propagate)
for reply in self.actor._collect_replies(*args, **kwargs)) | python | def _gather(self, *args, **kwargs):
"""Generator over the results
"""
propagate = kwargs.pop('propagate', True)
return (self.to_python(reply, propagate=propagate)
for reply in self.actor._collect_replies(*args, **kwargs)) | [
"def",
"_gather",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"propagate",
"=",
"kwargs",
".",
"pop",
"(",
"'propagate'",
",",
"True",
")",
"return",
"(",
"self",
".",
"to_python",
"(",
"reply",
",",
"propagate",
"=",
"propagate... | Generator over the results | [
"Generator",
"over",
"the",
"results"
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/results.py#L47-L52 |
6,888 | celery/cell | cell/results.py | AsyncResult.to_python | def to_python(self, reply, propagate=True):
"""Extracts the value out of the reply message.
:param reply: In the case of a successful call the reply message
will be::
{'ok': return_value, **default_fields}
Therefore the method returns: return_value, **default_f... | python | def to_python(self, reply, propagate=True):
"""Extracts the value out of the reply message.
:param reply: In the case of a successful call the reply message
will be::
{'ok': return_value, **default_fields}
Therefore the method returns: return_value, **default_f... | [
"def",
"to_python",
"(",
"self",
",",
"reply",
",",
"propagate",
"=",
"True",
")",
":",
"try",
":",
"return",
"reply",
"[",
"'ok'",
"]",
"except",
"KeyError",
":",
"error",
"=",
"self",
".",
"Error",
"(",
"*",
"reply",
".",
"get",
"(",
"'nok'",
")"... | Extracts the value out of the reply message.
:param reply: In the case of a successful call the reply message
will be::
{'ok': return_value, **default_fields}
Therefore the method returns: return_value, **default_fields
If the method raises an exception th... | [
"Extracts",
"the",
"value",
"out",
"of",
"the",
"reply",
"message",
"."
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/results.py#L54-L79 |
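The reply envelope described above is `{'ok': value}` on success and `{'nok': ...}` on failure, with `propagate` deciding whether the error is raised or returned. A minimal stand-alone re-implementation of that unpacking rule (`RemoteError` is a placeholder for the actor's `Error` class):

```python
class RemoteError(Exception):
    pass

def to_python(reply, propagate=True):
    try:
        return reply['ok']                      # successful call: unwrap the value
    except KeyError:
        error = RemoteError(*(reply.get('nok') or ()))
        if propagate:
            raise error                         # re-raise the remote failure locally
        return error                            # or hand the error back as a value

print(to_python({'ok': 42}))                                  # -> 42
print(to_python({'nok': ['boom']}, propagate=False))          # -> RemoteError('boom')
try:
    to_python({'nok': ['boom']})
except RemoteError as exc:
    print('raised:', exc)
```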
6,889 | celery/cell | cell/agents.py | dAgent.spawn | def spawn(self, cls, kwargs={}, nowait=False):
"""Spawn a new actor on a celery worker by sending
a remote command to the worker.
:param cls: the name of the :class:`~.cell.actors.Actor` class or its
derivative.
:keyword kwargs: The keyword arguments to pass on to
... | python | def spawn(self, cls, kwargs={}, nowait=False):
"""Spawn a new actor on a celery worker by sending
a remote command to the worker.
:param cls: the name of the :class:`~.cell.actors.Actor` class or its
derivative.
:keyword kwargs: The keyword arguments to pass on to
... | [
"def",
"spawn",
"(",
"self",
",",
"cls",
",",
"kwargs",
"=",
"{",
"}",
",",
"nowait",
"=",
"False",
")",
":",
"actor_id",
"=",
"uuid",
"(",
")",
"if",
"str",
"(",
"qualname",
"(",
"cls",
")",
")",
"==",
"'__builtin__.unicode'",
":",
"name",
"=",
... | Spawn a new actor on a celery worker by sending
a remote command to the worker.
:param cls: the name of the :class:`~.cell.actors.Actor` class or its
derivative.
:keyword kwargs: The keyword arguments to pass on to
actor __init__ (a :class:`dict`)
... | [
"Spawn",
"a",
"new",
"actor",
"on",
"a",
"celery",
"worker",
"by",
"sending",
"a",
"remote",
"command",
"to",
"the",
"worker",
"."
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/agents.py#L99-L128 |
6,890 | celery/cell | cell/agents.py | dAgent.select | def select(self, cls, **kwargs):
"""Get the id of already spawned actor
:keyword actor: the name of the :class:`Actor` class
"""
name = qualname(cls)
id = first_reply(
self.scatter('select', {'cls': name}, limit=1), cls)
return ActorProxy(name, id, agent=self... | python | def select(self, cls, **kwargs):
"""Get the id of already spawned actor
:keyword actor: the name of the :class:`Actor` class
"""
name = qualname(cls)
id = first_reply(
self.scatter('select', {'cls': name}, limit=1), cls)
return ActorProxy(name, id, agent=self... | [
"def",
"select",
"(",
"self",
",",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
"=",
"qualname",
"(",
"cls",
")",
"id",
"=",
"first_reply",
"(",
"self",
".",
"scatter",
"(",
"'select'",
",",
"{",
"'cls'",
":",
"name",
"}",
",",
"limit",
"=",... | Get the id of already spawned actor
:keyword actor: the name of the :class:`Actor` class | [
"Get",
"the",
"id",
"of",
"already",
"spawned",
"actor"
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/agents.py#L130-L139 |
6,891 | celery/cell | cell/agents.py | dAgent.process_message | def process_message(self, actor, body, message):
"""Process actor message depending depending on the the worker settings.
If greenlets are enabled in the worker, the actor message is processed
in a greenlet from the greenlet pool,
Otherwise, the message is processed by the same thread.
... | python | def process_message(self, actor, body, message):
"""Process actor message depending depending on the the worker settings.
If greenlets are enabled in the worker, the actor message is processed
in a greenlet from the greenlet pool,
Otherwise, the message is processed by the same thread.
... | [
"def",
"process_message",
"(",
"self",
",",
"actor",
",",
"body",
",",
"message",
")",
":",
"if",
"actor",
"is",
"not",
"self",
"and",
"self",
".",
"is_green",
"(",
")",
":",
"self",
".",
"pool",
".",
"spawn_n",
"(",
"actor",
".",
"_on_message",
",",... | Process actor message depending depending on the the worker settings.
If greenlets are enabled in the worker, the actor message is processed
in a greenlet from the greenlet pool,
Otherwise, the message is processed by the same thread.
The method is invoked from the callback `cell.actors... | [
"Process",
"actor",
"message",
"depending",
"depending",
"on",
"the",
"the",
"worker",
"settings",
"."
] | c7f9b3a0c11ae3429eacb4114279cf2614e94a48 | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/agents.py#L164-L187 |
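The rule in `process_message` is: when the worker runs with greenlets, hand the actor callback to the green pool; otherwise run it inline in the consuming thread. A stand-alone sketch of that branching, where `DummyPool` stands in for the worker's eventlet/gevent pool and simply calls the function directly so the example stays runnable:

```python
class DummyPool(object):
    def spawn_n(self, fn, *args):
        # A real green pool would schedule fn(*args) on a greenlet; the
        # stand-in just invokes it so the sketch stays self-contained.
        fn(*args)

def on_message(body, message):
    print('handling', body)

def process_message(body, message, pool=None, is_green=False):
    if is_green and pool is not None:
        pool.spawn_n(on_message, body, message)   # hand off to the pool
    else:
        on_message(body, message)                 # same-thread processing

process_message({'method': 'ping'}, object())                        # inline
process_message({'method': 'ping'}, object(), DummyPool(), True)     # pooled
```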
6,892 | yhat/pandasql | pandasql/sqldf.py | get_outer_frame_variables | def get_outer_frame_variables():
""" Get a dict of local and global variables of the first outer frame from another file. """
cur_filename = inspect.getframeinfo(inspect.currentframe()).filename
outer_frame = next(f
for f in inspect.getouterframes(inspect.currentframe())
... | python | def get_outer_frame_variables():
""" Get a dict of local and global variables of the first outer frame from another file. """
cur_filename = inspect.getframeinfo(inspect.currentframe()).filename
outer_frame = next(f
for f in inspect.getouterframes(inspect.currentframe())
... | [
"def",
"get_outer_frame_variables",
"(",
")",
":",
"cur_filename",
"=",
"inspect",
".",
"getframeinfo",
"(",
"inspect",
".",
"currentframe",
"(",
")",
")",
".",
"filename",
"outer_frame",
"=",
"next",
"(",
"f",
"for",
"f",
"in",
"inspect",
".",
"getouterfram... | Get a dict of local and global variables of the first outer frame from another file. | [
"Get",
"a",
"dict",
"of",
"local",
"and",
"global",
"variables",
"of",
"the",
"first",
"outer",
"frame",
"from",
"another",
"file",
"."
] | e799c6f53be9653e8998a25adb5e2f1643442699 | https://github.com/yhat/pandasql/blob/e799c6f53be9653e8998a25adb5e2f1643442699/pandasql/sqldf.py#L98-L107 |
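The frame-inspection idea here is to collect the caller's local and global variables so a query can refer to DataFrames defined at the call site. A simplified, runnable sketch that takes the immediate caller's frame rather than searching for the first frame from a different file:

```python
import inspect

def caller_variables():
    frame = inspect.currentframe().f_back          # the caller's frame
    variables = {}
    variables.update(frame.f_globals)              # module-level names
    variables.update(frame.f_locals)               # names local to the caller
    return variables

def show():
    # The helper can "see" names defined where it was called from.
    print(sorted(k for k in caller_variables() if k == 'prices'))

prices = [1, 2, 3]
show()   # -> ['prices']
```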
6,893 | yhat/pandasql | pandasql/sqldf.py | extract_table_names | def extract_table_names(query):
""" Extract table names from an SQL query. """
# a good old fashioned regex. turns out this worked better than actually parsing the code
tables_blocks = re.findall(r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)', query, re.IGNORECASE)
tables = [tbl
for block in tabl... | python | def extract_table_names(query):
""" Extract table names from an SQL query. """
# a good old fashioned regex. turns out this worked better than actually parsing the code
tables_blocks = re.findall(r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)', query, re.IGNORECASE)
tables = [tbl
for block in tabl... | [
"def",
"extract_table_names",
"(",
"query",
")",
":",
"# a good old fashioned regex. turns out this worked better than actually parsing the code",
"tables_blocks",
"=",
"re",
".",
"findall",
"(",
"r'(?:FROM|JOIN)\\s+(\\w+(?:\\s*,\\s*\\w+)*)'",
",",
"query",
",",
"re",
".",
"IGN... | Extract table names from an SQL query. | [
"Extract",
"table",
"names",
"from",
"an",
"SQL",
"query",
"."
] | e799c6f53be9653e8998a25adb5e2f1643442699 | https://github.com/yhat/pandasql/blob/e799c6f53be9653e8998a25adb5e2f1643442699/pandasql/sqldf.py#L110-L117 |
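The regex shown in the row above captures the comma-separated identifier list after each FROM/JOIN; splitting those blocks yields the individual table names. A condensed, runnable re-implementation (returning a set here for brevity):

```python
import re

def extract_table_names(query):
    blocks = re.findall(r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)', query, re.IGNORECASE)
    return set(tbl.strip() for block in blocks for tbl in block.split(','))

q = "SELECT o.id FROM orders, customers JOIN items ON items.oid = o.id"
print(extract_table_names(q))   # -> {'orders', 'customers', 'items'} (order may vary)
```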
6,894 | yhat/pandasql | pandasql/sqldf.py | write_table | def write_table(df, tablename, conn):
""" Write a dataframe to the database. """
with catch_warnings():
filterwarnings('ignore',
message='The provided table name \'%s\' is not found exactly as such in the database' % tablename)
to_sql(df, name=tablename, con=conn,
... | python | def write_table(df, tablename, conn):
""" Write a dataframe to the database. """
with catch_warnings():
filterwarnings('ignore',
message='The provided table name \'%s\' is not found exactly as such in the database' % tablename)
to_sql(df, name=tablename, con=conn,
... | [
"def",
"write_table",
"(",
"df",
",",
"tablename",
",",
"conn",
")",
":",
"with",
"catch_warnings",
"(",
")",
":",
"filterwarnings",
"(",
"'ignore'",
",",
"message",
"=",
"'The provided table name \\'%s\\' is not found exactly as such in the database'",
"%",
"tablename"... | Write a dataframe to the database. | [
"Write",
"a",
"dataframe",
"to",
"the",
"database",
"."
] | e799c6f53be9653e8998a25adb5e2f1643442699 | https://github.com/yhat/pandasql/blob/e799c6f53be9653e8998a25adb5e2f1643442699/pandasql/sqldf.py#L120-L126 |
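Behind the scenes this amounts to loading each referenced DataFrame into the database so SQL can run against it. A hedged sketch of the same round trip using only the public pandas API and an in-memory SQLite connection (table and column names are illustrative):

```python
import sqlite3
import pandas as pd

df = pd.DataFrame({'name': ['a', 'b'], 'value': [1, 2]})

conn = sqlite3.connect(':memory:')
df.to_sql('mytable', conn, index=False)            # write the frame as a table

out = pd.read_sql_query('SELECT name, value FROM mytable WHERE value > 1', conn)
print(out)
conn.close()
```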
6,895 | bsmurphy/PyKrige | benchmarks/kriging_benchmarks.py | make_benchark | def make_benchark(n_train, n_test, n_dim=2):
""" Compute the benchmarks for Ordianry Kriging
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
Returns
--... | python | def make_benchark(n_train, n_test, n_dim=2):
""" Compute the benchmarks for Ordianry Kriging
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
Returns
--... | [
"def",
"make_benchark",
"(",
"n_train",
",",
"n_test",
",",
"n_dim",
"=",
"2",
")",
":",
"X_train",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"n_train",
",",
"n_dim",
")",
"y_train",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"n_train",
")",
"X_... | Compute the benchmarks for Ordianry Kriging
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
Returns
-------
res : dict
a dictionary with the timi... | [
"Compute",
"the",
"benchmarks",
"for",
"Ordianry",
"Kriging"
] | a4db3003b0b5688658c12faeb95a5a8b2b14b433 | https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/benchmarks/kriging_benchmarks.py#L14-L57 |
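A condensed benchmark in the same spirit: random 2-D training and test points, timing the fit (variogram estimation happens in the constructor) and the prediction separately. This assumes the standard `pykrige.ok.OrdinaryKriging` API; the `res` dict mirrors the timing dictionary described above, not its exact keys:

```python
from time import time
import numpy as np
from pykrige.ok import OrdinaryKriging

def benchmark(n_train=400, n_test=1000):
    rng = np.random.RandomState(0)
    X_train = rng.rand(n_train, 2)
    y_train = rng.rand(n_train)
    X_test = rng.rand(n_test, 2)

    res = {}
    t0 = time()
    ok = OrdinaryKriging(X_train[:, 0], X_train[:, 1], y_train,
                         variogram_model='linear')
    res['fit'] = time() - t0            # variogram fit + setup time

    t0 = time()
    y_pred, sigma_sq = ok.execute('points', X_test[:, 0], X_test[:, 1])
    res['predict'] = time() - t0        # prediction time on the test points
    return res

print(benchmark())
```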
6,896 | bsmurphy/PyKrige | benchmarks/kriging_benchmarks.py | print_benchmark | def print_benchmark(n_train, n_test, n_dim, res):
""" Print the benchmarks
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
res : dict
a dictionary with... | python | def print_benchmark(n_train, n_test, n_dim, res):
""" Print the benchmarks
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
res : dict
a dictionary with... | [
"def",
"print_benchmark",
"(",
"n_train",
",",
"n_test",
",",
"n_dim",
",",
"res",
")",
":",
"print",
"(",
"'='",
"*",
"80",
")",
"print",
"(",
"' '",
"*",
"10",
",",
"'N_dim={}, N_train={}, N_test={}'",
".",
"format",
"(",
"n_dim",
",",
"n_train",
",",
... | Print the benchmarks
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
res : dict
a dictionary with the timing results | [
"Print",
"the",
"benchmarks"
] | a4db3003b0b5688658c12faeb95a5a8b2b14b433 | https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/benchmarks/kriging_benchmarks.py#L60-L96 |
6,897 | bsmurphy/PyKrige | pykrige/uk.py | UniversalKriging.display_variogram_model | def display_variogram_model(self):
"""Displays variogram model with the actual binned data."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, 'r*')
ax.plot(self.lags,
self.variogram_function(self.variogram_model_parameters... | python | def display_variogram_model(self):
"""Displays variogram model with the actual binned data."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, 'r*')
ax.plot(self.lags,
self.variogram_function(self.variogram_model_parameters... | [
"def",
"display_variogram_model",
"(",
"self",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"ax",
".",
"plot",
"(",
"self",
".",
"lags",
",",
"self",
".",
"semivariance",
",",
"'r*'",
... | Displays variogram model with the actual binned data. | [
"Displays",
"variogram",
"model",
"with",
"the",
"actual",
"binned",
"data",
"."
] | a4db3003b0b5688658c12faeb95a5a8b2b14b433 | https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/uk.py#L608-L616 |
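The plot produced here is the binned experimental semivariance (red stars) overlaid with the fitted variogram model evaluated at the same lags. A stand-alone matplotlib sketch of that figure; the exponential-style model and noisy "binned" values below are synthetic, purely to make the example self-contained:

```python
import numpy as np
import matplotlib.pyplot as plt

lags = np.linspace(0.05, 1.0, 15)
model = 0.8 * (1.0 - np.exp(-3.0 * lags))            # stand-in variogram model
semivariance = model + np.random.RandomState(1).normal(0, 0.03, lags.size)

fig, ax = plt.subplots()
ax.plot(lags, semivariance, 'r*', label='binned semivariance')
ax.plot(lags, model, 'k-', label='fitted model')
ax.set_xlabel('lag distance')
ax.set_ylabel('semivariance')
ax.legend()
plt.show()
```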
6,898 | bsmurphy/PyKrige | pykrige/uk.py | UniversalKriging.plot_epsilon_residuals | def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*')
ax.axhline(y=0.0)
plt.show() | python | def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*')
ax.axhline(y=0.0)
plt.show() | [
"def",
"plot_epsilon_residuals",
"(",
"self",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"ax",
".",
"scatter",
"(",
"range",
"(",
"self",
".",
"epsilon",
".",
"size",
")",
",",
"self... | Plots the epsilon residuals for the variogram fit. | [
"Plots",
"the",
"epsilon",
"residuals",
"for",
"the",
"variogram",
"fit",
"."
] | a4db3003b0b5688658c12faeb95a5a8b2b14b433 | https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/uk.py#L647-L653 |
6,899 | bsmurphy/PyKrige | pykrige/uk.py | UniversalKriging.print_statistics | def print_statistics(self):
"""Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
"""
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR) | python | def print_statistics(self):
"""Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
"""
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR) | [
"def",
"print_statistics",
"(",
"self",
")",
":",
"print",
"(",
"\"Q1 =\"",
",",
"self",
".",
"Q1",
")",
"print",
"(",
"\"Q2 =\"",
",",
"self",
".",
"Q2",
")",
"print",
"(",
"\"cR =\"",
",",
"self",
".",
"cR",
")"
] | Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible. | [
"Prints",
"out",
"the",
"Q1",
"Q2",
"and",
"cR",
"statistics",
"for",
"the",
"variogram",
"fit",
".",
"NOTE",
"that",
"ideally",
"Q1",
"is",
"close",
"to",
"zero",
"Q2",
"is",
"close",
"to",
"1",
"and",
"cR",
"is",
"as",
"small",
"as",
"possible",
".... | a4db3003b0b5688658c12faeb95a5a8b2b14b433 | https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/uk.py#L661-L668 |