| desc | decl | bodies |
|---|---|---|
'Extracts features from preprocessed inputs.
This function is responsible for extracting feature maps from preprocessed
images.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, heigh... | @abstractmethod
def extract_features(self, preprocessed_inputs):
| pass
|
'SSDMetaArch Constructor.
TODO: group NMS parameters + score converter into
a class and loss parameters into a class and write config protos for
postprocessing and losses.
Args:
is_training: A boolean indicating whether the training version of the
computation graph should be constructed.
anchor_generator: an anchor_gen... | def __init__(self, is_training, anchor_generator, box_predictor, box_coder, feature_extractor, matcher, region_similarity_calculator, image_resizer_fn, non_max_suppression_fn, score_conversion_fn, classification_loss, localization_loss, classification_loss_weight, localization_loss_weight, normalize_loss_by_num_matches... | super(SSDMetaArch, self).__init__(num_classes=box_predictor.num_classes)
self._is_training = is_training
self._extract_features_scope = 'FeatureExtractor'
self._anchor_generator = anchor_generator
self._box_predictor = box_predictor
self._box_coder = box_coder
self._feature_extractor = featu... |
'Feature-extractor specific preprocessing.
See base class.
Args:
inputs: a [batch, height_in, width_in, channels] float tensor representing
a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, height_out, width_out, channels] float
tensor representing a batch of images.
Raises:
Val... | def preprocess(self, inputs):
| if (inputs.dtype is not tf.float32):
raise ValueError('`preprocess` expects a tf.float32 tensor')
with tf.name_scope('Preprocessor'):
resized_inputs = tf.map_fn(self._image_resizer_fn, elems=inputs, dtype=tf.float32)
return self._feature_extractor.preprocess(resized_inputs)
|
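The resizer itself is injected through the constructor, so any per-image function works. A minimal sketch of how such a resizer plugs into the `tf.map_fn` call above, assuming TF 1.x and a hypothetical fixed 300x300 resizer:

```python
import tensorflow as tf

# Hypothetical resizer for illustration; the real image_resizer_fn is
# whatever was passed to the SSDMetaArch constructor.
def image_resizer_fn(image):
    return tf.image.resize_images(image, [300, 300])

images = tf.placeholder(tf.float32, shape=[None, None, None, 3])
# As in `preprocess`: apply the resizer to each image in the batch.
resized = tf.map_fn(image_resizer_fn, elems=images, dtype=tf.float32)
```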
'Predicts unpostprocessed tensors from input tensor.
This function takes an input batch of images and runs it through the forward
pass of the network to yield unpostprocessed predictions.
A side effect of calling the predict method is that self._anchors is
populated with a box_list.BoxList of anchors. These anchors ... | def predict(self, preprocessed_inputs):
| with tf.variable_scope(None, self._extract_features_scope, [preprocessed_inputs]):
feature_maps = self._feature_extractor.extract_features(preprocessed_inputs)
feature_map_spatial_dims = self._get_feature_map_spatial_dims(feature_maps)
self._anchors = self._anchor_generator.generate(feature_map_spat... |
'Adds box predictors to each feature map and returns concatenated results.
Args:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
Returns:
box_encodings: 4-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
class_prediction... | def _add_box_predictions_to_feature_maps(self, feature_maps):
| num_anchors_per_location_list = self._anchor_generator.num_anchors_per_location()
if (len(feature_maps) != len(num_anchors_per_location_list)):
raise RuntimeError('the number of feature maps must match the length of self.anchors.NumAnchorsPerLocation().')
box_encodings_... |
'Return list of spatial dimensions for each feature map in a list.
Args:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i].
Returns:
a list of pairs (height, width) for each feature map in feature_maps'
| def _get_feature_map_spatial_dims(self, feature_maps):
| feature_map_shapes = [shape_utils.combined_static_and_dynamic_shape(feature_map) for feature_map in feature_maps]
return [(shape[1], shape[2]) for shape in feature_map_shapes]
|
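`shape_utils.combined_static_and_dynamic_shape` is assumed by this cell; a minimal sketch of what such a helper typically does (static dims where known, `tf.shape` otherwise):

```python
import tensorflow as tf

def combined_static_and_dynamic_shape(tensor):
    # Prefer compile-time dims; fall back to runtime dims where unknown.
    static = tensor.shape.as_list()
    dynamic = tf.shape(tensor)
    return [static[i] if static[i] is not None else dynamic[i]
            for i in range(len(static))]

fmap = tf.placeholder(tf.float32, shape=[None, 19, 19, 512])
shape = combined_static_and_dynamic_shape(fmap)
# shape[1:3] == [19, 19], so (height, width) is fully static here.
```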
'Converts prediction tensors to final detections.
This function converts raw predictions tensors to final detection results by
slicing off the background class, decoding box predictions and applying
non max suppression and clipping to the image window.
See base class for output format conventions. Note also that by de... | def postprocess(self, prediction_dict):
| if (('box_encodings' not in prediction_dict) or ('class_predictions_with_background' not in prediction_dict)):
raise ValueError('prediction_dict does not contain expected entries.')
with tf.name_scope('Postprocessor'):
box_encodings = prediction_dict['box_encodings']
class... |
'Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding prediction tensors with
1) box_encodings: 4-D float tensor of shape [batch_size, num_anchors,
bo... | def loss(self, prediction_dict, scope=None):
| with tf.name_scope(scope, 'Loss', prediction_dict.values()):
(batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights, match_list) = self._assign_targets(self.groundtruth_lists(fields.BoxListFields.boxes), self.groundtruth_lists(fields.BoxListFields.classes))
if self._add_summarie... |
'Assign groundtruth targets.
Adds a background class to each one-hot encoding of groundtruth classes
and uses target assigner to obtain regression and classification targets.
Args:
groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
containing coordinates of the groundtruth boxes.
Groundtruth boxes ar... | def _assign_targets(self, groundtruth_boxes_list, groundtruth_classes_list):
| groundtruth_boxlists = [box_list.BoxList(boxes) for boxes in groundtruth_boxes_list]
groundtruth_classes_with_background_list = [tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT') for one_hot_encoding in groundtruth_classes_list]
return target_assigner.batch_assign_targets(self._target_assigner, se... |
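The `tf.pad` call prepends a zero background column to each one-hot encoding. A worked NumPy equivalent of that padding:

```python
import numpy as np

# One-hot labels for 2 groundtruth boxes over K=3 foreground classes.
one_hot = np.array([[0., 1., 0.],
                    [1., 0., 0.]])
# tf.pad(one_hot, [[0, 0], [1, 0]]) adds a zero column at index 0,
# reserving it for the background class:
with_background = np.pad(one_hot, [(0, 0), (1, 0)], mode='constant')
# [[0., 0., 1., 0.],
#  [0., 1., 0., 0.]]
```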
'Creates tensorflow summaries for the input boxes and anchors.
This function creates four summaries corresponding to the average
number (over images in a batch) of (1) groundtruth boxes, (2) anchors
marked as positive, (3) anchors marked as negative, and (4) anchors marked
as ignored.
Args:
groundtruth_boxes_list: a li... | def _summarize_input(self, groundtruth_boxes_list, match_list):
| num_boxes_per_image = tf.stack([tf.shape(x)[0] for x in groundtruth_boxes_list])
pos_anchors_per_image = tf.stack([match.num_matched_columns() for match in match_list])
neg_anchors_per_image = tf.stack([match.num_unmatched_columns() for match in match_list])
ignored_anchors_per_image = tf.stack([match.n... |
'Applies hard mining to anchorwise losses.
Args:
location_losses: Float tensor of shape [batch_size, num_anchors]
representing anchorwise location losses.
cls_losses: Float tensor of shape [batch_size, num_anchors]
representing anchorwise classification losses.
prediction_dict: a dictionary holding prediction tensors... | def _apply_hard_mining(self, location_losses, cls_losses, prediction_dict, match_list)
| class_pred_shape = [(-1), self.anchors.num_boxes_static(), self.num_classes]
class_predictions = tf.reshape(tf.slice(prediction_dict['class_predictions_with_background'], [0, 0, 1], class_pred_shape), class_pred_shape)
decoded_boxes = self._batch_decode(prediction_dict['box_encodings'])
decoded_box_tens... |
'Decodes a batch of box encodings with respect to the anchors.
Args:
box_encodings: A float32 tensor of shape
[batch_size, num_anchors, box_code_size] containing box encodings.
Returns:
decoded_boxes: A float32 tensor of shape
[batch_size, num_anchors, 4] containing the decoded boxes.'
| def _batch_decode(self, box_encodings):
| combined_shape = shape_utils.combined_static_and_dynamic_shape(box_encodings)
batch_size = combined_shape[0]
tiled_anchor_boxes = tf.tile(tf.expand_dims(self.anchors.get(), 0), [batch_size, 1, 1])
tiled_anchors_boxlist = box_list.BoxList(tf.reshape(tiled_anchor_boxes, [(-1), self._box_coder.code_size]))... |
'Returns a map of variables to load from a foreign checkpoint.
See parent class for details.
Args:
from_detection_checkpoint: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Returns:
A dict mapping ... | def restore_map(self, from_detection_checkpoint=True):
| variables_to_restore = {}
for variable in tf.all_variables():
if variable.op.name.startswith(self._extract_features_scope):
var_name = variable.op.name
if (not from_detection_checkpoint):
var_name = re.split((('^' + self._extract_features_scope) + '/'), var_name)[... |
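The `re.split` strips the feature-extractor scope prefix so variable names line up with a classification checkpoint. A sketch with a hypothetical variable name (the elided split index is presumably the last element; that is an assumption here):

```python
import re

scope = 'FeatureExtractor'
var_name = scope + '/resnet_v1/conv1/weights'  # hypothetical name
# Drop the scope prefix so the remainder matches names in a
# classification checkpoint; [-1] is an assumption for the elided index.
stripped = re.split('^' + scope + '/', var_name)[-1]
# 'resnet_v1/conv1/weights'
```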
'RFCNMetaArch Constructor.
Args:
is_training: A boolean indicating whether the training version of the
computation graph should be constructed.
num_classes: Number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and n... | def __init__(self, is_training, num_classes, image_resizer_fn, feature_extractor, first_stage_only, first_stage_anchor_generator, first_stage_atrous_rate, first_stage_box_predictor_arg_scope, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_stage_positive_balance... | super(RFCNMetaArch, self).__init__(is_training, num_classes, image_resizer_fn, feature_extractor, first_stage_only, first_stage_anchor_generator, first_stage_atrous_rate, first_stage_box_predictor_arg_scope, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_st... |
'Predicts the output tensors from 2nd stage of FasterRCNN.
Args:
rpn_box_encodings: 4-D float tensor of shape
[batch_size, num_valid_anchors, self._box_coder.code_size] containing
predicted boxes.
rpn_objectness_predictions_with_background: 2-D float tensor of shape
[batch_size, num_valid_anchors, 2] containing class
p... | def _predict_second_stage(self, rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features, anchors, image_shape):
| (proposal_boxes_normalized, _, num_proposals) = self._postprocess_rpn(rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape)
box_classifier_features = self._feature_extractor.extract_box_classifier_features(rpn_features, scope=self.second_stage_feature_extractor_scope)
box_pred... |
'Set up mock SSD model.
Here we set up a simple mock SSD model that will always predict 4
detections that happen to always be exactly the anchors that are set up
in the above MockAnchorGenerator. Because we let max_detections=5,
we will also always end up with an extra padded row in the detection
results.'
| def setUp(self):
| is_training = False
self._num_classes = 1
mock_anchor_generator = MockAnchorGenerator2x2()
mock_box_predictor = test_utils.MockBoxPredictor(is_training, self._num_classes)
mock_box_coder = test_utils.MockBoxCoder()
fake_feature_extractor = FakeSSDFeatureExtractor()
mock_matcher = test_utils.... |
'Create a GRU object.
Args:
num_units: Number of units in the GRU
forget_bias (optional): Hack to help learning.
weight_scale (optional): weights are scaled by ws/sqrt(#inputs), with
ws being the weight scale.
clip_value (optional): if the recurrent values grow above this value,
clip them.
collections (optional): List ... | def __init__(self, num_units, forget_bias=1.0, weight_scale=1.0, clip_value=np.inf, collections=None):
| self._num_units = num_units
self._forget_bias = forget_bias
self._weight_scale = weight_scale
self._clip_value = clip_value
self._collections = collections
|
'Return the output portion of the state.'
| def output_from_state(self, state):
| return state
|
'Gated recurrent unit (GRU) function.
Args:
inputs: A 2D batch x input_dim tensor of inputs.
state: The previous state from the last time step.
scope (optional): TF variable scope for defined GRU variables.
Returns:
A tuple (state, state), where state is the newly computed state at time t.
It is returned twice to respe... | def __call__(self, inputs, state, scope=None):
| x = inputs
h = state
if (inputs is not None):
xh = tf.concat(axis=1, values=[x, h])
else:
xh = h
with tf.variable_scope((scope or type(self).__name__)):
with tf.variable_scope('Gates'):
(r, u) = tf.split(axis=1, num_or_size_splits=2, value=linear(xh, (2 * self._nu... |
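The `linear(...)` helper is assumed by this cell but not shown. A minimal sketch of what it plausibly does, given the docstring's scaling rule (an affine layer with fan-in-scaled initialization; the exact signature is an assumption):

```python
import numpy as np
import tensorflow as tf

def linear(x, out_dim, name='linear', alpha=1.0, collections=None):
    # Hypothetical stand-in for the assumed helper: y = x W + b, with W
    # initialized at scale alpha / sqrt(fan_in), per the docstring above.
    in_dim = int(x.get_shape()[1])
    with tf.variable_scope(name):
        w = tf.get_variable(
            'W', [in_dim, out_dim],
            initializer=tf.random_normal_initializer(
                stddev=alpha / np.sqrt(float(in_dim))),
            collections=collections)
        b = tf.get_variable('b', [out_dim],
                            initializer=tf.zeros_initializer())
    return tf.matmul(x, w) + b
```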
'Create a GRU object.
Args:
num_units: Number of units in the GRU
forget_bias (optional): Hack to help learning.
input_weight_scale (optional): input weights are scaled by ws/sqrt(#inputs),
with ws being the weight scale.
rec_weight_scale (optional): recurrent weights are scaled by ws/sqrt(#inputs),
with ws being the weight scale.
clip_valu... | def __init__(self, num_units, forget_bias=1.0, input_weight_scale=1.0, rec_weight_scale=1.0, clip_value=np.inf, input_collections=None, recurrent_collections=None):
| self._num_units = num_units
self._forget_bias = forget_bias
self._input_weight_scale = input_weight_scale
self._rec_weight_scale = rec_weight_scale
self._clip_value = clip_value
self._input_collections = input_collections
self._rec_collections = recurrent_collections
|
'Return the output portion of the state.'
| def output_from_state(self, state):
| return state
|
'Gated recurrent unit (GRU) function.
Args:
inputs: A 2D batch x input_dim tensor of inputs.
state: The previous state from the last time step.
scope (optional): TF variable scope for defined GRU variables.
Returns:
A tuple (state, state), where state is the newly computed state at time t.
It is returned twice to respe... | def __call__(self, inputs, state, scope=None):
| x = inputs
h = state
with tf.variable_scope((scope or type(self).__name__)):
with tf.variable_scope('Gates'):
r_x = u_x = 0.0
if (x is not None):
(r_x, u_x) = tf.split(axis=1, num_or_size_splits=2, value=linear(x, (2 * self._num_units), alpha=self._input_weigh... |
'Create an LFADS model.
train - a model for training, sampling of posteriors is used
posterior_sample_and_average - sample from the posterior, this is used
for evaluating the expected value of the outputs of LFADS, given a
specific input, by averaging over multiple samples from the approx
posterior. Also used for the ... | def __init__(self, hps, kind='train', datasets=None):
| print('Building graph...')
all_kinds = ['train', 'posterior_sample_and_average', 'prior_sample']
assert (kind in all_kinds), 'Wrong kind'
if (hps.feedback_factors_or_rates == 'rates'):
assert (len(hps.dataset_names) == 1), 'Multiple datasets not supported for rate feedbac... |
'Build the feed dictionary, handling cases where no value is defined.
Args:
train_name: The key into the datasets, to set the tf.case statement for
the proper readin / readout matrices.
data_bxtxd: The data tensor
ext_input_bxtxi (optional): The external input tensor
keep_prob: The drop out keep probability.
Retur... | def build_feed_dict(self, train_name, data_bxtxd, ext_input_bxtxi=None, keep_prob=None):
| feed_dict = {}
(B, T, _) = data_bxtxd.shape
feed_dict[self.dataName] = train_name
feed_dict[self.dataset_ph] = data_bxtxd
if ((self.ext_input is not None) and (ext_input_bxtxi is not None)):
feed_dict[self.ext_input] = ext_input_bxtxi
if (keep_prob is None):
feed_dict[self.keep_p... |
'Get a batch of data, either randomly chosen, or specified directly.
Args:
data_extxd: The data to model, numpy tensors with shape:
# examples x # time steps x # dimensions
ext_input_extxi (optional): The external inputs, numpy tensor with shape:
# examples x # time steps x # external input dimensions
batch_size: The ... | @staticmethod
def get_batch(data_extxd, ext_input_extxi=None, batch_size=None, example_idxs=None):
| assert ((batch_size is not None) or (example_idxs is not None)), 'Problems'
(E, T, D) = data_extxd.shape
if (example_idxs is None):
example_idxs = np.random.choice(E, batch_size)
ext_input_bxtxi = None
if (ext_input_extxi is not None):
ext_input_bxtxi = ext_input_extxi[example_idxs, ... |
'Given a number of examples, E, and a batch_size, B, generate indices
[0, 1, 2, ... B-1;
 B, B+1, ... 2*B-1;
 ...]
returning those indices as a 2-dim tensor shaped like E/B x B. Note that
shape is only correct if E % B == 0. If not, then an extra row is generated
so that the remainder of examples is included. The extra exa... | @staticmethod
def example_idxs_mod_batch_size(nexamples, batch_size):
| bmrem = (batch_size - (nexamples % batch_size))
bmrem_examples = []
if (bmrem < batch_size):
ridxs = np.random.permutation(nexamples)[0:bmrem].astype(np.int32)
bmrem_examples = np.sort(ridxs)
example_idxs = (range(nexamples) + list(bmrem_examples))
example_idxs_e_x_edivb = np.reshape... |
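A worked run of this indexing scheme (Python 3 form of the `range` arithmetic):

```python
import numpy as np

nexamples, batch_size = 10, 4
bmrem = batch_size - (nexamples % batch_size)         # 2 pad examples
extras = np.sort(np.random.permutation(nexamples)[:bmrem])
example_idxs = list(range(nexamples)) + list(extras)  # 12 indices total
example_idxs_e_x_edivb = np.reshape(example_idxs, [-1, batch_size])
# shape (3, 4): two of the ten examples appear twice in the epoch.
```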
'Indices 1:nexamples, randomized, in 2D form of
shape = (nexamples / batch_size) x batch_size. The remainder
is managed by drawing randomly from 1:nexamples.
Args:
nexamples: number of examples to randomize
batch_size: number of elements in batch
Returns:
The randomized, properly shaped indices.'
| @staticmethod
def randomize_example_idxs_mod_batch_size(nexamples, batch_size):
| assert (nexamples > batch_size), 'Problems'
bmrem = (batch_size - (nexamples % batch_size))
bmrem_examples = []
if (bmrem < batch_size):
bmrem_examples = np.random.choice(range(nexamples), size=bmrem, replace=False)
example_idxs = (range(nexamples) + list(bmrem_examples))
mixed_example_i... |
'Shuffle the spikes in the temporal dimension. This is useful to
help the LFADS system avoid overfitting to individual spikes or fast
oscillations found in the data that are irrelevant to behavior. A
pure 'tabula rasa' approach would avoid this, but LFADS is sensitive
enough to pick up dynamics that you may not want... | def shuffle_spikes_in_time(self, data_bxtxd):
| (B, T, N) = data_bxtxd.shape
w = self.hps.temporal_spike_jitter_width
if (w == 0):
return data_bxtxd
max_counts = np.max(data_bxtxd)
S_bxtxd = np.zeros([B, T, N])
for mc in range(1, (max_counts + 1)):
idxs = np.nonzero((data_bxtxd >= mc))
data_ones = np.zeros_like(data_bx... |
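A simplified NumPy sketch of the same idea (not the exact count-peeling loop above): move each spike uniformly within +/- w time bins.

```python
import numpy as np

def jitter_spikes_in_time(data_bxtxd, w, rng=np.random):
    # Simplified sketch of temporal spike jitter, for illustration only.
    B, T, N = data_bxtxd.shape
    out = np.zeros_like(data_bxtxd)
    for b, t, n in zip(*np.nonzero(data_bxtxd)):
        for _ in range(int(data_bxtxd[b, t, n])):
            t_new = np.clip(t + rng.randint(-w, w + 1), 0, T - 1)
            out[b, t_new, n] += 1
    return out
```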
'Since LFADS supports multiple datasets in the same dynamical model,
we have to be careful to use all the data in a single training epoch. But
since the datasets may have different data dimensionality, we cannot batch
examples from data dictionaries together. Instead, we generate random
batches within each data dictio... | def shuffle_and_flatten_datasets(self, datasets, kind='train'):
| batch_size = self.hps.batch_size
ndatasets = len(datasets)
random_example_idxs = {}
epoch_idxs = {}
all_name_example_idx_pairs = []
kind_data = (kind + '_data')
for (name, data_dict) in datasets.items():
(nexamples, ntime, data_dim) = data_dict[kind_data].shape
epoch_idxs[nam... |
'Train the model through the entire dataset once.
Args:
datasets: A dict of data dicts. The dataset dict is simply a
name(string)-> data dictionary mapping (See top of lfads.py).
batch_size (optional): The batch_size to use
do_save_ckpt (optional): Should the routine save a checkpoint on this
training epoch?
Returns:... | def train_epoch(self, datasets, batch_size=None, do_save_ckpt=True):
| ops_to_eval = [self.cost, self.recon_cost, self.kl_cost, self.kl_weight, self.l2_cost, self.l2_weight, self.train_op]
collected_op_values = self.run_epoch(datasets, ops_to_eval, kind='train')
total_cost = total_recon_cost = total_kl_cost = 0.0
epoch_size = len(collected_op_values)
for op_values in c... |
'Run the model through the entire dataset once.
Args:
datasets: A dict of data dicts. The dataset dict is simply a
name(string)-> data dictionary mapping (See top of lfads.py).
ops_to_eval: A list of tensorflow operations that will be evaluated in
the tf.session.run() call.
batch_size (optional): The batch_size to us... | def run_epoch(self, datasets, ops_to_eval, kind='train', batch_size=None, do_collect=True, keep_prob=None):
| hps = self.hps
all_name_example_idx_pairs = self.shuffle_and_flatten_datasets(datasets, kind)
kind_data = (kind + '_data')
kind_ext_input = (kind + '_ext_input')
total_cost = total_recon_cost = total_kl_cost = 0.0
session = tf.get_default_session()
epoch_size = len(all_name_example_idx_pairs... |
'Plot and summarize stuff in tensorboard.
Note that everything done in the current function is otherwise done on
a single, randomly selected dataset (except for summary_values, which are
passed in.)
Args:
datasets, the dictionary of datasets used in the study.
summary_values: These summary values are created from the ... | def summarize_all(self, datasets, summary_values):
| hps = self.hps
tr_kl_cost = summary_values['tr_kl_cost']
tr_recon_cost = summary_values['tr_recon_cost']
tr_total_cost = summary_values['tr_total_cost']
kl_weight = summary_values['kl_weight']
l2_weight = summary_values['l2_weight']
l2_cost = summary_values['l2_cost']
has_any_valid_set =... |
'Plot an image relating to a randomly chosen, specific example. We use
posterior sample and average by taking one example, and filling a whole
batch with that example, sample from the posterior, and then average the
quantities.'
| def plot_single_example(self, datasets):
| hps = self.hps
all_data_names = datasets.keys()
data_name = np.random.permutation(all_data_names)[0]
data_dict = datasets[data_name]
has_valid_set = (True if (data_dict['valid_data'] is not None) else False)
cf = 1.0
(E, _, _) = data_dict['train_data'].shape
eidx = np.random.choice(E)
... |
'Train the model, print per-epoch information, and save checkpoints.
Loop over training epochs. The function that actually does the
training is train_epoch. This function iterates over the training
data, one epoch at a time. The learning rate schedule is such
that it will stay the same until the cost goes up in compa... | def train_model(self, datasets):
| hps = self.hps
has_any_valid_set = False
for data_dict in datasets.values():
if (data_dict['valid_data'] is not None):
has_any_valid_set = True
break
session = tf.get_default_session()
lr = session.run(self.learning_rate)
lr_stop = hps.learning_rate_stop
i = (... |
'Evaluate the cost of the epoch.
Args:
data_dict: The dictionary of data (training and validation) used for
training and evaluation of the model, respectively.
Returns:
a 3 tuple of costs:
(epoch total cost, epoch reconstruction cost, epoch KL cost)'
| def eval_cost_epoch(self, datasets, kind='train', ext_input_extxi=None, batch_size=None):
| ops_to_eval = [self.cost, self.recon_cost, self.kl_cost]
collected_op_values = self.run_epoch(datasets, ops_to_eval, kind=kind, keep_prob=1.0)
total_cost = total_recon_cost = total_kl_cost = 0.0
epoch_size = len(collected_op_values)
for op_values in collected_op_values:
total_cost += op_valu... |
'Returns all the goodies for the entire model, per batch.
Args:
data_name: The name of the data dict, to select which in/out matrices
to use.
data_bxtxd: Numpy array training data with shape:
batch_size x # time steps x # dimensions
ext_input_bxtxi: Numpy array training external input with shape:
batch_size x # time s... | def eval_model_runs_batch(self, data_name, data_bxtxd, ext_input_bxtxi=None, do_eval_cost=False, do_average_batch=False):
| session = tf.get_default_session()
feed_dict = self.build_feed_dict(data_name, data_bxtxd, ext_input_bxtxi, keep_prob=1.0)
tf_vals = [self.gen_ics, self.gen_states, self.factors, self.output_dist_params]
tf_vals.append(self.cost)
tf_vals.append(self.nll_bound_vae)
tf_vals.append(self.nll_bound_i... |
'Returns the expected value of all the goodies for the entire model.
The expected value is taken over hidden (z) variables, namely the initial
conditions and the control inputs. The expected value is approximate, and
accomplished via sampling (batch_size) samples for every example.
Args:
data_name: The name of the data ... | def eval_model_runs_avg_epoch(self, data_name, data_extxd, ext_input_extxi=None):
| hps = self.hps
batch_size = hps.batch_size
(E, T, D) = data_extxd.shape
E_to_process = hps.ps_nexamples_to_process
if (E_to_process > E):
print('Setting number of posterior samples to process to : ', E)
E_to_process = E
if (hps.ic_dim > 0):
prio... |
'Run the model on the data in data_dict, and save the computed values.
LFADS generates a number of outputs for each example, and these are all
saved. They are:
The mean and variance of the prior of g0.
The mean and variance of approximate posterior of g0.
The control inputs (if enabled)
The initial conditions, g0, fo... | def write_model_runs(self, datasets, output_fname=None):
| hps = self.hps
kind = hps.kind
for (data_name, data_dict) in datasets.items():
data_tuple = [('train', data_dict['train_data'], data_dict['train_ext_input']), ('valid', data_dict['valid_data'], data_dict['valid_ext_input'])]
for (data_kind, data_extxd, ext_input_extxi) in data_tuple:
... |
'Use the prior distribution to generate batch_size number of samples
from the model.
LFADS generates a number of outputs for each sample, and these are all
saved. They are:
The mean and variance of the prior of g0.
The control inputs (if enabled)
The initial conditions, g0, for all examples.
The generator states for a... | def write_model_samples(self, dataset_name, output_fname=None):
| hps = self.hps
batch_size = hps.batch_size
print(('Generating %d samples' % batch_size))
tf_vals = [self.factors, self.gen_states, self.gen_ics, self.cost, self.output_dist_params]
if (hps.ic_dim > 0):
tf_vals += [self.prior_zs_g0.mean, self.prior_zs_g0.logvar]
if (hps.co_dim > 0):... |
'Evaluate and return all of the TF variables in the model.
Args:
use_nested (optional): For returning values, use a nested dictionary, based
on variable scoping, or return all variables in a flat dictionary.
include_strs (optional): A list of strings to use as a filter, to reduce the
number of variables returned. A va... | @staticmethod
def eval_model_parameters(use_nested=True, include_strs=None):
| all_tf_vars = tf.global_variables()
session = tf.get_default_session()
all_tf_vars_eval = session.run(all_tf_vars)
vars_dict = {}
strs = ['LFADS']
if include_strs:
strs += include_strs
for (i, (var, var_eval)) in enumerate(zip(all_tf_vars, all_tf_vars_eval)):
if any(((s in in... |
'Randomly spikify underlying rates according to a Poisson distribution
Args:
rates_bxtxd: a numpy tensor of rates with shape B x T x N (batch x time x dims).
Returns:
A numpy array with the same shape as rates_bxtxd, but with the event
counts.'
| @staticmethod
def spikify_rates(rates_bxtxd):
| (B, T, N) = rates_bxtxd.shape
assert all([(B > 0), (N > 0)]), 'problems'
spikes_bxtxd = np.zeros([B, T, N], dtype=np.int32)
for b in range(B):
for t in range(T):
for n in range(N):
rate = rates_bxtxd[(b, t, n)]
count = np.random.poisson(rate)
... |
'Create Poisson distributions with log_rates parameters.
Args:
log_rates: a tensor-like list of log rates underlying the Poisson dist.'
| def __init__(self, log_rates):
| self.logr = log_rates
|
'Compute the log probability for the counts in the bin, under the model.
Args:
bin_counts: array-like integer counts
Returns:
The log-probability under the Poisson models for each element of
bin_counts.'
| def logp(self, bin_counts):
| k = tf.to_float(bin_counts)
return (((k * self.logr) - tf.exp(self.logr)) - tf.lgamma((k + 1)))
|
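This is the standard Poisson log-pmf, log p(k) = k log r - r - log k!, written in terms of log_rates. A quick check against SciPy (assuming SciPy is available):

```python
import numpy as np
from scipy.special import gammaln
from scipy.stats import poisson

log_rate, k = np.log(3.0), 5.0
# k * logr - exp(logr) - lgamma(k + 1), mirroring logp above
logp = k * log_rate - np.exp(log_rate) - gammaln(k + 1)
assert np.isclose(logp, poisson.logpmf(5, 3.0))
```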
'Create a diagonal gaussian distribution.
Args:
batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
mean: The N-D mean of the distribution.
logvar: The N-D log variance of the diagonal distribution.'
| def __init__(self, batch_size, z_size, mean, logvar):
| size__xz = [None, z_size]
self.mean = mean
self.logvar = logvar
self.noise = noise = tf.random_normal(tf.shape(logvar))
self.sample = (mean + (tf.exp((0.5 * logvar)) * noise))
mean.set_shape(size__xz)
logvar.set_shape(size__xz)
self.sample.set_shape(size__xz)
|
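The sample line is the reparameterization trick: z = mean + sigma * eps with eps ~ N(0, 1). A NumPy check that the samples have the requested moments:

```python
import numpy as np

mean, logvar = 2.0, np.log(0.25)                  # sigma^2 = 0.25
noise = np.random.randn(100000)
samples = mean + np.exp(0.5 * logvar) * noise     # reparameterization
print(samples.mean(), samples.var())              # ~2.0, ~0.25
```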
'Compute the log-likelihood under the distribution.
Args:
z (optional): value to compute likelihood for, if None, use sample.
Returns:
The likelihood of z under the model.'
| def logp(self, z=None):
| if (z is None):
z = self.sample
if (z == self.sample):
return gaussian_pos_log_likelihood(self.mean, self.logvar, self.noise)
return diag_gaussian_log_likelihood(z, self.mean, self.logvar)
|
'Create a learnable diagonal gaussian distribution.
Args:
batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
name: prefix name for the mean and log TF variables.
mean_init (optional): The N-D mean initialization of the distribut... | def __init__(self, batch_size, z_size, name, mean_init=0.0, var_init=1.0, var_min=0.0, var_max=1000000.0):
| size_1xn = [1, z_size]
size__xn = [None, z_size]
size_bx1 = tf.stack([batch_size, 1])
assert (var_init > 0.0), 'Problems'
assert (var_max >= var_min), 'Problems'
assert (var_init >= var_min), 'Problems'
assert (var_max >= var_init), 'Problems'
z_mean_1xn = tf.get_variable(name=(name + '/... |
'Compute the log-likelihood under the distribution.
Args:
z (optional): value to compute likelihood for, if None, use sample.
Returns:
The likelihood of z under the model.'
| def logp(self, z=None):
| if (z is None):
z = self.sample
if (z == self.sample_bxn):
return gaussian_pos_log_likelihood(self.mean_bxn, self.logvar_bxn, self.noise_bxn)
return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn)
|
'Create an input dependent diagonal Gaussian distribution.
Args:
x: The input tensor from which the mean and variance are computed,
via a linear transformation of x. I.e.
mu = Wx + b, log(var) = Mx + c
z_size: The size of the distribution.
name: The name to prefix to learned variables.
var_min (optional): Minimal var... | def __init__(self, x_bxu, z_size, name, var_min=0.0):
| size_bxn = tf.stack([tf.shape(x_bxu)[0], z_size])
self.mean_bxn = mean_bxn = linear(x_bxu, z_size, name=(name + '/mean'))
logvar_bxn = linear(x_bxu, z_size, name=(name + '/logvar'))
if (var_min > 0.0):
logvar_bxn = tf.log((tf.exp(logvar_bxn) + var_min))
self.logvar_bxn = logvar_bxn
self.... |
'Compute the log-likelihood under the distribution.
Args:
z (optional): value to compute likelihood for, if None, use sample.
Returns:
The likelihood of z under the model.'
| def logp(self, z=None):
| if (z is None):
z = self.sample
if (z == self.sample_bxn):
return gaussian_pos_log_likelihood(self.mean_bxn, self.logvar_bxn, self.noise_bxn)
return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn)
|
'Create a learnable autoregressive (1) process.
Args:
batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
autocorrelation_taus: The auto correlation time constant of the AR(1)
process.
A value of 0 is uncorrelated gaussian noise.... | def __init__(self, batch_size, z_size, autocorrelation_taus, noise_variances, do_train_prior_ar_atau, do_train_prior_ar_nvar, num_steps, name):
| size_bx1 = tf.stack([batch_size, 1])
size__xu = [None, z_size]
log_evar_inits_1xu = tf.expand_dims(tf.log(noise_variances), 0)
self.logevars_1xu = logevars_1xu = tf.Variable(log_evar_inits_1xu, name=(name + '/logevars'), dtype=tf.float32, trainable=do_train_prior_ar_nvar)
self.logevars_bxu = logevar... |
'Compute the log-likelihood under the distribution for a given time t,
not the whole sequence.
Args:
z_t_bxu: sample to compute likelihood for at time t.
z_tm1_bxu (optional): sample condition probability of z_t upon.
Returns:
The likelihood of p_t under the model at time t. i.e.
p(z_t|z_tm1) = N(z_tm1 * phis, eps^2)'
| def logp_t(self, z_t_bxu, z_tm1_bxu=None):
| if (z_tm1_bxu is None):
return diag_gaussian_log_likelihood(z_t_bxu, self.pmeans_bxu, self.logpvars_bxu)
else:
means_t_bxu = (self.pmeans_bxu + (self.phis_bxu * z_tm1_bxu))
logp_tgtm1_bxu = diag_gaussian_log_likelihood(z_t_bxu, means_t_bxu, self.logevars_bxu)
return logp_tgtm1_bx... |
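A NumPy transcription of the conditional step, assuming `diag_gaussian_log_likelihood` has the standard closed form:

```python
import numpy as np

def diag_gaussian_log_likelihood(z, mu, logvar):
    # Standard diagonal-Gaussian log density, elementwise.
    return -0.5 * (np.log(2 * np.pi) + logvar
                   + (z - mu) ** 2 / np.exp(logvar))

phi, pmean, logevar = 0.9, 0.0, np.log(0.1)
z_tm1, z_t = 1.0, 0.85
# One AR(1) step: p(z_t | z_tm1) = N(pmean + phi * z_tm1, evar)
logp = diag_gaussian_log_likelihood(z_t, pmean + phi * z_tm1, logevar)
```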
'Create a lower bound in three parts, normalized reconstruction
cost, normalized KL divergence cost, and their sum.
E_q[ln p(z_i | z_{i+1}) / q(z_i | x)]
\int q(z) ln p(z) dz = - 0.5 ln(2pi) - 0.5 \sum (ln(sigma_p^2) + sigma_q^2 / sigma_p^2 + (mean_p - mean_q)^2 / sigma_p^2)
\int q(z) ln q(z) dz = - 0.5 ln(2pi... | def __init__(self, zs, prior_zs):
| kl_b = 0.0
for (z, prior_z) in zip(zs, prior_zs):
assert isinstance(z, Gaussian)
assert isinstance(prior_z, Gaussian)
kl_b += (0.5 * tf.reduce_sum(((((prior_z.logvar - z.logvar) + tf.exp((z.logvar - prior_z.logvar))) + tf.square(((z.mean - prior_z.mean) / tf.exp((0.5 * prior_z.logvar))))... |
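The truncated sum is the closed-form KL between diagonal Gaussians (the elided tail presumably subtracts 1 per dimension, as in the standard formula). A NumPy check against a Monte Carlo estimate:

```python
import numpy as np

mq, lvq = 0.3, np.log(0.5)   # posterior q
mp, lvp = 0.0, np.log(1.0)   # prior p
# Closed form, per dimension (assumes the truncated tail is "- 1"):
kl = 0.5 * ((lvp - lvq) + np.exp(lvq - lvp)
            + ((mq - mp) / np.exp(0.5 * lvp)) ** 2 - 1.0)
# Monte Carlo E_q[log q(z) - log p(z)] should agree:
z = mq + np.exp(0.5 * lvq) * np.random.randn(200000)
logq = -0.5 * (np.log(2 * np.pi) + lvq + (z - mq) ** 2 / np.exp(lvq))
logp = -0.5 * (np.log(2 * np.pi) + lvp + (z - mp) ** 2 / np.exp(lvp))
print(kl, (logq - logp).mean())   # both ~0.142
```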
'Create a lower bound in three parts, normalized reconstruction
cost, normalized KL divergence cost, and their sum.
Args:
post_zs: posterior z ~ q(z|x)
prior_z_process: prior AR(1) process'
| def __init__(self, post_zs, prior_z_process):
| assert (len(post_zs) > 1), 'GP is for time, need more than 1 time step.'
assert isinstance(prior_z_process, GaussianProcess), 'Must use GP.'
z0_bxu = post_zs[0].sample
logq_bxu = post_zs[0].logp(z0_bxu)
logp_bxu = prior_z_process.logp_t(z0_bxu)
z_tm1_bxu = z0_bxu... |
'ResNet constructor.
Args:
hps: Hyperparameters.
images: Batches of images. [batch_size, image_size, image_size, 3]
labels: Batches of labels. [batch_size, num_classes]
mode: One of 'train' and 'eval'.'
| def __init__(self, hps, images, labels, mode):
| self.hps = hps
self._images = images
self.labels = labels
self.mode = mode
self._extra_train_ops = []
|
'Build a whole graph for the model.'
| def build_graph(self):
| self.global_step = tf.contrib.framework.get_or_create_global_step()
self._build_model()
if (self.mode == 'train'):
self._build_train_op()
self.summaries = tf.summary.merge_all()
|
'Map a stride scalar to the stride array for tf.nn.conv2d.'
| def _stride_arr(self, stride):
| return [1, stride, stride, 1]
|
'Build the core model within the graph.'
| def _build_model(self):
| with tf.variable_scope('init'):
x = self._images
x = self._conv('init_conv', x, 3, 3, 16, self._stride_arr(1))
strides = [1, 2, 2]
activate_before_residual = [True, False, False]
if self.hps.use_bottleneck:
res_func = self._bottleneck_residual
filters = [16, 64, 128, 256]... |
'Build training specific ops for the graph.'
| def _build_train_op(self):
| self.lrn_rate = tf.constant(self.hps.lrn_rate, tf.float32)
tf.summary.scalar('learning_rate', self.lrn_rate)
trainable_variables = tf.trainable_variables()
grads = tf.gradients(self.cost, trainable_variables)
if (self.hps.optimizer == 'sgd'):
optimizer = tf.train.GradientDescentOptimizer(sel... |
'Batch normalization.'
| def _batch_norm(self, name, x):
| with tf.variable_scope(name):
params_shape = [x.get_shape()[(-1)]]
beta = tf.get_variable('beta', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32))
gamma = tf.get_variable('gamma', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32))
... |
'Residual unit with 2 sub layers.'
| def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
| if activate_before_residual:
with tf.variable_scope('shared_activation'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, self.hps.relu_leakiness)
orig_x = x
else:
with tf.variable_scope('residual_only_activation'):
orig_x = x
x = ... |
'Bottleneck residual unit with 3 sub layers.'
| def _bottleneck_residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
| if activate_before_residual:
with tf.variable_scope('common_bn_relu'):
x = self._batch_norm('init_bn', x)
x = self._relu(x, self.hps.relu_leakiness)
orig_x = x
else:
with tf.variable_scope('residual_bn_relu'):
orig_x = x
x = self._batch... |
'L2 weight decay loss.'
| def _decay(self):
| costs = []
for var in tf.trainable_variables():
if (var.op.name.find('DW') > 0):
costs.append(tf.nn.l2_loss(var))
return tf.multiply(self.hps.weight_decay_rate, tf.add_n(costs))
|
'Convolution.'
| def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
| with tf.variable_scope(name):
n = ((filter_size * filter_size) * out_filters)
kernel = tf.get_variable('DW', [filter_size, filter_size, in_filters, out_filters], tf.float32, initializer=tf.random_normal_initializer(stddev=np.sqrt((2.0 / n))))
return tf.nn.conv2d(x, kernel, strides, padding='... |
'Relu, with optional leaky support.'
| def _relu(self, x, leakiness=0.0):
| return tf.where(tf.less(x, 0.0), (leakiness * x), x, name='leaky_relu')
|
'FullyConnected layer for final output.'
| def _fully_connected(self, x, out_dim):
| x = tf.reshape(x, [self.hps.batch_size, (-1)])
w = tf.get_variable('DW', [x.get_shape()[1], out_dim], initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
b = tf.get_variable('biases', [out_dim], initializer=tf.constant_initializer())
return tf.nn.xw_plus_b(x, w, b)
|
'Create the deterministic transformation between stochastic layers.
If self.hparams.nonlinear:
2 x tanh layers
Else:
1 x linear layer'
| def _create_transformation(self, input, n_output, reuse, scope_prefix):
| if self.hparams.nonlinear:
h = slim.fully_connected(input, self.hparams.n_hidden, reuse=reuse, activation_fn=tf.nn.tanh, scope=('%s_nonlinear_1' % scope_prefix))
h = slim.fully_connected(h, self.hparams.n_hidden, reuse=reuse, activation_fn=tf.nn.tanh, scope=('%s_nonlinear_2' % scope_prefix))
... |
'x values -> samples from Q and return log Q(h|x).'
| def _recognition_network(self, sampler=None, log_likelihood_func=None):
| samples = {}
reuse = (None if (not self.run_recognition_network) else True)
if (sampler is None):
sampler = self._random_sample
if (log_likelihood_func is None):
log_likelihood_func = (lambda sample, log_params: U.binary_log_likelihood(sample['activation'], log_params))
logQ = []
... |
'Returns learning signal and function.
This is the implementation for SBNs for the ELBO.
Args:
samples: dictionary of sampled latent variables
logQ: list of log q(h_i) terms
log_likelihood_func: function used to compute log probs for the latent
variables
Returns:
learning_signal: the "reward" function
function_term: pa... | def _generator_network(self, samples, logQ, log_likelihood_func=None):
| reuse = (None if (not self.run_generator_network) else True)
if (self.hparams.task in ['sbn', 'omni']):
if (log_likelihood_func is None):
log_likelihood_func = (lambda sample, log_params: U.binary_log_likelihood(sample['activation'], log_params))
logPPrior = log_likelihood_func(sampl... |
'Compute the mean per component variance.
Use a moving average to estimate the required moments.'
| def compute_tensor_variance(self, t):
| t_sq = tf.reduce_mean(tf.square(t))
self.maintain_ema_ops.append(self.ema.apply([t, t_sq]))
variance_estimator = (self.ema.average(t_sq) - tf.reduce_mean(tf.square(self.ema.average(t))))
return variance_estimator
|
'Args:
grads_and_vars: gradients to apply and compute running average variance
extra_grads_and_vars: gradients to apply (not used to compute average variance)'
| def _create_train_op(self, grads_and_vars, extra_grads_and_vars=[]):
| first_moment = U.vectorize(grads_and_vars, skip_none=True)
second_moment = tf.square(first_moment)
self.maintain_ema_ops.append(self.ema.apply([first_moment, second_moment]))
if (len(self.baseline_loss) > 0):
mean_baseline_loss = tf.reduce_mean(tf.add_n(self.baseline_loss))
extra_grads_a... |
'Returns mean of random variables parameterized by log_alpha.'
| def _mean_sample(self, log_alpha, _, layer):
| mu = tf.nn.sigmoid(log_alpha)
return {'preactivation': mu, 'activation': mu, 'log_param': log_alpha}
|
'Convert u to tied randomness in v.'
| def _u_to_v(self, log_alpha, u, eps=1e-08):
| u_prime = tf.nn.sigmoid((- log_alpha))
v_1 = ((u - u_prime) / tf.clip_by_value((1 - u_prime), eps, 1))
v_1 = tf.clip_by_value(v_1, 0, 1)
v_1 = tf.stop_gradient(v_1)
v_1 = ((v_1 * (1 - u_prime)) + u_prime)
v_0 = (u / tf.clip_by_value(u_prime, eps, 1))
v_0 = tf.clip_by_value(v_0, 0, 1)
v_0... |
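A NumPy transcription of the two branches shown; the truncated tail presumably finishes v_0 and selects between the branches elementwise (an assumption here). The net effect is that v reproduces u numerically, with the randomness tied to log_alpha:

```python
import numpy as np

def u_to_v(log_alpha, u, eps=1e-8):
    u_prime = 1.0 / (1.0 + np.exp(log_alpha))   # sigmoid(-log_alpha)
    v_1 = np.clip((u - u_prime) / np.clip(1 - u_prime, eps, 1), 0, 1)
    v_1 = v_1 * (1 - u_prime) + u_prime         # branch where u > u_prime
    v_0 = np.clip(u / np.clip(u_prime, eps, 1), 0, 1) * u_prime
    # Assumed tail: pick the branch per element.
    return np.where(u > u_prime, v_1, v_0)

u = np.random.uniform(size=5)
v = u_to_v(np.full(5, 0.5), u)
assert np.allclose(u, v)   # v equals u up to clipping
```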
'Returns sampled random variables parameterized by log_alpha.'
| def _random_sample(self, log_alpha, u, layer):
| if (layer not in self.uniform_samples_v):
self.uniform_samples_v[layer] = self._u_to_v(log_alpha, u)
x = ((log_alpha + U.safe_log_prob(u)) - U.safe_log_prob((1 - u)))
samples = tf.stop_gradient(tf.to_float((x > 0)))
return {'preactivation': x, 'activation': samples, 'log_param': log_alpha}
|
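The hard sample uses the logistic reparameterization: x = log_alpha + log u - log(1 - u) is positive with probability sigmoid(log_alpha). A Monte Carlo check:

```python
import numpy as np

log_alpha = 0.7
u = np.random.uniform(size=200000)
x = log_alpha + np.log(u) - np.log(1 - u)   # logistic noise around log_alpha
print((x > 0).mean(), 1 / (1 + np.exp(-log_alpha)))   # both ~0.668
```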
'Returns sampled random variables parameterized by log_alpha.'
| def _random_sample_soft(self, log_alpha, u, layer, temperature=None):
| if (temperature is None):
temperature = self.hparams.temperature
x = ((log_alpha + U.safe_log_prob(u)) - U.safe_log_prob((1 - u)))
x /= tf.expand_dims(temperature, (-1))
if self.hparams.muprop_relaxation:
y = tf.nn.sigmoid((x + (log_alpha * tf.expand_dims((temperature / (temperature + 1)... |
'Returns sampled random variables parameterized by log_alpha.'
| def _random_sample_soft_v(self, log_alpha, _, layer, temperature=None):
| v = self.uniform_samples_v[layer]
return self._random_sample_soft(log_alpha, v, layer, temperature)
|
'Run partial discrete, then continuous path.
Args:
switch_layer: this layer and beyond will be continuous'
| def _random_sample_switch(self, log_alpha, u, layer, switch_layer, temperature=None):
| if (layer < switch_layer):
return self._random_sample(log_alpha, u, layer)
else:
return self._random_sample_soft(log_alpha, u, layer, temperature)
|
'Run partial discrete, then continuous path.
Args:
switch_layer: this layer and beyond will be continuous'
| def _random_sample_switch_v(self, log_alpha, u, layer, switch_layer, temperature=None):
| if (layer < switch_layer):
return self._random_sample(log_alpha, u, layer)
else:
return self._random_sample_soft_v(log_alpha, u, layer, temperature)
|
'Compute the NVIL gradient.'
| def get_nvil_gradient(self):
| (logQHard, samples) = self._recognition_network()
(ELBO, reinforce_model_grad) = self._generator_network(samples, logQHard)
logQHard = tf.add_n(logQHard)
learning_signal = (tf.stop_gradient(ELBO) - self._create_baseline())
self.baseline_loss.append(tf.square(learning_signal))
optimizerLoss = (- ... |
'Computes the simple muprop gradient.
This muprop control variate does not include the linear term.'
| def get_simple_muprop_gradient(self):
| (logQHard, hardSamples) = self._recognition_network()
(hardELBO, reinforce_model_grad) = self._generator_network(hardSamples, logQHard)
(logQ, muSamples) = self._recognition_network(sampler=self._mean_sample)
(muELBO, _) = self._generator_network(muSamples, logQ)
scaling_baseline = self._create_eta(... |
'Compute the full MuProp gradient.
Uses a random-sample function that actually returns the mean, and a new
forward pass that returns logQ as a list; x_i can be obtained from samples.'
| def get_muprop_gradient(self):
| (logQHard, hardSamples) = self._recognition_network()
(hardELBO, reinforce_model_grad) = self._generator_network(hardSamples, logQHard)
(logQ, muSamples) = self._recognition_network(sampler=self._mean_sample)
(muELBO, _) = self._generator_network(muSamples, logQ)
muELBOGrads = tf.gradients(tf.reduce... |
'Calculate gumbel control variate.'
| def _create_gumbel_control_variate(self, logQHard, temperature=None):
| if (temperature is None):
temperature = self.hparams.temperature
(logQ, softSamples) = self._recognition_network(sampler=functools.partial(self._random_sample_soft, temperature=temperature))
(softELBO, _) = self._generator_network(softSamples, logQ)
logQ = tf.add_n(logQ)
(logQ_v, softSamples... |
'Calculate gumbel control variate.'
| def _create_gumbel_control_variate_quadratic(self, logQHard, temperature=None):
| if (temperature is None):
temperature = self.hparams.temperature
h = 0
extra = []
for layer in xrange(self.hparams.n_layer):
(logQ, softSamples) = self._recognition_network(sampler=functools.partial(self._random_sample_switch, switch_layer=layer, temperature=temperature))
(softEL... |
'Get the dynamic rebar gradient (t, eta optimized).'
| def get_dynamic_rebar_gradient(self):
| tiled_pre_temperature = tf.tile([self.pre_temperature_variable], [self.batch_size])
temperature = tf.exp(tiled_pre_temperature)
(hardELBO, nvil_gradient, logQHard) = self._create_hard_elbo()
if self.hparams.quadratic:
(gumbel_cv, extra) = self._create_gumbel_control_variate_quadratic(logQHard, t... |
'Get the rebar gradient.'
| def get_rebar_gradient(self):
| (hardELBO, nvil_gradient, logQHard) = self._create_hard_elbo()
if self.hparams.quadratic:
(gumbel_cv, _) = self._create_gumbel_control_variate_quadratic(logQHard)
else:
(gumbel_cv, _) = self._create_gumbel_control_variate(logQHard)
f_grads = self.optimizer_class.compute_gradients(tf.redu... |
'Returns sampled random variables parameterized by log_alpha.'
| def _random_sample_soft(self, log_alpha, u, layer, temperature=None):
| if (temperature is None):
temperature = self.hparams.temperature
x = ((log_alpha + U.safe_log_prob(u)) - U.safe_log_prob((1 - u)))
x /= temperature
if self.hparams.muprop_relaxation:
x += ((temperature / (temperature + 1)) * log_alpha)
y = tf.nn.sigmoid(x)
return {'preactivation'... |
'Add episodes to buffer.'
| def add(self, episodes, *args):
| idx = 0
while ((self.cur_size < self.max_size) and (idx < len(episodes))):
self.buffer[self.cur_size] = episodes[idx]
self.cur_size += 1
idx += 1
if (idx < len(episodes)):
remove_idxs = self.remove_n((len(episodes) - idx))
for remove_idx in remove_idxs:
se... |
'Get n items for removal.'
| def remove_n(self, n):
| idxs = random.sample(xrange(self.init_length, self.cur_size), n)
return idxs
|
'Get batch of episodes to train on.'
| def get_batch(self, n):
| idxs = random.sample(xrange(self.cur_size), n)
return ([self.buffer[idx] for idx in idxs], None)
|
'Add episodes to buffer.'
| def add(self, episodes, priorities, new_idxs=None):
| if (new_idxs is None):
idx = 0
new_idxs = []
while ((self.cur_size < self.max_size) and (idx < len(episodes))):
self.buffer[self.cur_size] = episodes[idx]
new_idxs.append(self.cur_size)
self.cur_size += 1
idx += 1
if (idx < len(episodes... |
'Get n items for removal.'
| def remove_n(self, n):
| assert ((self.init_length + n) <= self.cur_size)
if (self.eviction_strategy == 'rand'):
idxs = random.sample(xrange(self.init_length, self.cur_size), n)
elif (self.eviction_strategy == 'fifo'):
idxs = [(self.init_length + ((self.remove_idx + i) % (self.max_size - self.init_length))) for i in... |
'Get batch of episodes to train on.'
| def get_batch(self, n):
| p = self.sampling_distribution()
idxs = np.random.choice(self.cur_size, size=n, replace=False, p=p)
self.last_batch = idxs
return ([self.buffer[idx] for idx in idxs], p[idxs])
|
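`sampling_distribution` is not shown in this cell; a plausible sketch, assuming standard proportional prioritization (the exponent and normalization are assumptions):

```python
import numpy as np

def sampling_distribution(priorities, cur_size, alpha=1.0):
    # Hypothetical: sample proportionally to |priority|^alpha.
    p = np.abs(priorities[:cur_size]) ** alpha
    return p / p.sum()

priorities = np.array([1.0, 4.0, 2.0, 3.0])
p = sampling_distribution(priorities, cur_size=4)   # [0.1, 0.4, 0.2, 0.3]
idxs = np.random.choice(4, size=2, replace=False, p=p)
```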
'Update last batch idxs with new priority.'
| def update_last_batch(self, delta):
| self.priorities[self.last_batch] = np.abs(delta)
self.priorities[0:self.init_length] = np.max(self.priorities[self.init_length:])
|
'Optimizer for gradient descent ops.'
| def get_optimizer(self, learning_rate):
| return tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=0.0002)
|
'Gradient ops.'
| def training_ops(self, loss, learning_rate=None):
| opt = self.get_optimizer(learning_rate)
params = tf.trainable_variables()
grads = tf.gradients(loss, params)
if self.clip_norm:
(grads, global_norm) = tf.clip_by_global_norm(grads, self.clip_norm)
tf.summary.scalar('grad_global_norm', global_norm)
return opt.apply_gradients(zip(grads... |
'Get objective calculations.'
| def get(self, rewards, pads, values, final_values, log_probs, prev_log_probs, target_log_probs, entropies, logits):
| raise NotImplementedError()
|
'Exploration bonus.'
| def get_bonus(self, total_rewards, total_log_probs):
| return ((- self.tau) * total_log_probs)
|
'Exploration bonus.'
| def get_bonus(self, total_rewards, total_log_probs):
| discrepancy = ((total_rewards / self.tau) - total_log_probs)
normalized_d = (self.num_samples * tf.nn.softmax(discrepancy))
return (self.tau * normalized_d)
|
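A worked NumPy version of this bonus: trajectories whose reward is under-appreciated by the current policy (high reward relative to log-probability) receive most of the weight:

```python
import numpy as np

tau, num_samples = 0.1, 4
total_rewards = np.array([1.0, 0.5, 0.2, 0.9])
total_log_probs = np.array([-2.0, -1.0, -3.0, -2.5])
d = total_rewards / tau - total_log_probs      # discrepancy
sm = np.exp(d - d.max()); sm /= sm.sum()       # softmax over samples
bonus = tau * num_samples * sm
# The first and last samples dominate: their rewards are largest
# relative to how likely the policy already makes them.
```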
'Get RNN cell.'
| def get_cell(self):
| self.cell_input_dim = (self.internal_dim // 2)
cell = tf.contrib.rnn.LSTMCell(self.cell_input_dim, state_is_tuple=False, reuse=tf.get_variable_scope().reuse)
cell = tf.contrib.rnn.OutputProjectionWrapper(cell, self.output_dim, reuse=tf.get_variable_scope().reuse)
return cell
|
'Core neural network taking in inputs and outputting sampling
distribution parameters.'
| def core(self, obs, prev_internal_state, prev_actions):
| batch_size = tf.shape(obs[0])[0]
if (not self.recurrent):
prev_internal_state = tf.zeros([batch_size, self.rnn_state_dim])
cell = self.get_cell()
b = tf.get_variable('input_bias', [self.cell_input_dim], initializer=self.vector_init)
cell_input = tf.nn.bias_add(tf.zeros([batch_size, self.cell... |
'Sample an action from a distribution.'
| def sample_action(self, logits, sampling_dim, act_dim, act_type, greedy=False):
| if self.env_spec.is_discrete(act_type):
if greedy:
act = tf.argmax(logits, 1)
else:
act = tf.reshape(tf.multinomial(logits, 1), [(-1)])
elif self.env_spec.is_box(act_type):
means = logits[:, :(sampling_dim / 2)]
std = logits[:, (sampling_dim / 2):]
... |