conv_lstm_models.py (18 lines): - line 19: # TODO: add visibility feature - line 29: # TODO: check that the gating is learned to be 0 on the last layer, - line 145: # TODO Make the sizes line up with resnets... - line 208: self.conv1x1 = nn.Conv2d(self.nchannel, self.inp_embsize, 1) # TODO do that before trunk? - line 296: if self.with_z: # TODO replace by class decorator - line 316: self.conv1x1 = nn.Conv2d(self.nchannel, self.inp_embsize, 1) # TODO do that before trunk? - line 334: if self.with_z: # TODO replace by class decorator - line 344: self.zs = {} # TODO replace by LookUpTable - line 359: # TODO decoder that starts from input embedding (after first 1x1 Conv2d) - line 360: # TODO check input/output size in features/channels - line 361: # TODO try to remove border artifacts (borders are important!) - line 362: # TODO hierarchical deconv - line 595: self.encoder = None # TODO I hope this garbage collects. - line 596: # TODO actually do it with the nn.Module corresponding to midnets+midrnn - line 597: # TODO skip connections from midnets to corresponding decoder level. - line 640: # Each midnet runs a convolution + LSTM; TODO make it do multiple conv - line 697: # TODO Automatically calculate padding - line 698: # TODO maybe do some pooling in the temporal convolution compare_models.py (18 lines): - line 55: # TODO Hungarian (scipy.optimize.linear_sum_assignment)? How do we deal with dead units? Right now hack to have an approx of the avg bias - line 134: # TODO a structured prediction (local and or global) type of loss? - line 135: # TODO a mass conservation (except where we have factories?) loss? - line 136: # TODO the loss/model should make you pay less for small (dx, dy) in prediction - line 188: "KL": th.nn.KLDivLoss # TODO (the target in training) - line 192: # TODO be careful about size_averaging and BPTT - line 223: # TODO Consistency loss between regression and classification heads? 
- line 304: with th.cuda.device(self.args.gpu): # TODO wrap everything? - line 363: # TODO: move other_metrics to self.state / init, but only once we stabilize what's in it - line 559: v = [v[0], v[1], 2 * v[0] * v[1] / (v[0] + v[1] + 1E-9)] # TODO F1 score after all averaging ok? - line 587: if args.save: # TODO save on other metrics? - line 597: if recent_mean_loss > 0.99 * older_mean_loss or self.args.small: # TODO with lr_decay only - line 598: with th.cuda.device(self.args.gpu): # TODO wrap everything? - line 607: # TODO plot debug outputs on the valid set - line 626: elif state.delta_full_info: # TODO add this option, TODO change test time! - line 827: # TODO remove this multiple optimizer stuff since we just create a new one here - line 929: # TODO bptt sampled with cut ~ Bernoulli(hyperparam) - line 941: #if args.loss == 'SoftMargin': // TODO KL featurizers/forward_conv2d_state_action_featurizer.cpp (3 lines): - line 85: //TODO do targets too - line 153: // Never true for forward models TODO Change for policy models - line 184: // Never true for forward models TODO Change for policy models data.py (3 lines): - line 19: # TODO Make the data loaders all generators - line 51: self._feeder_queue = queue.Queue() # TODO check - line 146: self._feeder_queue = queue.Queue() # TODO check conv_lstm_utils.py (1 line): - line 21: # TODO: featurizers/coarse_conv_featurizer_units.h (1 line): - line 17: // TODO: The ffbs data below is used for loading frames from buffers, and baseline.py (1 line): - line 36: "TODO: make the array an hdf5 instead if we want to reuse.") utils.py (1 line): - line 112: # TODO skip_frames >= combine_frames _ext.cpp (1 line): - line 123: // TODO: assert that features have the same size setup.py (1 line): - line 51: # TODO Dynamically search for this somehow??? reduce_data.py (1 line): - line 91: # TODO: Both reduced data and overall data may be compressed