Summary: 49 instances, 45 unique Text Count # TODO Automatically calculate padding 1 // TODO: assert that features have the same size 1 // TODO: The ffbs data below is used for loading frames from buffers, and 1 # TODO: check that the gating is learned to be 0 on the last layer, 1 # TODO maybe do some pooling in the temporal convolution 1 # TODO Hungarian (scipy.optimize.linear_sum_assignment)? How do we deal with dead units? Right now hack to have an approx of the avg bias 1 if recent_mean_loss > 0.99 * older_mean_loss or self.args.small: # TODO with lr_decay only 1 # TODO a mass conservation (except where we have factories?) loss? 1 self.zs = {} # TODO replace by LookUpTable 1 "KL": th.nn.KLDivLoss # TODO (the target in training) 1 //TODO do targets too 1 # Each midnet runs a convolution + LSTM; TODO make it do multiple conv 1 # TODO hierarchical deconv 1 // Never true for forward models TODO Change for policy models 2 # TODO bptt sampled with cut ~ Bernoulli(hyperparam) 1 # TODO be careful about size_averaging and BPTT 1 # TODO: add visibility feature 1 # TODO skip_frames >= combine_frames 1 elif state.delta_full_info: # TODO add this option, TODO change test time! 1 self.encoder = None # TODO I hope this garbage collects. 1 # TODO check input/output size in features/channels 1 with th.cuda.device(self.args.gpu): # TODO wrap everything? 1 # TODO: 1 self.conv1x1 = nn.Conv2d(self.nchannel, self.inp_embsize, 1) # TODO do that before trunk? 2 # TODO decoder that starts from input embedding (after first 1x1 Conv2d) 1 # TODO Dynamically search for this somehow??? 1 v = [v[0], v[1], 2 * v[0] * v[1] / (v[0] + v[1] + 1E-9)] # TODO F1 score after all averaging ok? 1 # TODO skip connections from midnets to corresponding decoder level. 1 # TODO Make the data loaders all generators 1 self._feeder_queue = queue.Queue() # TODO check 2 with th.cuda.device(self.args.gpu): # TODO wrap everything? 
1 if self.with_z: # TODO replace by class decorator 2 # TODO actually do it with the nn.Module corresponding to midnets+midrnn 1 #if args.loss == 'SoftMargin': // TODO KL 1 # TODO Consistency loss between regression and classification heads? 1 # TODO a structured prediction (local and or global) type of loss? 1 # TODO Make the sizes line up with resnets... 1 # TODO try to remove border artifacts (borders are important!) 1 # TODO: move other_metrics to self.state / init, but only once we stabilize what's in it 1 # TODO plot debug outputs on the valid set 1 if args.save: # TODO save on other metrics? 1 # TODO: Both reduced data and overall data may be compressed 1 "TODO: make the array an hdf5 instead if we want to reuse.") 1 # TODO the loss/model should make you pay less for small (dx, dy) in prediction 1 # TODO remove this multiple optimizer stuff since we just create a new one here 1