def add_inference()

in scripts/tf_cnn_benchmarks/models/tf1_only/ssd_model.py [0:0]


  def add_inference(self, cnn):
    """Builds the SSD detection head on top of the backbone.

    Runs the backbone under BACKBONE_MODEL_SCOPE_NAME, appends the SSD
    extra feature layers (Conv7 .. Conv11), then attaches per-feature-map
    localization and classification convolutions. The per-map predictions
    are flattened and concatenated into a single logits tensor.

    Args:
      cnn: the ConvNetBuilder-style object used to construct layers; its
        `top_layer`/`top_size` are updated to the packed logits.

    Returns:
      The logits tensor of shape [batch_size, NUM_SSD_BOXES, 4 + label_num]
      (box offsets concatenated with class scores along the last axis).
    """
    cnn.use_batch_norm = True
    cnn.batch_norm_config = {'decay': ssd_constants.BATCH_NORM_DECAY,
                             'epsilon': ssd_constants.BATCH_NORM_EPSILON,
                             'scale': True}

    with tf.variable_scope(BACKBONE_MODEL_SCOPE_NAME):
      self.add_backbone_model(cnn)

    # --------------------------------------------------------------------------
    # SSD additional layers
    # --------------------------------------------------------------------------

    def extra_conv(depth, k_size, stride, mode):
      # Extra-layer convs never use batch norm and are Xavier-initialized.
      return cnn.conv(
          depth,
          k_size,
          k_size,
          stride,
          stride,
          mode=mode,
          use_batch_norm=False,
          kernel_initializer=contrib_layers.xavier_initializer())

    # Feature maps whose activations feed the prediction heads; the
    # backbone's top layer is the first one.
    self.activations = [cnn.top_layer]

    # Each extra block is a 1x1 "squeeze" conv followed by a 3x3 conv whose
    # output becomes a prediction feature map. Specs are
    # (squeeze_depth, out_depth, out_stride, out_mode):
    #   Conv7:  38x38 -> 19x19
    #   Conv8:  19x19 -> 10x10
    #   Conv9:  10x10 -> 5x5
    #   Conv10: 5x5   -> 3x3
    #   Conv11: 3x3   -> 1x1
    extra_layer_specs = [
        (256, 512, 2, 'same'),
        (256, 512, 2, 'same'),
        (128, 256, 2, 'same'),
        (128, 256, 1, 'valid'),
        (128, 256, 1, 'valid'),
    ]
    # NOTE: conv call order must stay exactly as specified — TF1 layer
    # auto-naming and variable creation depend on it.
    for squeeze_depth, out_depth, out_stride, out_mode in extra_layer_specs:
      extra_conv(squeeze_depth, 1, 1, 'valid')
      self.activations.append(extra_conv(out_depth, 3, out_stride, out_mode))

    def head_conv(feature_map, channels_in, out_depth):
      # Shared 3x3 prediction conv for both the box and class heads.
      return cnn.conv(
          out_depth,
          3,
          3,
          1,
          1,
          input_layer=feature_map,
          num_channels_in=channels_in,
          activation=None,
          use_batch_norm=False,
          kernel_initializer=contrib_layers.xavier_initializer())

    self.loc = []
    self.conf = []

    for num_boxes, feature_map, channels_in in zip(
        self.num_dboxes, self.activations, self.out_chan):
      # Localization head: 4 box-offset values per default box.
      box_pred = head_conv(feature_map, channels_in, num_boxes * 4)
      # Spatial size of this feature map (assumes channels-first layout,
      # mirroring the original shape comments — confirm against cnn config).
      scale = box_pred.get_shape()[-1]
      # [batch, num_boxes * 4, scale, scale]
      #   -> [batch, num_boxes, 4, scale, scale]
      box_pred = tf.reshape(
          box_pred, [self.batch_size, num_boxes, 4, scale, scale])
      #   -> [batch, num_boxes, scale, scale, 4]
      box_pred = tf.transpose(box_pred, [0, 1, 3, 4, 2])
      #   -> [batch, num_boxes * scale * scale, 4]
      self.loc.append(tf.reshape(box_pred, [self.batch_size, -1, 4]))

      # Classification head: label_num scores per default box.
      cls_pred = head_conv(feature_map, channels_in,
                           num_boxes * self.label_num)
      # [batch, num_boxes * label_num, scale, scale]
      #   -> [batch, num_boxes, label_num, scale, scale]
      cls_pred = tf.reshape(
          cls_pred,
          [self.batch_size, num_boxes, self.label_num, scale, scale])
      #   -> [batch, num_boxes, scale, scale, label_num]
      cls_pred = tf.transpose(cls_pred, [0, 1, 3, 4, 2])
      #   -> [batch, num_boxes * scale * scale, label_num]
      self.conf.append(
          tf.reshape(cls_pred, [self.batch_size, -1, self.label_num]))

    # Shape of locs: [batch_size, NUM_SSD_BOXES, 4]
    # Shape of confs: [batch_size, NUM_SSD_BOXES, label_num]
    locs = tf.concat(self.loc, 1)
    confs = tf.concat(self.conf, 1)

    # Pack location and confidence outputs into a single output layer.
    # Shape of logits: [batch_size, NUM_SSD_BOXES, 4+label_num]
    logits = tf.concat([locs, confs], 2)

    cnn.top_layer = logits
    cnn.top_size = 4 + self.label_num

    return cnn.top_layer