def model()

in multifocal/code/multifocal_decomposition_from_rgbd_fast/train/train-lmf-rgbd-c.py [0:0]


def model(x_in):
    # layer parameters:
    L = 8 # number of layers 
    fwConstant = 3
    fnumConstant = 128
    fw = np.full((L), fwConstant, dtype=int)
    fnum = np.append(np.full((L-1), fnumConstant, dtype=int), OUTPUT_DIM*(INTERLEAVE_RATE**2))
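    # with L = 8: fw = [3]*8 (3x3 kernels throughout), and
    # fnum = [128]*7 + [OUTPUT_DIM * INTERLEAVE_RATE**2] for the final projection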

    layers_params = {}
    layers = {}
    prev_layers = {}


    for i in range(0, L):
        if i == 0:       # first layer
            in_dim, out_dim = INPUT_DIM*(INTERLEAVE_RATE**2), fnum[i]
        elif i == L-1:   # last layer
            in_dim, out_dim = fnum[i-1], OUTPUT_DIM*(INTERLEAVE_RATE**2)
        else:
            in_dim, out_dim = fnum[i-1], fnum[i]

        layers_params[i] = {
            'weights': init_weights([fw[i], fw[i], in_dim, out_dim], 'xavier',
                                    xavier_params=(in_dim, out_dim), r=weightVarScale),
            'bias': tf.Variable(tf.truncated_normal([out_dim], stddev=bias_stddev))}
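    # note: init_weights and activationFunc are helpers defined elsewhere in this
    # file; init_weights is assumed to return a tf.Variable of shape
    # [fw[i], fw[i], in_dim, out_dim] with Xavier-initialized values whose
    # variance is rescaled by weightVarScale.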

    # build layers:
    print("input data:", x_in.shape)
    if RENORMALIZE_INPUT:
        x_in = x_in - 0.5*RENORM_SCALE
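        # (assumption: RENORM_SCALE matches the input's dynamic range, so this
        # shift roughly zero-centers the inputs before the first convolution)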

    for i in range(0, L):
        if i==0:
            prev_layers[i] = x_in   
        elif (i<3) or (i%2==0): 
            prev_layers[i] = layers[i-1]
        else: 
            prev_layers[i] = layers[i-1] + layers[i-3]
            print('(low scale: skip connection: %d, %d)'%(i-1, i-3))

        # The skip connection is optional: removing it further reduces runtime,
        # but keeping it can improve training.
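        # with L = 8, skips are added at i = 3 (layers[2] + layers[0]),
        # i = 5 (layers[4] + layers[2]) and i = 7 (layers[6] + layers[4])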

        # conv -> bias -> batch norm (channel axis 1 for NCHW) -> nonlinearity
        conv = tf.nn.bias_add(
            tf.nn.conv2d(prev_layers[i], layers_params[i]['weights'],
                         strides=[1, 1, 1, 1], padding='SAME', data_format='NCHW'),
            layers_params[i]['bias'], data_format='NCHW')
        bn = tf.layers.batch_normalization(conv, axis=1)
        # the last layer uses tanh so its output lies in [-1, 1]
        layers[i] = tf.tanh(bn) if i == L-1 else activationFunc(bn)
        print("layer %d:" % i, layers[i].shape)
             
    # map the tanh output from [-1, 1] back into [0, 1]
    x_out = tf.add(0.5, tf.scalar_mul(0.5, layers[L-1]), name='x_out')
    print("output tensor:", x_out.shape)

    return deinterleave(INTERLEAVE_RATE, x_out)
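
The deinterleave helper is defined elsewhere in this file and is not shown
above. A minimal sketch of one plausible implementation, assuming it simply
inverts the channel interleaving (a pixel-shuffle / depth-to-space that moves
the INTERLEAVE_RATE**2 channel groups back into the spatial dimensions):

import tensorflow as tf

# Hypothetical sketch only; the repository's actual deinterleave may differ.
# x: [N, C * rate**2, H, W] (NCHW)  ->  [N, C, H * rate, W * rate]
def deinterleave_sketch(rate, x):
    return tf.depth_to_space(x, block_size=rate, data_format='NCHW')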