varifocal/code/sync_defocus_from_rgbd_fast/train/train-lvf-c.py [146:257]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    fwConstant = 3
    fnumConstant = 128
    fw = np.full(L, fwConstant, dtype=int)  # filter width at each layer
    fnum = np.append(np.full(L - 1, fnumConstant, dtype=int), OUTPUT_DIM*(INTERLEAVE_RATE**2))  # output channels at each layer

    layers_params = {}
    layers = {}
    prev_layers = {}

    for i in range(0, L):
        if i == 0:  # first layer
            in_dim, out_dim = INPUT_DIM*(INTERLEAVE_RATE**2), fnum[i]
        elif i == L-1:  # last layer
            in_dim, out_dim = fnum[i-1], OUTPUT_DIM*(INTERLEAVE_RATE**2)
        else:
            in_dim, out_dim = fnum[i-1], fnum[i]

        layers_params[i] = {'weights': init_weights([fw[i], fw[i], in_dim, out_dim], 'xavier',
                                                     xavier_params=(in_dim, out_dim), r=weightVarScale),
                            'bias': tf.Variable(tf.truncated_normal([out_dim], stddev=bias_stddev))}
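        # note (assumption): init_weights() is a helper defined earlier in this file; the
        # 'xavier' option presumably scales the initial weight variance from the fan-in /
        # fan-out passed via xavier_params, modulated by r=weightVarScale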

    # build layers:
    print("input data:", x_in.shape)
    if RENORMALIZE_INPUT:
        x_in = x_in - 0.5*RENORM_SCALE

    for i in range(0, L):
        if i == 0:
            prev_layers[i] = x_in
        elif (i < 3) or (i % 2 == 0):
            prev_layers[i] = layers[i-1]
        else:
            prev_layers[i] = layers[i-1] + layers[i-3]
            print('(skip connection: %d, %d)' % (i-1, i-3))

        conv = tf.nn.conv2d(prev_layers[i], layers_params[i]['weights'],
                            strides=[1, 1, 1, 1], padding='SAME', data_format='NCHW')
        conv = tf.nn.bias_add(conv, layers_params[i]['bias'], data_format='NCHW')
        bn = tf.layers.batch_normalization(conv, axis=1)
        if i == L-1:  # last layer
            layers[i] = tf.nn.tanh(bn)
        else:
            layers[i] = activationFunc(bn)

        print("layer %d:" % i, layers[i].shape)
             
    # map the tanh output from [-1, 1] back to [0, 1]
    x_out = tf.add(0.5, tf.scalar_mul(0.5, layers[L-1]), name='x_out')     
    print("output tensor:", x_out.shape)

    return deinterleave(INTERLEAVE_RATE, x_out)
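
# NOTE (assumption): deinterleave() is defined earlier in this file, outside this excerpt.
# Since the last layer emits OUTPUT_DIM*(INTERLEAVE_RATE**2) channels, it presumably performs
# a depth-to-space ("pixel shuffle") rearrangement. The sketch below is a hypothetical
# stand-in for NCHW tensors, not the original helper.
def deinterleave_sketch(rate, x_nchw):
    x = tf.transpose(x_nchw, [0, 2, 3, 1])   # NCHW -> NHWC
    x = tf.depth_to_space(x, rate)           # fold rate*rate channel blocks back into space
    return tf.transpose(x, [0, 3, 1, 2])     # NHWC -> NCHW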

# construct model
model_op =  model(X)

# target, prediction
labels, predictions = Y, model_op

# shared image-quality metrics
rmse_intensity = tf.losses.mean_squared_error(labels=labels, predictions=predictions)
psnr_intensity = 20*log10(RENORM_SCALE) - 10*log10(rmse_intensity)
ssim_intensity = tf.reduce_mean(tf.image.ssim(tf.transpose(labels, [0,2,3,1]), tf.transpose(predictions, [0,2,3,1]), max_val = 1.0))
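
# PSNR above follows 20*log10(MAX) - 10*log10(MSE) with MAX = RENORM_SCALE (despite its
# name, rmse_intensity holds the mean squared error); tf.image.ssim expects NHWC tensors
# and pixel values in [0, max_val], hence the transposes from the network's NCHW layout.
# NOTE (assumption): log10() and calImageGradients() are helpers defined earlier in this
# file; the sketches below use hypothetical names and show what they presumably compute,
# not the original implementations.
def log10_sketch(x):
    # TF1 has no base-10 log op, so build it from natural logs
    return tf.log(x) / tf.log(tf.constant(10.0, dtype=x.dtype))

def cal_image_gradients_sketch(img_nchw):
    # forward finite differences along width (dx) and height (dy) of an NCHW tensor
    dx = img_nchw[:, :, :, 1:] - img_nchw[:, :, :, :-1]
    dy = img_nchw[:, :, 1:, :] - img_nchw[:, :, :-1, :]
    return dx, dy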

if USE_LOG_L2_BOTH: 
   labels_dx, labels_dy = calImageGradients(labels)
   preds_dx, preds_dy = calImageGradients(predictions)
   rmse_grad_x, rmse_grad_y = tf.losses.mean_squared_error(labels=labels_dx, predictions=preds_dx), tf.losses.mean_squared_error(labels=labels_dy, predictions=preds_dy)
   psnr_grad_x, psnr_grad_y = -20*log10(RENORM_SCALE) + 10*log10(rmse_grad_x), -20*log10(RENORM_SCALE) + 10*log10(rmse_grad_y)
   loss = -psnr_intensity + 0.5*(psnr_grad_x + psnr_grad_y)

elif USE_LOG_L1_BOTH:
   log_diff_intensity = log10(tf.reduce_mean(tf.abs(labels-predictions))) 
   labels_dx, labels_dy = calImageGradients(labels)
   preds_dx, preds_dy = calImageGradients(predictions)
   log_diff_grad_x = log10(tf.reduce_mean(tf.abs(labels_dx-preds_dx)))
   log_diff_grad_y = log10(tf.reduce_mean(tf.abs(labels_dy-preds_dy)))
   loss = log_diff_intensity + 0.5*(log_diff_grad_x + log_diff_grad_y)   
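else:
   # defensive guard (an addition, not in the original): fail fast here rather than hitting
   # a NameError on 'loss' below when neither loss flag is enabled
   raise RuntimeError("no loss selected: set USE_LOG_L2_BOTH or USE_LOG_L1_BOTH")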

# training optimizer: a single Adam instance provides both the update op and the raw gradients
adam_opt = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE, beta1=0.9, beta2=0.999, epsilon=1e-8)
grads_and_vars = adam_opt.compute_gradients(loss, tf.trainable_variables())
optimizer = adam_opt.apply_gradients(grads_and_vars)

# set up saver
saver = tf.train.Saver(max_to_keep=5)

# initialization
init = tf.global_variables_initializer()

# create a summary to monitor cost tensor
training_summary  = tf.summary.scalar("training loss", loss, family=TF_SUMMARY_FLAG)
validation_summary  = tf.summary.scalar("validation loss", loss, family=TF_SUMMARY_FLAG)
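# both summaries record the same 'loss' tensor; only the tag differs (presumably the first is
# evaluated on training batches and the second on validation batches further down)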

# start training
with tf.Session() as sess:       
    # run the initialization
    sess.run(init)  
    
    # op to write logs to Tensorboard
    summary_writer = tf.summary.FileWriter(LOGS_DIR, graph=tf.get_default_graph())
    
    # restore a saved checkpoint when requested, or whenever we are not in training mode
    if RESTORE_TRAINED_MODEL or MODE!="TRAIN":
        ckpt = tf.train.get_checkpoint_state(RESTORE_LOGS_DIR)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, tf.train.latest_checkpoint(RESTORE_LOGS_DIR))
            print("model restored from %s" % RESTORE_LOGS_DIR)
        else:
            input("ERROR: NO RESTORED MODEL...")

    min_ell_eval_mean = []

    #-------------------------------------------------------------------------------
    # training
    if MODE == "TRAIN":
        print("training mode")
        for e in range(0, NUM_EPOCHS):
            print("--training epoch:", e)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



varifocal/code/sync_focalstack_from_rgbd/train/train-lfs.py [146:257]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    fwConstant = 3
    fnumConstant = 128
    fw = np.full(L, fwConstant, dtype=int)  # filter width at each layer
    fnum = np.append(np.full(L - 1, fnumConstant, dtype=int), OUTPUT_DIM*(INTERLEAVE_RATE**2))  # output channels at each layer

    layers_params = {}
    layers = {}
    prev_layers = {}

    for i in range(0, L):
        if i == 0:  # first layer
            in_dim, out_dim = INPUT_DIM*(INTERLEAVE_RATE**2), fnum[i]
        elif i == L-1:  # last layer
            in_dim, out_dim = fnum[i-1], OUTPUT_DIM*(INTERLEAVE_RATE**2)
        else:
            in_dim, out_dim = fnum[i-1], fnum[i]

        layers_params[i] = {'weights': init_weights([fw[i], fw[i], in_dim, out_dim], 'xavier',
                                                     xavier_params=(in_dim, out_dim), r=weightVarScale),
                            'bias': tf.Variable(tf.truncated_normal([out_dim], stddev=bias_stddev))}

    # build layers:
    print("input data:", x_in.shape)
    if RENORMALIZE_INPUT:
        x_in = x_in - 0.5*RENORM_SCALE

    for i in range(0, L):
        if i == 0:
            prev_layers[i] = x_in
        elif (i < 3) or (i % 2 == 0):
            prev_layers[i] = layers[i-1]
        else:
            prev_layers[i] = layers[i-1] + layers[i-3]
            print('(skip connection: %d, %d)' % (i-1, i-3))

        conv = tf.nn.conv2d(prev_layers[i], layers_params[i]['weights'],
                            strides=[1, 1, 1, 1], padding='SAME', data_format='NCHW')
        conv = tf.nn.bias_add(conv, layers_params[i]['bias'], data_format='NCHW')
        bn = tf.layers.batch_normalization(conv, axis=1)
        if i == L-1:  # last layer
            layers[i] = tf.nn.tanh(bn)
        else:
            layers[i] = activationFunc(bn)

        print("layer %d:" % i, layers[i].shape)
             
    # map the tanh output from [-1, 1] back to [0, 1]
    x_out = tf.add(0.5, tf.scalar_mul(0.5, layers[L-1]), name='x_out')     
    print("output tensor:", x_out.shape)

    return deinterleave(INTERLEAVE_RATE, x_out)

# construct model
model_op =  model(X)

# target, prediction
labels, predictions = Y, model_op

# shared image-quality metrics
rmse_intensity = tf.losses.mean_squared_error(labels=labels, predictions=predictions)
psnr_intensity = 20*log10(RENORM_SCALE) - 10*log10(rmse_intensity)
ssim_intensity = tf.reduce_mean(tf.image.ssim(tf.transpose(labels, [0,2,3,1]), tf.transpose(predictions, [0,2,3,1]), max_val = 1.0))

if USE_LOG_L2_BOTH: 
   labels_dx, labels_dy = calImageGradients(labels)
   preds_dx, preds_dy = calImageGradients(predictions)
   rmse_grad_x, rmse_grad_y = tf.losses.mean_squared_error(labels=labels_dx, predictions=preds_dx), tf.losses.mean_squared_error(labels=labels_dy, predictions=preds_dy)
   psnr_grad_x, psnr_grad_y = -20*log10(RENORM_SCALE) + 10*log10(rmse_grad_x), -20*log10(RENORM_SCALE) + 10*log10(rmse_grad_y)
   loss = -psnr_intensity + 0.5*(psnr_grad_x + psnr_grad_y)

elif USE_LOG_L1_BOTH:
   log_diff_intensity = log10(tf.reduce_mean(tf.abs(labels-predictions))) 
   labels_dx, labels_dy = calImageGradients(labels)
   preds_dx, preds_dy = calImageGradients(predictions)
   log_diff_grad_x = log10(tf.reduce_mean(tf.abs(labels_dx-preds_dx)))
   log_diff_grad_y = log10(tf.reduce_mean(tf.abs(labels_dy-preds_dy)))
   loss = log_diff_intensity + 0.5*(log_diff_grad_x + log_diff_grad_y) 
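else:
   # defensive guard (an addition, not in the original): fail fast here rather than hitting
   # a NameError on 'loss' below when neither loss flag is enabled
   raise RuntimeError("no loss selected: set USE_LOG_L2_BOTH or USE_LOG_L1_BOTH")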
      
# training optimizer: a single Adam instance provides both the update op and the raw gradients
adam_opt = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE, beta1=0.9, beta2=0.999, epsilon=1e-8)
grads_and_vars = adam_opt.compute_gradients(loss, tf.trainable_variables())
optimizer = adam_opt.apply_gradients(grads_and_vars)

# set up saver
saver = tf.train.Saver(max_to_keep=5)

# initialization
init = tf.global_variables_initializer()

# create a summary to monitor cost tensor
training_summary  = tf.summary.scalar("training loss", loss, family=TF_SUMMARY_FLAG)
validation_summary  = tf.summary.scalar("validation loss", loss, family=TF_SUMMARY_FLAG)

# start training
with tf.Session() as sess:       
    # run the initialization
    sess.run(init)  
    
    # op to write logs to Tensorboard
    summary_writer = tf.summary.FileWriter(LOGS_DIR, graph=tf.get_default_graph())
    
    # restore a saved checkpoint when requested, or whenever we are not in training mode
    if RESTORE_TRAINED_MODEL or MODE!="TRAIN":
        ckpt = tf.train.get_checkpoint_state(RESTORE_LOGS_DIR)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, tf.train.latest_checkpoint(RESTORE_LOGS_DIR))
            print("model restored from %s" % RESTORE_LOGS_DIR)
        else:
            input("ERROR: NO RESTORED MODEL...")

    min_ell_eval_mean = []

    #-------------------------------------------------------------------------------
    # training
    if MODE == "TRAIN":
        print("training mode")
        for e in range(0, NUM_EPOCHS):
            print("--training epoch:", e)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



