# test_task_sequence -- evaluation helper, originally from conv_split_awa.py

def test_task_sequence(model, sess, test_data, all_task_labels, task, cross_validate_mode):
    """
    Snapshot the model's current test accuracy on every task seen so far.

    Args:
        model: Model wrapper exposing the placeholders/tensors used below
            (x, y_, keep_prob, train_phase, output_mask, correct_predictions)
            plus num_tasks, total_classes and imp_method.
        sess: Active TensorFlow session.
        test_data: Dataset from which per-task test splits are extracted via
            load_task_specific_data.
        all_task_labels: List of label lists, one entry per task.
        task: Index of the most recently trained task; later tasks are skipped.
        cross_validate_mode: Unused here; kept for interface compatibility.

    Returns:
        np.ndarray of shape (model.num_tasks,) with the mean per-class accuracy
        of each evaluated task (entries for tasks > `task` stay zero).
    """
    final_acc = np.zeros(model.num_tasks)
    samples_at_a_time = 10
    if model.imp_method == 'A-GEM':
        # A-GEM uses one mask row per task head.
        logit_mask = np.zeros([model.num_tasks, model.total_classes])
    else:
        logit_mask = np.zeros(model.total_classes)

    for tt, labels in enumerate(all_task_labels):
        # Only evaluate tasks the model has been trained on.
        if tt > task:
            break

        task_images, task_labels = load_task_specific_data(test_data, labels)
        # Rows of (sample_index, class_index) for every nonzero one-hot entry.
        global_class_indices = np.column_stack(np.nonzero(task_labels))
        # Multi-head setup: each task owns a contiguous TOTAL_CLASSES-wide
        # slice of the output logits.
        head_offset = tt * TOTAL_CLASSES
        classes_adjusted_for_head = [cls + head_offset for cls in labels]
        logit_mask[:] = 0
        logit_mask_dict = None
        if model.imp_method == 'A-GEM':
            logit_mask[tt][classes_adjusted_for_head] = 1.0
            logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
        else:
            logit_mask[classes_adjusted_for_head] = 1.0

        def _count_corrects(images, labels_batch):
            # Run one forward pass and return the number of correct predictions.
            feed_dict = {model.x: images,
                    model.y_: labels_batch,
                    model.keep_prob: 1.0, model.train_phase: False}
            if model.imp_method == 'A-GEM':
                feed_dict.update(logit_mask_dict)
                return np.sum(sess.run(model.correct_predictions[tt], feed_dict=feed_dict))
            feed_dict[model.output_mask] = logit_mask
            return np.sum(sess.run(model.correct_predictions, feed_dict=feed_dict))

        acc = np.zeros(len(labels))
        # Reusable label buffer; only the current head's slice is populated.
        batch_labels = np.zeros([samples_at_a_time, model.total_classes])

        for cli, cls in enumerate(labels):
            # Sample indices belonging to this class, in ascending order.
            class_indices = np.squeeze(global_class_indices[global_class_indices[:, 1] == cls][:, np.array([True, False])])
            class_indices = np.sort(class_indices, axis=None)
            task_test_images = task_images[class_indices]
            task_test_labels = task_labels[class_indices]
            total_test_samples = task_test_images.shape[0]
            total_corrects = 0

            # BUGFIX: use floor division -- '/' yields a float under Python 3
            # and range() would raise TypeError.
            num_full_batches = total_test_samples // samples_at_a_time
            for i in range(num_full_batches):
                offset = i * samples_at_a_time
                batch_labels[:, head_offset:head_offset + TOTAL_CLASSES] = task_test_labels[offset:offset + samples_at_a_time]
                total_corrects += _count_corrects(task_test_images[offset:offset + samples_at_a_time], batch_labels)

            # Residual partial batch; skip the session call entirely when empty
            # (original code ran sess.run on a zero-sized batch).
            num_residuals = total_test_samples % samples_at_a_time
            if num_residuals > 0:
                offset = num_full_batches * samples_at_a_time
                batch_labels[:num_residuals, head_offset:head_offset + TOTAL_CLASSES] = task_test_labels[offset:offset + num_residuals]
                total_corrects += _count_corrects(task_test_images[offset:offset + num_residuals], batch_labels[:num_residuals])

            # Guard against a class with no test samples.
            if total_test_samples != 0:
                acc[cli] = total_corrects / float(total_test_samples)

        final_acc[tt] = np.mean(acc)

    return final_acc