def _dataset_parser()

in source_directory/training/training_script.py


import tensorflow as tf


def _dataset_parser(value):
    
    # create a dictionary describing the features    
    sample_feature_description = {
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64),
    }

    # parse the serialized tf.train.Example into a dict of tensors
    example = tf.io.parse_single_example(value, sample_feature_description)
    
    # decode the raw bytes back into a uint8 image tensor and restore its shape
    example['image'] = tf.io.decode_raw(example['image'], tf.uint8)
    example['image'] = tf.reshape(example['image'], (32,32,3))
    
    # preprocess for ResNetV2
    # see https://www.tensorflow.org/api_docs/python/tf/keras/applications/resnet_v2/preprocess_input
    example['image'] = tf.cast(example['image'], tf.float32)
    example['image'] = tf.keras.applications.resnet_v2.preprocess_input(example['image'])
    
    # wrap the image in a dict for the model input
    # NOTE: the dict key must match the name of the Input layer in the keras model
    sample_data = {'image_input': example['image']}

    # one-hot encode the integer label over the 10 classes for the loss function
    label = tf.cast(example['label'], tf.int32)
    label = tf.one_hot(indices=label, depth=10)
    
    return sample_data, label
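
A minimal usage sketch (not part of the source): a parser like this is typically mapped over a tf.data.TFRecordDataset before batching. The file pattern, batch size, and Input layer below are assumptions for illustration only.

import tensorflow as tf

# hypothetical TFRecord file pattern
filenames = tf.io.gfile.glob('train-*.tfrecord')

dataset = (
    tf.data.TFRecordDataset(filenames)
    .map(_dataset_parser, num_parallel_calls=tf.data.AUTOTUNE)  # parse each serialized record
    .batch(128)
    .prefetch(tf.data.AUTOTUNE)
)

# the 'image_input' key returned by the parser must match the Input layer name
image_input = tf.keras.Input(shape=(32, 32, 3), name='image_input')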