in src/rekognition/index-faces/storage.py [0:0]
def convert_to_item(self, face_metadata: FaceMetadata, face_id: str) -> dict:
    '''
    Encode the face metadata as an Amazon DynamoDB Item.

    When the image bucket is disabled, the raw image bytes are stored
    inline in the item (base64-encoded), which gives more consistent
    response times to end-users. Otherwise the image is uploaded to S3
    and the item carries an ``s3_uri`` pointer instead — potentially
    more cost-efficient, at the cost of longer last-byte retrieval.

    :param face_metadata: metadata for the indexed face
        (``user_id``, ``image_bytes``, ``property_bag``).
    :param face_id: the face identifier used in the sort key.
    :returns: a low-level DynamoDB Item dict suitable for ``put_item``.
    :raises ValueError: if ``face_metadata`` or ``face_id`` is ``None``.
    '''
    # Validate with explicit raises, not assert: asserts are stripped
    # when Python runs with optimizations enabled (-O).
    if face_metadata is None:
        raise ValueError("faceMetadata not specified")
    if face_id is None:
        raise ValueError("face_id not specified")

    item = {
        'PartitionKey': {'S': 'User::{}'.format(face_metadata.user_id.lower())},
        'SortKey': {'S': 'Face::{}'.format(face_id.lower())},
        'property_bag': {'M': FaceMetadata.ddb_encode_dict(face_metadata.property_bag)},
    }

    # Default behavior: store the image bytes directly in DynamoDB for
    # more consistent response times to end-users.
    if not self.image_bucket_enabled:
        item['image'] = {'B': str(b64encode(face_metadata.image_bytes), encoding='utf-8')}
        return item

    # Alternatively, customers can place the images into an S3 bucket.
    # This strategy is potentially more cost-efficient, with longer
    # last-byte retrieval times.
    key = '{}{}/{}.bin'.format(self.prefix, face_metadata.user_id, face_id)
    self.s3_client.put_object(
        Bucket=self.bucket_name,
        Key=key,
        Body=face_metadata.image_bytes,
        Tagging="Indexed=True")

    # Update the item to point at the uploaded S3 object.
    item['s3_uri'] = {'S': 's3://{}/{}'.format(self.bucket_name, key)}
    return item