in source/containers/face-comparison/recognizer/predictor.py [0:0]
# Module-level imports assumed by this method: base64, time, cv2, numpy as np.
@classmethod
def predict(cls, source_image_base64, target_image_base64, min_confidence_thresh=0.40):
    # Decode the base64-encoded payloads into BGR image arrays.
    source_image = cv2.imdecode(np.frombuffer(base64.b64decode(source_image_base64), np.uint8), cv2.IMREAD_COLOR)
    target_image = cv2.imdecode(np.frombuffer(base64.b64decode(target_image_base64), np.uint8), cv2.IMREAD_COLOR)

    # Detect and align faces in both images: the source image yields at most
    # one face, the target image may yield several.
    t1 = time.time()
    source_detected_face = cls.detect_and_align(source_image, is_source_image=True, threshold=min_confidence_thresh)
    target_detected_faces = cls.detect_and_align(target_image, is_source_image=False, threshold=min_confidence_thresh)
    t2 = time.time()
    print('Time cost of face detection & alignment for 2 images = {} seconds'.format(t2 - t1))

    response = {
        'SourceImageFace': None,
        'FaceMatches': []
    }

    # No face found in the source image: return the empty response as-is.
    if source_detected_face is None:
        return response

    [x_min, y_min, x_max, y_max] = source_detected_face.bbox
    response['SourceImageFace'] = {
        'BoundingBox': [x_min, y_min, x_max, y_max],
        'Confidence': source_detected_face.confidence,
        'KeyPoints': source_detected_face.key_points
    }

    # Compute the source embedding once, then compare it against every face
    # detected in the target image via a dot product (cosine similarity when
    # the embeddings are L2-normalized).
    base_feat_representation = cls.get_feature(source_detected_face.aligned_face_img)
    for target_comp_face in target_detected_faces:
        target_feat_representation = cls.get_feature(target_comp_face.aligned_face_img)
        similarity_score = np.dot(base_feat_representation, target_feat_representation)

        # Add the comparison to the response body.
        [x_min, y_min, x_max, y_max] = target_comp_face.bbox
        response['FaceMatches'].append({
            'Similarity': float(similarity_score),
            'Face': {
                'BoundingBox': [x_min, y_min, x_max, y_max],
                'Confidence': target_comp_face.confidence,
                'KeyPoints': target_comp_face.key_points
            }
        })
    return response
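
# --- Usage sketch (illustrative, not part of predictor.py) -----------------
# A minimal example of calling predict(), assuming the enclosing class is
# named Predictor (the file is predictor.py, but the actual class name is not
# shown in the excerpt above). encode_image() and the image file names are
# hypothetical helpers introduced only for this sketch.
import base64

def encode_image(path):
    # Read an image file from disk and return its base64-encoded contents.
    with open(path, 'rb') as f:
        return base64.b64encode(f.read()).decode('utf-8')

source_b64 = encode_image('source.jpg')
target_b64 = encode_image('target.jpg')

result = Predictor.predict(source_b64, target_b64, min_confidence_thresh=0.40)

# The source face (bounding box, confidence, key points), or None if no face
# was detected above the confidence threshold.
print(result['SourceImageFace'])

# One entry per face found in the target image, with its similarity score.
for match in result['FaceMatches']:
    print(match['Similarity'], match['Face']['BoundingBox'])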