in code_examples/dotnet_examples/image/net-detect-faces.cs [11:54]
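// Detects faces in an image stored in an Amazon S3 bucket and prints each
// face's bounding box, confidence, landmark count, pose, quality, and
// (when all attributes are requested) estimated age range.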
public static void Example()
{
    String photo = "input.jpg";
    String bucket = "bucket";

    AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient();

    DetectFacesRequest detectFacesRequest = new DetectFacesRequest()
    {
        Image = new Image()
        {
            S3Object = new S3Object()
            {
                Name = photo,
                Bucket = bucket
            },
        },
        // Attributes can be "ALL" or "DEFAULT".
        // "DEFAULT": BoundingBox, Confidence, Landmarks, Pose, and Quality.
        // "ALL": See https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/Rekognition/TFaceDetail.html
        Attributes = new List<String>() { "ALL" }
    };
    try
    {
        DetectFacesResponse detectFacesResponse = rekognitionClient.DetectFaces(detectFacesRequest);
        bool hasAll = detectFacesRequest.Attributes.Contains("ALL");

        foreach (FaceDetail face in detectFacesResponse.FaceDetails)
        {
            Console.WriteLine("BoundingBox: top={0} left={1} width={2} height={3}",
                face.BoundingBox.Top, face.BoundingBox.Left,
                face.BoundingBox.Width, face.BoundingBox.Height);
            Console.WriteLine("Confidence: {0}\nLandmarks: {1}\nPose: pitch={2} roll={3} yaw={4}\nQuality: {5}",
                face.Confidence, face.Landmarks.Count, face.Pose.Pitch,
                face.Pose.Roll, face.Pose.Yaw, face.Quality);

            // The age range is returned only when "ALL" attributes are requested.
            if (hasAll)
                Console.WriteLine("The detected face is estimated to be between " +
                    face.AgeRange.Low + " and " + face.AgeRange.High + " years old.");
        }
    }
    catch (Exception e)
    {
        Console.WriteLine(e.Message);
    }
}