in video-intelligence/analyze-person-detection.js [39:92]
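// Setup assumed from earlier in this file (the excerpt starts at line 39):
// a minimal sketch of the `video` client and base64-encoded `inputContent`
// that detectPerson() relies on; `path` is a hypothetical local file path.
//
// const Video = require('@google-cloud/video-intelligence').v1;
// const fs = require('fs');
// const video = new Video.VideoIntelligenceServiceClient();
// const inputContent = fs.readFileSync(path).toString('base64');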
async function detectPerson() {
  const request = {
    inputContent: inputContent,
    features: ['PERSON_DETECTION'],
    videoContext: {
      personDetectionConfig: {
        // Must set includeBoundingBoxes to true to get poses and attributes.
        includeBoundingBoxes: true,
        includePoseLandmarks: true,
        includeAttributes: true,
      },
    },
  };
  // Detects people in a video
  // We get the first result because we only process 1 video
  const [operation] = await video.annotateVideo(request);
  console.log('Waiting for operation to complete...');
  const results = await operation.promise();

  // Gets annotations for the video
  const personAnnotations =
    results[0].annotationResults[0].personDetectionAnnotations;
  for (const {tracks} of personAnnotations) {
    console.log('Person detected:');

    for (const {segment, timestampedObjects} of tracks) {
      console.log(
        `\tStart: ${segment.startTimeOffset.seconds}` +
          `.${(segment.startTimeOffset.nanos / 1e6).toFixed(0)}s`
      );
      console.log(
        `\tEnd: ${segment.endTimeOffset.seconds}.` +
          `${(segment.endTimeOffset.nanos / 1e6).toFixed(0)}s`
      );
      // Each segment includes timestamped objects that describe
      // characteristics of the person detected, e.g. clothes and posture.
      const [firstTimestampedObject] = timestampedObjects;

      // Attributes cover unique pieces of clothing and the posture
      // of the person detected.
      for (const {name, value} of firstTimestampedObject.attributes) {
        console.log(`\tAttribute: ${name}; Value: ${value}`);
      }
      // Landmarks in person detection include body parts.
      for (const {name, point} of firstTimestampedObject.landmarks) {
        console.log(`\tLandmark: ${name}; Vertex: ${point.x}, ${point.y}`);
      }
    }
  }
}
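
// Usage sketch: invoking the function and surfacing errors. The catch
// handler is an assumption; the original file may call detectPerson()
// differently outside this excerpt.
detectPerson().catch(console.error);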