in jetson_inference/artifacts/aws.greengrass.JetsonDLRImageClassification/1.0.0/inference.py [0:0]
def predict_from_cam():
    """Capture a single frame from the platform-specific camera and classify it.

    Dispatches on ``platform.machine()`` to pick the capture path:
    Raspberry Pi (picamera JPEG capture), Jetson (OpenCV ``VideoCapture``),
    or DeepLens (``getLastFrame``).

    Returns:
        The result of ``predict_from_image`` on the captured frame, or
        ``None`` when no camera is available.

    Raises:
        RuntimeError: if the camera cannot be opened, a frame cannot be
            read, or the platform is not one of the supported machines.
    """
    if camera is None:
        print("Unable to support camera")
        return

    machine = platform.machine()  # hoisted: avoid repeated syscall-backed lookups
    if machine == "armv7l":  # Raspberry Pi
        stream = io.BytesIO()
        camera.start_preview()
        time.sleep(2)  # let the sensor settle (exposure/white balance)
        camera.capture(stream, format='jpeg')
        # Construct a numpy array from the in-memory JPEG bytes.
        # np.frombuffer replaces the deprecated np.fromstring.
        data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
        # "Decode" the image from the array, preserving colour
        cvimage = cv2.imdecode(data, 1)
    elif machine == "aarch64":  # Nvidia Jetson TX
        if camera.isOpened():
            ret, cvimage = camera.read()
            cv2.destroyAllWindows()
        else:
            raise RuntimeError("Cannot open the camera")
    elif machine == "x86_64":  # Deeplens
        ret, cvimage = camera.getLastFrame()
        if not ret:
            raise RuntimeError("Failed to get frame from the stream")
    else:
        # Previously an unknown platform fell through and raised a confusing
        # NameError on `cvimage`; fail explicitly instead.
        raise RuntimeError("Unsupported platform: {}".format(machine))

    return predict_from_image(cvimage)