# jetson_inference/artifacts/aws.greengrass.JetsonDLRImageClassification/1.0.0/jetson_inference.py



def main():
    """Launch the DLR image-classification inference subprocess on a Jetson device.

    Command-line arguments (populated by the Greengrass component recipe):
        argv[1]: value forwarded to inference.py as -a
        argv[2]: root directory containing the compiled models
        argv[3]: JSON string mapping Jetson chip id -> model file name
        argv[4..6]: values forwarded to inference.py as -p, -i and -s

    The device is detected via /sys/module/tegra_fuse/parameters/tegra_chip_id;
    on non-Jetson hardware a message is printed and the function returns.
    """
    chip_id_file = '/sys/module/tegra_fuse/parameters/tegra_chip_id'

    # Diagnostic dump of the launch context (argv, model map, Greengrass IPC env).
    for idx, arg in enumerate(sys.argv[:7]):
        print(f"argv[{idx}]=", arg)
    models = demjson.decode(sys.argv[3])
    print("models=", str(models))
    hostname = os.getenv("AWS_GG_NUCLEUS_DOMAIN_SOCKET_FILEPATH_FOR_COMPONENT")
    print("hostname=", hostname)
    print("env vars:", os.environ)
    print("svcid=", os.getenv("SVCUID"))

    if not path.exists(chip_id_file):
        # Typo fixed: original said "Falling existing".
        print("Cannot identify Jetson device! Exiting. This is intended only for NVIDIA Jetson Devices.")
        return

    jetson_gpu = Path(chip_id_file).read_text().rstrip()
    print("Found Jetson GPU id:", str(jetson_gpu))
    current_path = sys.argv[0][:sys.argv[0].rindex("/")]
    print("Current path:", current_path)

    # Build an argv list instead of a shell string: the component arguments are
    # interpolated verbatim, so shell=True would allow command injection.  The
    # TVM_TENSORRT_CACHE_DIR prefix is passed through env= instead.
    cmd = [
        "python3",
        f"{current_path}/inference.py",
        "-a", sys.argv[1],
        "-m", f"{sys.argv[2]}/resnet18_v1-jetson/{models[str(jetson_gpu)]}",
        "-p", sys.argv[4],
        "-i", sys.argv[5],
        "-s", sys.argv[6],
    ]
    child_env = dict(os.environ, TVM_TENSORRT_CACHE_DIR="/tmp")
    print("Calling inference:", " ".join(cmd))

    # Merge stderr into stdout and stream it line by line.  The previous
    # poll()/readline() loop on two separate pipes could deadlock once either
    # pipe buffer filled, and silently dropped output still buffered when the
    # child exited.  The context manager guarantees the pipe is closed.
    with subprocess.Popen(cmd, env=child_env,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT,
                          universal_newlines=True) as p:
        for line in p.stdout:
            print(line, end="")