distributed_training/train_pytorch_single_maskrcnn.py [59:68]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
try:
    from apex import amp
    use_amp = True
except ImportError:
    print('Install APEX (apex.amp) for mixed-precision training')
    use_amp = False
# apex DistributedDataParallel is not used in this script
use_apex_ddp = False

def test_and_exchange_map(tester, model, distributed):
    results = tester(model=model, distributed=distributed)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
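
For context, the `use_amp` flag typically gates apex's mixed-precision entry points further down in the training loop. A minimal sketch of that pattern, assuming a CUDA device and an apex install; the placeholder model, optimizer, and the `opt_level='O1'` choice are assumptions, not taken from the script above:

import torch

try:
    from apex import amp
    use_amp = True
except ImportError:
    use_amp = False

model = torch.nn.Linear(128, 10).cuda()  # stand-in for the Mask R-CNN model
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

if use_amp:
    # Patch the model/optimizer pair for mixed precision;
    # 'O1' casts whitelisted ops to FP16 and keeps the rest in FP32.
    model, optimizer = amp.initialize(model, optimizer, opt_level='O1')

loss = model(torch.randn(4, 128, device='cuda')).sum()
if use_amp:
    # Scale the loss so FP16 gradients do not underflow.
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()
else:
    loss.backward()
optimizer.step()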



distributed_training/train_pytorch_smdataparallel_maskrcnn.py [57:71]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
try:
    from apex import amp
    use_amp = True
except ImportError:
    print('Install APEX (apex.amp) for mixed-precision training')
    use_amp = False
# try:
#     from apex.parallel import DistributedDataParallel as DDP
#     use_apex_ddp = True
# except ImportError:
#     print('Install APEX (apex.parallel) for better performance')
# apex DDP stays disabled: this script relies on SMDataParallel's
# DistributedDataParallel wrapper instead.
use_apex_ddp = False

def test_and_exchange_map(tester, model, distributed):
    results = tester(model=model, distributed=distributed)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
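
For context, the commented-out block above corresponds to apex's alternative DDP wrapper. A minimal sketch of how that path is typically wired when enabled, assuming `torch.distributed` is already initialized with one GPU per process; the placeholder model and the `delay_allreduce=True` choice are assumptions, not taken from the script above:

import torch

try:
    from apex.parallel import DistributedDataParallel as DDP
    use_apex_ddp = True
except ImportError:
    use_apex_ddp = False

model = torch.nn.Linear(128, 10).cuda()  # stand-in for the Mask R-CNN model
if use_apex_ddp:
    # apex's wrapper overlaps gradient all-reduce with the backward pass by
    # default; delay_allreduce=True instead runs one all-reduce after backward.
    model = DDP(model, delay_allreduce=True)
else:
    # Stock PyTorch DDP works as a drop-in fallback.
    model = torch.nn.parallel.DistributedDataParallel(model)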



