in bench_cluster/communication/p2p.py [0:0]
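# NOTE: hedged addition, not part of the original file: a module docstring with a
# hypothetical launch command. The CLI flags shown are assumptions inferred from
# run_p2p's parameters, not the repo's actual argument parser.
"""Point-to-point (send/recv) communication benchmark.

Run with one process per GPU; a hypothetical invocation:

    torchrun --nproc_per_node=2 -m bench_cluster.communication.p2p --scan
"""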
import torch
import torch.distributed as dist

# NOTE: these helpers are assumed to live in the repo's shared benchmark utils
# module; adjust the import path to match the actual layout. timed_p2p is
# defined later in this file.
from bench_cluster.communication.utils import print_header, print_rank_0, sync_all, max_numel


def run_p2p(local_rank, trials, warmups, maxsize, async_op, bw_unit, scan, raw, dtype, mem_factor, debug=False):
    """Benchmark point-to-point communication, either scanning message sizes
    (powers of two up to 2**(maxsize - 1) elements) or sending the largest
    tensor that fits in GPU memory under mem_factor."""
    # Prepare benchmark header
    print_header(bw_unit, raw, 'p2p')
    global_rank = dist.get_rank()
    world_size = dist.get_world_size()
    start_event = torch.cuda.Event(enable_timing=True)
    end_event = torch.cuda.Event(enable_timing=True)

    if scan:
        # Create the list of message sizes: 2, 4, ..., 2**(maxsize - 1) elements
        M_LIST = [2**p for p in range(1, maxsize)]

        sync_all()
        # Loop over the various tensor sizes
        for M in M_LIST:
            try:
                mat = torch.ones(M, dtype=getattr(torch, dtype)).cuda(local_rank)
                sync_all()
                input = mat.mul_(float(global_rank))
                del mat
                torch.cuda.empty_cache()
            except RuntimeError as e:
                if 'out of memory' in str(e):
                    print_rank_0('WARNING: Ran out of GPU memory. Exiting comm op.')
                    sync_all()
                    break
                else:
                    raise e
            sync_all()
            timed_p2p(input, start_event, end_event, warmups, trials, async_op, bw_unit, raw)
    else:
        # Send the biggest message our GPUs can fit. If you hit OOM errors, reduce
        # mem_factor. No output tensor is needed for p2p, so double the mem_factor.
        elements_per_gpu = max_numel('p2p', getattr(torch, dtype), mem_factor * 2, local_rank)
        try:
            mat = torch.ones(elements_per_gpu, dtype=getattr(torch, dtype)).cuda(local_rank)
            input = mat.mul_(float(global_rank))
        except RuntimeError as e:
            if 'out of memory' in str(e):
                print_rank_0('WARNING: Ran out of GPU memory. Try to reduce the --mem-factor argument!')
                sync_all()
                return
            # Re-raise anything that is not an OOM so real errors are not swallowed
            raise e
        sync_all()
        timed_p2p(input, start_event, end_event, warmups, trials, async_op, bw_unit, raw)
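

# A minimal sketch of the timed_p2p helper referenced above, assuming the usual
# pattern for these benchmarks: even ranks send to rank + 1, odd ranks receive
# from rank - 1 (so an even world size is assumed), with CUDA events timing the
# trial loop after warmup. The repo's real helper may differ, e.g. in how it
# formats output or computes bus bandwidth.
def timed_p2p(input, start_event, end_event, warmups, trials, async_op, bw_unit, raw):
    global_rank = dist.get_rank()

    def transfer():
        if async_op:
            # Non-blocking send/recv; wait on the handle to complete the transfer
            if global_rank % 2 == 0:
                work = dist.isend(input, dst=global_rank + 1)
            else:
                work = dist.irecv(input, src=global_rank - 1)
            work.wait()
        else:
            if global_rank % 2 == 0:
                dist.send(input, dst=global_rank + 1)
            else:
                dist.recv(input, src=global_rank - 1)

    # Warmup iterations are excluded from timing
    for _ in range(warmups):
        transfer()
    sync_all()

    start_event.record()
    for _ in range(trials):
        transfer()
    end_event.record()
    torch.cuda.synchronize()

    # elapsed_time() returns milliseconds; average over trials and convert to seconds
    avg_duration_s = start_event.elapsed_time(end_event) / 1000 / trials
    size_bytes = input.element_size() * input.nelement()
    bw = size_bytes / avg_duration_s  # bytes per second
    if bw_unit == 'Gbps':
        bw *= 8  # convert to bits per second
    msg = f'{size_bytes} bytes' if raw else f'{size_bytes / 1e9:.3f} GB'
    print_rank_0(f'{msg}: {bw / 1e9:.3f} {bw_unit}')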