in step8_pipeline_parallel_1f1b/pipeline_parallel.py [0:0]
def train_step_pipeline_afab(model, data_loader, tensor_shapes, device, dtype):
    """
    Executes a training step using All-Forward-All-Backward (AFAB) pipeline parallelism:
    every microbatch's forward pass runs first, then every backward pass. The schedule is
    simple, but activations for all in-flight microbatches must be kept until the backward phase.
    """
    logging_loss: torch.float32 = 0.0
    input_tensors, output_tensors = [], []
    # Gradients only need to be synchronized across data-parallel ranks when DP is actually used
    requires_grad_sync = pgm.process_group_manager.dp_world_size > 1
    # === All Forward Pass Phase ===
    for _ in range(data_loader.grad_acc_steps):
        # Receive activations from the previous pipeline stage (None on the first stage)
        input_tensor = pipeline_communicate(operation='recv_forward', shapes=tensor_shapes, device=device, dtype=dtype)
        batch = next(data_loader)
        batch["hidden_states"] = input_tensor.to(device) if input_tensor is not None else input_tensor
        output_tensor = model.forward(input_ids=batch["input_ids"].to(device), position_ids=batch["position_ids"].to(device), hidden_states=batch["hidden_states"])
        # Send this stage's output activations to the next pipeline stage
        pipeline_communicate(operation='send_forward', tensor=output_tensor, device=device, dtype=dtype)

        # Calculate the loss on the last stage; dividing by grad_acc_steps keeps the
        # logged value comparable to a non-accumulated step
        if pgm.process_group_manager.pp_is_last_stage:
            output_tensor = F.cross_entropy(output_tensor.transpose(1, 2), batch["target_ids"].to(device), reduction='mean')
            logging_loss += output_tensor.item() / data_loader.grad_acc_steps

        # Stash activations so the backward phase can replay them in microbatch order
        input_tensors.append(input_tensor)
        output_tensors.append(output_tensor)
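    # At this point, activations for all grad_acc_steps microbatches are buffered in
    # input_tensors/output_tensors, so peak activation memory grows linearly with the
    # number of microbatches (the 1F1B schedule interleaves forwards and backwards to bound this).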
    # === All Backward Pass Phase ===
    for ith_microbatch in range(data_loader.grad_acc_steps):
        if requires_grad_sync:
            # Defer the data-parallel gradient all-reduce until the last microbatch
            is_last_iteration = (ith_microbatch == data_loader.grad_acc_steps - 1)
            model.require_backward_grad_sync = is_last_iteration
        # Receive the gradient w.r.t. this stage's output from the next pipeline stage
        output_tensor_grad = pipeline_communicate(operation='recv_backward', shapes=tensor_shapes, device=device, dtype=dtype)
        # Pop activations in FIFO order so each backward matches its forward microbatch
        input_tensor, output_tensor = input_tensors.pop(0), output_tensors.pop(0)
        input_tensor_grad = model.backward(input_tensor, output_tensor, output_tensor_grad)
        # Send the gradient w.r.t. this stage's input back to the previous stage
        pipeline_communicate(operation='send_backward', tensor=input_tensor_grad, device=device, dtype=dtype)
    return logging_loss
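
For illustration only, a minimal sketch (not the repository's actual driver code) of how an outer training loop might call train_step_pipeline_afab. The names optimizer, num_steps, micro_batch_size, seq_length, and hidden_size are assumed placeholders, and the tensor_shapes layout is an assumption chosen to match the (batch, sequence, hidden) form of hidden_states above.

def train(model, data_loader, optimizer, num_steps,
          micro_batch_size, seq_length, hidden_size, device, dtype):
    # Shape of the activations exchanged between stages; assumed (batch, sequence, hidden)
    tensor_shapes = (micro_batch_size, seq_length, hidden_size)
    for step in range(num_steps):
        optimizer.zero_grad()
        # One call consumes grad_acc_steps microbatches: all forwards, then all backwards
        loss = train_step_pipeline_afab(model, data_loader, tensor_shapes, device, dtype)
        optimizer.step()
        # Only the last pipeline stage computes a loss; other stages return 0.0
        if pgm.process_group_manager.pp_is_last_stage:
            print(f"step {step}: loss {loss:.4f}")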