torchrec/distributed/planner/shard_estimators.py [223:248]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    global_batch_size: float,  # batch size summed over all ranks (scales every term below)
    input_lengths: List[float],  # one entry per input feature; sum() drives input/compute terms, len() drives output term
    emb_dim: int,  # embedding dimension of the table
    input_data_type_size: float,  # per-element size of the input ids — presumably bytes; confirm against caller
    output_data_type_size: float,  # per-element size of the embedding output — presumably bytes; confirm against caller
    device_bw: float,  # device bandwidth; divisor for the compute term only
    bw_inter_host: int,  # inter-host bandwidth; divisor for both communication terms
) -> Tuple[float, float, float]:
    """Return ``(input_perf, compute_perf, output_perf)`` cost estimates.

    Each term is a data-volume product divided by the relevant bandwidth:

    * ``input_perf``   = global_batch_size * sum(input_lengths) * input_data_type_size / bw_inter_host
    * ``compute_perf`` = global_batch_size * sum(input_lengths) * emb_dim * output_data_type_size / device_bw
    * ``output_perf``  = global_batch_size * emb_dim * len(input_lengths) * output_data_type_size / bw_inter_host

    NOTE(review): the enclosing ``def`` line falls outside this excerpt, so the
    function name and the exact units of the result (size / bandwidth, i.e.
    time-like) are assumed — confirm against the full source file.
    """
    input_perf = (
        global_batch_size * sum(input_lengths) * input_data_type_size / bw_inter_host
    )
    compute_perf = (
        global_batch_size
        * sum(input_lengths)
        * emb_dim
        * output_data_type_size
        / device_bw
    )
    output_perf = (
        global_batch_size
        * emb_dim
        * len(input_lengths)
        * output_data_type_size
        / bw_inter_host
    )
    return (input_perf, compute_perf, output_perf)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



torchrec/distributed/planner/shard_estimators.py [252:277]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    global_batch_size: float,  # batch size summed over all ranks (scales every term below)
    input_lengths: List[float],  # one entry per input feature; sum() drives input/compute terms, len() drives output term
    emb_dim: int,  # embedding dimension of the table
    input_data_type_size: float,  # per-element size of the input ids — presumably bytes; confirm against caller
    output_data_type_size: float,  # per-element size of the embedding output — presumably bytes; confirm against caller
    device_bw: float,  # device bandwidth; divisor for the compute term only
    bw_inter_host: int,  # inter-host bandwidth; divisor for both communication terms
) -> Tuple[float, float, float]:
    """Return ``(input_perf, compute_perf, output_perf)`` cost estimates.

    Each term is a data-volume product divided by the relevant bandwidth:

    * ``input_perf``   = global_batch_size * sum(input_lengths) * input_data_type_size / bw_inter_host
    * ``compute_perf`` = global_batch_size * sum(input_lengths) * emb_dim * output_data_type_size / device_bw
    * ``output_perf``  = global_batch_size * emb_dim * len(input_lengths) * output_data_type_size / bw_inter_host

    NOTE(review): this fragment is byte-identical to the one reported at
    [223:248] of the same file; the enclosing ``def`` line falls outside this
    excerpt, so the function name and the exact units of the result are
    assumed — confirm against the full source file.
    """
    input_perf = (
        global_batch_size * sum(input_lengths) * input_data_type_size / bw_inter_host
    )
    compute_perf = (
        global_batch_size
        * sum(input_lengths)
        * emb_dim
        * output_data_type_size
        / device_bw
    )
    output_perf = (
        global_batch_size
        * emb_dim
        * len(input_lengths)
        * output_data_type_size
        / bw_inter_host
    )
    return (input_perf, compute_perf, output_perf)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



