torchrec/distributed/rw_sharding.py [172:190]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    EmbeddingSharding[
        SparseFeatures, torch.Tensor, SparseFeaturesList, List[torch.Tensor]
    ]
):
    """
    Shards embedding bags row-wise, i.e., a given embedding table is evenly split
    along its row dimension and the resulting slices are placed across all ranks.
    """

    def __init__(
        self,
        embedding_configs: List[
            Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]
        ],
        pg: dist.ProcessGroup,
        device: Optional[torch.device] = None,
        is_sequence: bool = False,
    ) -> None:
        super().__init__()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
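
A minimal sketch of the row-wise placement described in the docstring above: each
rank receives a contiguous, near-even slice of the table's rows. The helper name
and the remainder-handling scheme below are illustrative, not the torchrec
implementation.

import torch

def row_wise_shard_sizes(num_rows: int, world_size: int) -> list:
    # Evenly divide rows across ranks; here earlier ranks absorb the
    # remainder (torchrec's exact remainder handling may differ).
    base, rem = divmod(num_rows, world_size)
    return [base + (1 if rank < rem else 0) for rank in range(world_size)]

# Example: a 10-row embedding table sharded across 4 ranks.
weights = torch.randn(10, 8)
sizes = row_wise_shard_sizes(weights.size(0), world_size=4)
shards = torch.split(weights, sizes, dim=0)  # one row slice per rank
assert [s.size(0) for s in shards] == [3, 3, 2, 2]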



torchrec/distributed/twrw_sharding.py [288:305]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    EmbeddingSharding[
        SparseFeatures, torch.Tensor, SparseFeaturesList, List[torch.Tensor]
    ]
):
    """
    Shards embedding bags table-wise then row-wise, i.e., each embedding table is
    assigned to a single host, and its rows are then distributed across the ranks
    within that host.
    """

    def __init__(
        self,
        embedding_configs: List[
            Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]
        ],
        pg: dist.ProcessGroup,
        device: Optional[torch.device] = None,
        is_sequence: bool = False,
    ) -> None:
        super().__init__()
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
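
A minimal sketch of the two-level placement this class implements: a table-wise
step that picks a host for each whole table, followed by a row-wise step that
splits the table's rows across that host's ranks. The round-robin host
assignment and helper name below are assumptions for illustration; in torchrec
the actual placement comes from the ParameterSharding plan.

def twrw_placement(num_rows: int, num_hosts: int, ranks_per_host: int, table_idx: int) -> list:
    # Table-wise step: pick a host for the whole table (round-robin here,
    # purely for illustration).
    host = table_idx % num_hosts
    # Row-wise step: split the table's rows across the ranks of that host.
    base, rem = divmod(num_rows, ranks_per_host)
    sizes = [base + (1 if r < rem else 0) for r in range(ranks_per_host)]
    ranks = [host * ranks_per_host + r for r in range(ranks_per_host)]
    return list(zip(ranks, sizes))  # (global rank, rows on that rank)

# Example: table 1 with 10 rows on a 2-host x 4-ranks-per-host topology lands
# on host 1, with its rows split across global ranks 4..7.
print(twrw_placement(num_rows=10, num_hosts=2, ranks_per_host=4, table_idx=1))
# -> [(4, 3), (5, 3), (6, 2), (7, 2)]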



