The distribution of file sizes, measured in lines of code (a sketch for reproducing the count follows the table):
File | # lines | # units |
---|---|---|
__init__.pyi in stubs/torch | 1856 | 1232 |
fully_sharded_data_parallel.py in fairscale/nn/data_parallel | 1260 | 74 |
distributed.py in fairscale/experimental/nn/data_parallel/gossip | 758 | 47 |
experimental_async_approaches.py in benchmarks/experimental | 597 | 48 |
testing.py in fairscale/utils | 495 | 35 |
layer_memory_tracker.py in fairscale/experimental/tooling | 465 | 49 |
async_schedule.py in fairscale/nn/pipe | 364 | 24 |
sharded_ddp.py in fairscale/nn/data_parallel | 352 | 20 |
oss.py in fairscale/optim | 339 | 19 |
offload.py in benchmarks/experimental | 328 | 20 |
mevo.py in fairscale/experimental/nn | 317 | 24 |
ampnet.py in fairscale/experimental/nn/ampnet_pipe | 303 | 13 |
flatten_params_wrapper.py in fairscale/nn/misc | 303 | 29 |
offload.py in fairscale/experimental/nn | 281 | 18 |
oss.py in benchmarks | 254 | 6 |
grad_scaler.py in fairscale/optim | 247 | 12 |
fused_adam_cuda_kernel.cu in fairscale/clib/fused_adam_cuda | 246 | - |
pipe.py in benchmarks | 235 | 13 |
adascale.py in fairscale/optim | 226 | 23 |
async_pipe.py in fairscale/nn/pipe | 225 | 15 |
layers.py in fairscale/nn/model_parallel | 225 | 11 |
rpc.py in fairscale/nn/pipe | 200 | 18 |
graph_manager.py in fairscale/experimental/nn/data_parallel/gossip | 197 | 45 |
ssd_offload.py in fairscale/experimental/nn | 197 | 24 |
adam.py in fairscale/optim | 192 | 10 |
pipe.py in fairscale/nn/pipe | 182 | 13 |
gossiper.py in fairscale/experimental/nn/data_parallel/gossip | 181 | 13 |
skippable.py in fairscale/nn/pipe/skip | 179 | 13 |
partition_handler.py in fairscale/experimental/nn/distributed_pipeline | 178 | 15 |
functional.pyi in stubs/torch/nn | 177 | 80 |
checkpoint_activations.py in fairscale/nn/checkpoint | 167 | 14 |
checkpoint.py in fairscale/nn/pipe | 165 | 16 |
pooling.pyi in stubs/torch/nn/modules | 147 | 50 |
param_bucket.py in fairscale/nn/misc | 145 | 19 |
layerwise_gradient_scaler.py in fairscale/optim | 145 | 13 |
pipeline.py in fairscale/nn/pipe | 143 | 11 |
fsdp_optim_utils.py in fairscale/nn/data_parallel | 137 | 6 |
sync_batchnorm.py in fairscale/experimental/nn | 133 | 8 |
dynamic_loss_scaler.py in fairscale/experimental/optim | 124 | 11 |
utils.py in benchmarks | 124 | 10 |
auto_shard.py in fairscale/experimental/nn | 121 | 7 |
portal.py in fairscale/nn/pipe/skip | 119 | 16 |
activation.pyi in stubs/torch/nn/modules | 117 | 62 |
pipeline.py in fairscale/experimental/nn/distributed_pipeline | 117 | 8 |
messages.py in fairscale/nn/pipe | 115 | 16 |
transformer_lm.py in benchmarks/models | 115 | 15 |
graph.py in fairscale/experimental/nn/distributed_pipeline | 114 | 13 |
random.py in fairscale/nn/model_parallel | 114 | 12 |
multi_tensor_apply.cuh in fairscale/clib/fused_adam_cuda | 111 | - |
auto_wrap.py in fairscale/nn/wrap | 108 | 11 |
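As a reproduction aid, here is a minimal sketch of how per-file line counts like the ones above could be gathered. It assumes the report counts non-blank, non-comment lines and scans only Python sources (`.py`/`.pyi`); the original tool's exact counting rules are not documented here, and the table also covers CUDA sources (`.cu`/`.cuh`), which this sketch skips.

```python
from pathlib import Path

def count_lines(path: Path) -> int:
    """Count non-blank, non-comment lines in a source file."""
    total = 0
    for line in path.read_text(encoding="utf-8", errors="ignore").splitlines():
        stripped = line.strip()
        if stripped and not stripped.startswith("#"):
            total += 1
    return total

# Assumed: run from the repository root.
repo = Path(".")
sizes = {str(p): count_lines(p) for p in repo.rglob("*") if p.suffix in {".py", ".pyi"}}

# Print the ten largest files, mirroring the table layout.
for name, n in sorted(sizes.items(), key=lambda kv: kv[1], reverse=True)[:10]:
    print(f"{name} | {n}")
```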
The 20 files with the most units (a sketch of one way to count units follows the table):

File | # lines | # units |
---|---|---|
__init__.pyi in stubs/torch | 1856 | 1232 |
functional.pyi in stubs/torch/nn | 177 | 80 |
fully_sharded_data_parallel.py in fairscale/nn/data_parallel | 1260 | 74 |
activation.pyi in stubs/torch/nn/modules | 117 | 62 |
container.pyi in stubs/torch/nn/modules | 81 | 59 |
loss.pyi in stubs/torch/nn/modules | 100 | 57 |
pooling.pyi in stubs/torch/nn/modules | 147 | 50 |
layer_memory_tracker.py in fairscale/experimental/tooling | 465 | 49 |
experimental_async_approaches.py in benchmarks/experimental | 597 | 48 |
distributed.py in fairscale/experimental/nn/data_parallel/gossip | 758 | 47 |
graph_manager.py in fairscale/experimental/nn/data_parallel/gossip | 197 | 45 |
__init__.pyi in stubs/torch/cuda | 69 | 43 |
rnn.pyi in stubs/torch/nn/modules | 81 | 40 |
module.pyi in stubs/torch/nn/modules | 57 | 38 |
testing.py in fairscale/utils | 495 | 35 |
flatten_params_wrapper.py in fairscale/nn/misc | 303 | 29 |
__init__.pyi in stubs/torch/distributed | 68 | 28 |
mevo.py in fairscale/experimental/nn | 317 | 24 |
ssd_offload.py in fairscale/experimental/nn | 197 | 24 |
async_schedule.py in fairscale/nn/pipe | 364 | 24 |
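The report does not define "unit". Assuming a unit roughly corresponds to a function or method definition (which fits the dominance of `.pyi` stub files full of signatures above), a comparable count could be approximated with Python's `ast` module; whether the original tool also counts classes or other constructs is unknown.

```python
import ast
from pathlib import Path

def count_units(path: Path) -> int:
    """Count function and method definitions in a Python source file.

    Assumption: a "unit" in the report is a (possibly async) function
    or method; the original tool's definition is not documented here.
    """
    tree = ast.parse(path.read_text(encoding="utf-8", errors="ignore"))
    return sum(
        isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef))
        for node in ast.walk(tree)
    )

# Example: the largest non-stub file in the tables above.
print(count_units(Path("fairscale/nn/data_parallel/fully_sharded_data_parallel.py")))
```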
There are 30 files with lines longer than 120 characters, for a total of 275 long lines. The 20 files with the most long lines are listed below; a sketch for reproducing the count follows the table.
File | # lines | # units | # long lines |
---|---|---|---|
__init__.pyi in stubs/torch | 1856 | 1232 | 225 |
__init__.pyi in stubs/torch/distributed | 68 | 28 | 6 |
lr_scheduler.pyi in stubs/torch/optim | 31 | 17 | 5 |
__init__.pyi in stubs/torch/autograd | 37 | 21 | 4 |
fully_sharded_data_parallel.py in fairscale/nn/data_parallel | 1260 | 74 | 4 |
module.pyi in stubs/torch/nn/modules | 57 | 38 | 3 |
loss.pyi in stubs/torch/nn/modules | 100 | 57 | 2 |
rnn.pyi in stubs/torch/nn/modules | 81 | 40 | 2 |
grad_scaler.pyi in stubs/torch/cuda/amp | 12 | 5 | 2 |
ssd_offload.py in fairscale/experimental/nn | 197 | 24 | 2 |
data_parallel.pyi in stubs/torch/nn/parallel | 18 | 4 | 1 |
functional.pyi in stubs/torch/nn | 177 | 80 | 1 |
__init__.pyi in stubs/torch/nn/modules | 48 | - | 1 |
linear.pyi in stubs/torch/nn/modules | 28 | 11 | 1 |
activation.pyi in stubs/torch/nn/modules | 117 | 62 | 1 |
adam.pyi in stubs/torch/optim | 4 | 1 | 1 |
sgd.pyi in stubs/torch/optim | 3 | 1 | 1 |
__init__.pyi in stubs/torch/utils/data | 6 | - | 1 |
__init__.pyi in stubs/torch/testing | 2 | 1 | 1 |
functional.pyi in stubs/torch | 5 | 3 | 1 |
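A minimal sketch for reproducing the long-line counts, using the 120-character threshold stated above. The file selection (`.py`/`.pyi` only) and encoding handling are assumptions; the original tool may scan more file types.

```python
from collections import Counter
from pathlib import Path

THRESHOLD = 120  # from the report: "lines longer than 120 characters"

long_lines = Counter()
for path in Path(".").rglob("*"):  # assumed: run from the repository root
    if path.suffix not in {".py", ".pyi"}:
        continue
    for line in path.read_text(encoding="utf-8", errors="ignore").splitlines():
        if len(line) > THRESHOLD:
            long_lines[str(path)] += 1

# Totals comparable to "30 files" / "275 long lines" above, then the top 20.
print(f"{len(long_lines)} files, {sum(long_lines.values())} long lines in total")
for name, n in long_lines.most_common(20):
    print(f"{name} | {n}")
```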