configs/imagenet.yaml:
wandb:
  entity: null
experiment:
  project: "muse"
  name: "imagenet"
  output_dir: "imagenet"
  max_train_examples: 1281167 # total number of ImageNet train examples
  max_eval_examples: 12800
  save_every: 1000
  eval_every: 500
  generate_every: 1000
  log_every: 50
  log_grad_norm_every: 100
  resume_from_checkpoint: False
  resume_lr_scheduler: True
model:
  vq_model:
    type: "maskgit_vqgan"
    pretrained: "openMUSE/maskgit-vqgan-imagenet-f16-256"
  transformer:
    vocab_size: 2048 # 1024 VQ codes + 1000 ImageNet classes + 1 <mask> = 2025; padded to 2048 for even division by 8
    max_position_embeddings: 264 # 256 VQ tokens + 1 class id = 257; padded to 264 for even division by 8
    hidden_size: 768
    num_hidden_layers: 24
    num_attention_heads: 16
    intermediate_size: 3072
    codebook_size: 1024
    num_vq_tokens: 256
    num_classes: 1000
    initializer_range: 0.02
    norm_type: "layernorm"
    layer_norm_eps: 1e-6
    use_normformer: True
    use_encoder_layernorm: True
    use_mlm_layer: True
    use_mlm_layernorm: True
    use_bias: False
    hidden_dropout: 0.0
    attention_dropout: 0.0
  gradient_checkpointing: True
  enable_xformers_memory_efficient_attention: True
dataset:
  params:
    train_shards_path_or_url: "pipe:aws s3 cp s3://muse-datasets/imagenet-wds/imagenet-train-{000000..000320}.tar -"
    eval_shards_path_or_url: "pipe:aws s3 cp s3://muse-datasets/imagenet-wds/imagenet-val-{000000..000012}.tar -"
    batch_size: ${training.batch_size}
    shuffle_buffer_size: 1000
    num_workers: 4
    resolution: 256
    pin_memory: True
    persistent_workers: True
  preprocessing:
    resolution: 256
    center_crop: True
    random_flip: False
optimizer:
  name: fused_adamw
  params: # default AdamW params
    learning_rate: 1e-4
    scale_lr: False # scale learning rate by total batch size
    beta1: 0.9
    beta2: 0.999
    weight_decay: 0.01
    epsilon: 1e-8
lr_scheduler:
  scheduler: "constant_with_warmup"
  params:
    learning_rate: ${optimizer.params.learning_rate}
    warmup_steps: 1000
training:
  gradient_accumulation_steps: 1
  batch_size: 192 # on 80GB A100
  mixed_precision: "bf16"
  enable_tf32: True
  use_ema: False
  seed: 42
  max_train_steps: 50000
  overfit_one_batch: False
  min_masking_rate: 0.0
  label_smoothing: 0.0
  max_grad_norm: null
  # related to vae code sampling
  use_soft_code_target: False
  use_stochastic_code: False
  soft_code_temp: 1.0
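
The ${...} references (e.g. ${training.batch_size}) are OmegaConf-style interpolations, so the file is meant to be read as a nested config rather than plain YAML. Below is a minimal loading sketch under that assumption; the loader path, the num_gpus value, and the effective-batch-size arithmetic are illustrative, not taken from this config.

# Minimal loading sketch, assuming OmegaConf-style interpolation (suggested by
# the ${...} references). The path and num_gpus value are assumptions.
from omegaconf import OmegaConf

config = OmegaConf.load("configs/imagenet.yaml")

# Interpolations resolve on access: dataset.params.batch_size follows
# training.batch_size (192) without being duplicated in the file.
assert config.dataset.params.batch_size == config.training.batch_size

# Hypothetical effective batch size for a multi-GPU run; the GPU count is not
# part of the config.
num_gpus = 8  # assumption
effective_batch = (
    config.training.batch_size
    * config.training.gradient_accumulation_steps
    * num_gpus
)
print(f"effective batch size per optimizer step: {effective_batch}")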
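
The optimizer and lr_scheduler sections map onto standard PyTorch pieces. The sketch below is one plausible wiring, assuming "fused_adamw" means torch.optim.AdamW with fused=True and that "constant_with_warmup" is handled by the get_scheduler helper from diffusers; the model here is a placeholder, not the actual MUSE transformer.

# Sketch of building the optimizer and LR schedule from the config; the
# fused_adamw -> AdamW(fused=True) mapping and the diffusers get_scheduler
# helper are assumptions, and `model` is a stand-in module.
import torch
from diffusers.optimization import get_scheduler
from omegaconf import OmegaConf

config = OmegaConf.load("configs/imagenet.yaml")
device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Linear(8, 8).to(device)  # placeholder for the transformer

opt_cfg = config.optimizer.params
optimizer = torch.optim.AdamW(
    model.parameters(),
    lr=float(opt_cfg.learning_rate),   # float() in case 1e-4 is parsed as a string
    betas=(opt_cfg.beta1, opt_cfg.beta2),
    weight_decay=opt_cfg.weight_decay,
    eps=float(opt_cfg.epsilon),
    fused=torch.cuda.is_available(),   # "fused_adamw"; fused kernels need CUDA params
)

lr_scheduler = get_scheduler(
    config.lr_scheduler.scheduler,     # "constant_with_warmup"
    optimizer=optimizer,
    num_warmup_steps=config.lr_scheduler.params.warmup_steps,
    num_training_steps=config.training.max_train_steps,
)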
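
training.min_masking_rate only has meaning relative to the masking schedule used during training. As a rough illustration (the cosine schedule here is an assumption based on the MaskGIT/MUSE papers, not something stated in this config), it acts as a floor on the per-batch masking rate:

# Illustrative masking-rate sampling with a MaskGIT-style cosine schedule;
# the schedule is an assumption, only min_masking_rate comes from the config.
import math
import torch

def sample_masking_rate(batch_size: int, min_masking_rate: float = 0.0) -> torch.Tensor:
    t = torch.rand(batch_size)              # uniform timestep in [0, 1)
    rate = torch.cos(t * math.pi * 0.5)     # cosine schedule mapping t to a rate in (0, 1]
    return rate.clamp(min=min_masking_rate) # config floor (0.0 here, i.e. no floor)

rates = sample_masking_rate(batch_size=4, min_masking_rate=0.0)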