# generate_csv_dict() — excerpt from utils/benchmarking_utils.py


def generate_csv_dict(pipeline_cls: str, args, time: float) -> Dict[str, Union[str, bool, float]]:
    """Packs benchmarking data into a dictionary for later serialization.

    Args:
        pipeline_cls: Name of the benchmarked pipeline class. A "PixArt"
            substring switches the UNet-specific fields to their
            transformer equivalents.
        args: Parsed CLI namespace carrying the benchmark configuration
            (ckpt, no_bf16, no_sdpa, batch_size, device, ...).
        time: Measured inference time in seconds.

    Returns:
        Dict mapping CSV column names to the recorded values.
    """
    is_pixart = "PixArt" in pipeline_cls  # hoisted: needed three times below
    data_dict = {
        "pipeline_cls": pipeline_cls,
        "ckpt_id": args.ckpt,
        # The CLI flags are negations ("--no_bf16"), so invert for reporting.
        "bf16": not args.no_bf16,
        "sdpa": not args.no_sdpa,
        "fused_qkv_projections": args.enable_fused_projections,
        # PixArt pipelines have no VAE-upcasting option, so record "NA".
        "upcast_vae": "NA" if is_pixart else args.upcast_vae,
        "batch_size": args.batch_size,
        "num_inference_steps": args.num_inference_steps,
        # For PixArt the denoiser is a transformer; the key is renamed below.
        "compile_unet": args.compile_transformer if is_pixart else args.compile_unet,
        "compile_vae": args.compile_vae,
        "compile_mode": args.compile_mode,
        "change_comp_config": args.change_comp_config,
        "do_quant": args.do_quant,
        "time (secs)": time,
        "tag": args.tag,
    }
    if args.device == "cuda":
        # Peak allocation observed so far on the current device, in GiB.
        memory = bytes_to_giga_bytes(torch.cuda.max_memory_allocated())  # in GBs.
        TOTAL_GPU_MEMORY = torch.cuda.get_device_properties(0).total_memory / (1024**3)
        data_dict["memory (gbs)"] = memory
        data_dict["actual_gpu_memory (gbs)"] = f"{(TOTAL_GPU_MEMORY):.3f}"
    if is_pixart:
        # Rename via pop + re-insert so "compile_transformer" lands at the
        # end of the dict, keeping the serialized column order stable.
        data_dict["compile_transformer"] = data_dict.pop("compile_unet")
    return data_dict