run_benchmark.py [4:29]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
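# Trade a little float32 matmul precision for speed (enables TF32
# tensor cores on Ampere and newer GPUs).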
torch.set_float32_matmul_precision("high")

import sys  # noqa: E402


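# Make the repo-local utils package importable when the script is run
# from the repository root.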
sys.path.append(".")
from utils.benchmarking_utils import (  # noqa: E402
    benchmark_fn,
    create_parser,
    generate_csv_dict,
    write_to_csv,
)
from utils.pipeline_utils import load_pipeline  # noqa: E402


def run_inference(pipe, args):
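    """Run one batched generation pass; the images are discarded since
    only the call's runtime matters to the benchmark."""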
    _ = pipe(
        prompt=args.prompt,
        num_inference_steps=args.num_inference_steps,
        num_images_per_prompt=args.batch_size,
    )


def main(args) -> dict:
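    """Load the pipeline for the requested checkpoint and return the
    benchmark results as a dict."""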
    pipeline = load_pipeline(
        ckpt=args.ckpt,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



run_benchmark_pixart.py [4:29]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
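# Allow reduced-precision (TF32) float32 matmuls for faster inference
# on recent NVIDIA GPUs.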
torch.set_float32_matmul_precision("high")

import sys  # noqa: E402


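# Ensure the repo-local utils package resolves when running from the
# repository root.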
sys.path.append(".")
from utils.benchmarking_utils import (  # noqa: E402
    benchmark_fn,
    create_parser,
    generate_csv_dict,
    write_to_csv,
)
from utils.pipeline_utils_pixart import load_pipeline  # noqa: E402


def run_inference(pipe, args):
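    """Run one batched generation pass, ignoring the output images
    (only the timing is measured)."""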
    _ = pipe(
        prompt=args.prompt,
        num_inference_steps=args.num_inference_steps,
        num_images_per_prompt=args.batch_size,
    )


def main(args) -> dict:
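    """Load the PixArt pipeline for the requested checkpoint and return
    the benchmark results as a dict."""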
    pipeline = load_pipeline(
        ckpt=args.ckpt,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
