in optimum_benchmark/backends/base.py
def __init__(self, config: BackendConfigT):
    """Seed the backend, then resolve the model's config, processor, loader
    class and input shapes according to the model library (diffusers, timm,
    llama_cpp or transformers)."""
    # `getLogger` is the stdlib logging helper; the get_*/extract_* helpers
    # below come from the sibling diffusers_utils, timm_utils and
    # transformers_utils modules of this package.
    self.config = config
    self.logger = getLogger(self.NAME)
    self.logger.info(f"Allocating {self.NAME} backend")

    self.logger.info(f"\t+ Seeding backend with {self.config.seed}")
    self.seed()

    if self.config.library == "diffusers":
        self.logger.info("\t+ Benchmarking a Diffusers pipeline")
        self.pretrained_config = get_diffusers_pretrained_config(self.config.model, **self.config.model_kwargs)
        self.automodel_loader = get_diffusers_auto_pipeline_class_for_task(self.config.task)
        self.model_shapes = extract_diffusers_shapes_from_model()
        # Diffusers pipelines bundle their own processors and have no
        # text-generation config, so these stay None.
        self.pretrained_processor = None
        self.generation_config = None
    elif self.config.library == "timm":
        self.logger.info("\t+ Benchmarking a Timm model")
        self.pretrained_config = get_timm_pretrained_config(self.config.model)
        self.model_shapes = extract_timm_shapes_from_config(self.pretrained_config)
        self.automodel_loader = get_timm_model_creator()
        self.pretrained_processor = None
        self.generation_config = None
    elif self.config.library == "llama_cpp":
        self.logger.info("\t+ Benchmarking a LlamaCpp model")
        # GGUF checkpoints carry none of the Hugging Face style artifacts,
        # so nothing is resolved up front.
        self.pretrained_processor = None
        self.pretrained_config = None
        self.generation_config = None
        self.automodel_loader = None
        self.model_shapes = {}
    else:
        self.logger.info("\t+ Benchmarking a Transformers model")
        self.automodel_loader = get_transformers_auto_model_class_for_task(self.config.task, self.config.model_type)
        self.generation_config = get_transformers_generation_config(self.config.model, **self.config.model_kwargs)
        self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs)
        self.pretrained_processor = get_transformers_pretrained_processor(
            self.config.processor, **self.config.processor_kwargs
        )
        self.model_shapes = extract_transformers_shapes_from_artifacts(
            self.pretrained_config, self.pretrained_processor
        )
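
For context, here is a minimal, self-contained sketch of the dispatch contract this initializer follows. SketchBackend and SketchConfig are hypothetical stand-ins (not part of optimum-benchmark's API); they only mirror the pattern above: a NAME class attribute, a config carrying library/model/task/seed, and one branch per library that fills the same attribute set.

# Hypothetical, self-contained sketch -- SketchBackend/SketchConfig are NOT
# optimum-benchmark classes; they only mirror the shape of Backend.__init__.
import random
from dataclasses import dataclass
from logging import getLogger


@dataclass
class SketchConfig:
    library: str = "timm"
    model: str = "resnet50"
    task: str = "image-classification"
    seed: int = 42


class SketchBackend:
    NAME = "sketch"

    def __init__(self, config: SketchConfig):
        self.config = config
        self.logger = getLogger(self.NAME)
        self.logger.info("Allocating %s backend", self.NAME)
        random.seed(self.config.seed)  # stands in for self.seed()

        if self.config.library == "timm":
            # every branch fills the same five attributes, using None (or an
            # empty dict) where a concept does not apply to the library
            self.pretrained_config = {"architecture": self.config.model}
            self.model_shapes = {"num_channels": 3, "height": 224, "width": 224}
            self.automodel_loader = None
            self.pretrained_processor = None
            self.generation_config = None
        else:
            raise NotImplementedError(f"library {self.config.library!r} is not sketched")


backend = SketchBackend(SketchConfig())
print(backend.model_shapes)  # {'num_channels': 3, 'height': 224, 'width': 224}

The design point the real initializer makes, and the sketch mirrors, is that every branch ends with the same five attributes populated (pretrained_config, automodel_loader, model_shapes, pretrained_processor, generation_config), so downstream benchmarking code can treat all four libraries uniformly.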