# def Run()
#
# in perfkitbenchmarker/linux_benchmarks/object_storage_service_benchmark.py [0:0]


def Run(benchmark_spec):
  """Run storage benchmark and publish results.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.

  Returns:
    Total throughput in the form of tuple. The tuple contains
        the sample metric (string), value (float), unit (string).
  """
  logging.info(
      'Start benchmarking object storage service, '
      'scenario is %s, storage provider is %s.',
      FLAGS.object_storage_scenario,
      FLAGS.storage,
  )

  service = benchmark_spec.service
  bucket_name = benchmark_spec.bucket_name

  metadata = {'storage_provider': FLAGS.storage}

  vms = benchmark_spec.vms
  scenario = FLAGS.object_storage_scenario

  # Record the bucket's location and locality granularity. Precedence:
  # explicit zone > explicit region > GCS multiregion > provider default.
  if zone := OBJECT_STORAGE_ZONE.value:
    metadata['bucket_location'] = zone
    metadata['bucket_locality'] = 'zonal'
  elif region := OBJECT_STORAGE_REGION.value:
    metadata['bucket_location'] = region
    metadata['bucket_locality'] = 'regional'
    metadata['regional_bucket_location'] = region
  elif multiregion := OBJECT_STORAGE_GCS_MULTIREGION.value:
    metadata['bucket_location'] = multiregion
    metadata['bucket_locality'] = 'multiregional'
  else:
    metadata['bucket_location'] = DEFAULT
    metadata['bucket_locality'] = 'regional'

  metadata.update(service.Metadata(vms[0]))

  storage_class = object_storage_service.STORAGE_CLASS.value
  if storage_class:
    metadata['object_storage_class'] = storage_class

  results = []
  test_script_path = '/tmp/run/%s' % API_TEST_SCRIPT
  try:
    # Providers without a known API script fall through to a builder that
    # reports the provider as unsupported.
    command_builder = APIScriptCommandBuilder(
        test_script_path, STORAGE_TO_API_SCRIPT_DICT[FLAGS.storage], service
    )
  except KeyError:
    command_builder = UnsupportedProviderCommandBuilder(FLAGS.storage)

  # Single-VM scenarios: each runs against vms[0] only.
  single_vm_scenarios = (
      ('cli', CLIThroughputBenchmark),
      ('api_data', OneByteRWBenchmark),
      ('api_data', SingleStreamThroughputBenchmark),
      ('api_namespace', ListConsistencyBenchmark),
  )
  for scenario_name, run_benchmark in single_vm_scenarios:
    if scenario in (scenario_name, 'all'):
      run_benchmark(
          results, metadata, vms[0], command_builder, service, bucket_name
      )

  # MultiStreamRW and MultiStreamWrite support multiple VMs, so they have a
  # slightly different calling convention than the others.
  multi_vm_scenarios = (
      ('api_multistream', MultiStreamRWBenchmark),
      ('api_multistream_writes', MultiStreamWriteBenchmark),
  )
  for scenario_name, run_benchmark in multi_vm_scenarios:
    if scenario in (scenario_name, 'all'):
      run_benchmark(
          results, metadata, vms, command_builder, service, bucket_name
      )

  # MultiStreamRead has the additional 'read_objects' parameter
  if scenario in ('api_multistream_reads', 'all'):
    metadata['cold_objects_filename'] = benchmark_spec.read_objects_filename
    metadata['cold_objects_age_hours'] = benchmark_spec.read_objects_age_hours
    MultiStreamReadBenchmark(
        results,
        metadata,
        vms,
        command_builder,
        service,
        bucket_name,
        benchmark_spec.read_objects['objects_written'],
    )

  # Empty the bucket unless the objects are being saved for a later read
  # pass or the user explicitly asked to keep the bucket.
  if (
      FLAGS.object_storage_objects_written_file_prefix is None
      and not FLAGS.object_storage_dont_delete_bucket
  ):
    service.EmptyBucket(bucket_name)

  service.UpdateSampleMetadata(results)

  return results