in tensorflow_examples/lite/model_maker/core/task/model_spec/object_detector_spec.py [0:0]
def __init__(self,
             model_name: str,
             uri: str,
             hparams: str = '',
             model_dir: Optional[str] = None,
             epochs: int = 50,
             batch_size: int = 64,
             steps_per_execution: int = 1,
             moving_average_decay: float = 0,
             var_freeze_expr: str = '(efficientnet|fpn_cells|resample_p6)',
             tflite_max_detections: int = 25,
             strategy: Optional[str] = None,
             tpu: Optional[str] = None,
             gcp_project: Optional[str] = None,
             tpu_zone: Optional[str] = None,
             use_xla: bool = False,
             profile: bool = False,
             debug: bool = False,
             tf_random_seed: int = 111111,
             verbose: int = 0) -> None:
  """Initialize an instance with model parameters.

  Note: this constructor has process-wide side effects — it configures XLA
  JIT, GPU memory growth, eager/debug mode, the distribution strategy, and
  the Keras mixed-precision policy for the whole TF runtime.

  Args:
    model_name: Model name.
    uri: TF-Hub path/url to EfficientDet module.
    hparams: Hyperparameters used to overwrite default configuration. Can be
      1) Dict, contains parameter names and values; 2) String, Comma separated
      k=v pairs of hyperparameters; 3) String, yaml filename which is a module
      containing attributes to use as hyperparameters.
    model_dir: The location to save the model checkpoint files. If None, a
      temporary directory is created and used instead.
    epochs: Default training epochs.
    batch_size: Training & Evaluation batch size.
    steps_per_execution: Number of steps per training execution.
    moving_average_decay: Float. The decay to use for maintaining moving
      averages of the trained parameters.
    var_freeze_expr: Expression to freeze variables.
    tflite_max_detections: The max number of output detections in the TFLite
      model.
    strategy: A string specifying which distribution strategy to use.
      Accepted values are 'tpu', 'gpus', None. 'tpu' means to use TPUStrategy.
      'gpus' means to use MirroredStrategy for multi-gpus. If None, use TF
      default with OneDeviceStrategy.
    tpu: The Cloud TPU to use for training. This should be either the name
      used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470
      url.
    gcp_project: Project name for the Cloud TPU-enabled project. If not
      specified, we will attempt to automatically detect the GCE project from
      metadata.
    tpu_zone: GCE zone where the Cloud TPU is located in. If not specified, we
      will attempt to automatically detect the GCE project from metadata.
    use_xla: Use XLA even if strategy is not tpu. If strategy is tpu, always
      use XLA, and this flag has no effect.
    profile: Enable profile mode.
    debug: Enable debug mode.
    tf_random_seed: Fixed random seed for deterministic execution across runs
      for debugging.
    verbose: verbosity mode for `tf.keras.callbacks.ModelCheckpoint`, 0 or 1.
  """
  self.model_name = model_name
  self.uri = uri
  self.batch_size = batch_size

  # Start from the default EfficientDet config for this model name, then
  # layer the caller's hparams and explicit keyword overrides on top.
  config = hparams_config.get_efficientdet_config(model_name)
  config.override(hparams)
  config.image_size = utils.parse_image_size(config.image_size)
  config.var_freeze_expr = var_freeze_expr
  config.moving_average_decay = moving_average_decay
  config.tflite_max_detections = tflite_max_detections
  # NOTE(review): a falsy value (epochs=0) leaves config.num_epochs at the
  # EfficientDet default rather than setting it to 0.
  if epochs:
    config.num_epochs = epochs

  # TPU strategy always uses XLA, so only enable JIT explicitly for the
  # non-TPU case; memory growth keeps TF from grabbing all GPU memory.
  if use_xla and strategy != 'tpu':
    tf.config.optimizer.set_jit(True)
    for gpu in tf.config.list_physical_devices('GPU'):
      tf.config.experimental.set_memory_growth(gpu, True)

  if debug:
    # Run tf.functions eagerly, log device placement, and pin all random
    # seeds / determinism knobs so runs are reproducible and debuggable.
    tf.config.experimental_run_functions_eagerly(True)
    tf.debugging.set_log_device_placement(True)
    os.environ['TF_DETERMINISTIC_OPS'] = '1'
    tf.random.set_seed(tf_random_seed)
    logging.set_verbosity(logging.DEBUG)

  # Select the distribution strategy: TPU, multi-GPU, or a single device.
  if strategy == 'tpu':
    # Cluster must be connected and the TPU system initialized before
    # constructing TPUStrategy.
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        tpu, zone=tpu_zone, project=gcp_project)
    tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
    tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
    ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
    logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
    # Let TF place ops that have no TPU kernel on the CPU instead of failing.
    tf.config.set_soft_device_placement(True)
  elif strategy == 'gpus':
    ds_strategy = tf.distribute.MirroredStrategy()
    logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
  else:
    # Default: one device — first GPU if available, otherwise CPU.
    if tf.config.list_physical_devices('GPU'):
      ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
    else:
      ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')
  self.ds_strategy = ds_strategy

  if model_dir is None:
    model_dir = tempfile.mkdtemp()
  # Fold the run-level parameters into the config; the second argument
  # presumably allows adding keys not present in the default config — TODO
  # confirm against hparams_config.Config.override.
  params = dict(
      profile=profile,
      model_name=model_name,
      steps_per_execution=steps_per_execution,
      model_dir=model_dir,
      strategy=strategy,
      batch_size=batch_size,
      tf_random_seed=tf_random_seed,
      debug=debug,
      verbose=verbose)
  config.override(params, True)
  self.config = config

  # set mixed precision policy by keras api.
  precision = utils.get_precision(config.strategy, config.mixed_precision)
  policy = tf.keras.mixed_precision.experimental.Policy(precision)
  tf.keras.mixed_precision.experimental.set_policy(policy)