def build()

in easy_rec/python/builders/optimizer_builder.py [0:0]
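
Module-level context (not shown in this excerpt): build() relies on the imports below plus the helper _create_learning_rate defined elsewhere in the same file. The weight_decay_optimizers path is an assumption inferred from the easy_rec.python.compat package used by the local import inside the function.

import logging

import tensorflow as tf

from easy_rec.python.compat import weight_decay_optimizers  # assumed import path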


def build(optimizer_config):
  """Create optimizer based on config.

  Args:
    optimizer_config: An Optimizer proto message.

  Returns:
    An optimizer and a list of variables for summary.

  Raises:
    ValueError: when an unsupported optimizer type is configured.
  """
  optimizer_type = optimizer_config.WhichOneof('optimizer')
  optimizer = None

  summary_vars = []
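  # Exactly one of the branches below matches the field set in the 'optimizer' oneof.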
  if optimizer_type == 'rms_prop_optimizer':
    config = optimizer_config.rms_prop_optimizer
    learning_rate = _create_learning_rate(config.learning_rate)
    summary_vars.append(learning_rate)
    optimizer = tf.train.RMSPropOptimizer(
        learning_rate,
        decay=config.decay,
        momentum=config.momentum_optimizer_value,
        epsilon=config.epsilon)

  if optimizer_type == 'momentum_optimizer':
    config = optimizer_config.momentum_optimizer
    learning_rate = _create_learning_rate(config.learning_rate)
    summary_vars.append(learning_rate)
    optimizer = tf.train.MomentumOptimizer(
        learning_rate, momentum=config.momentum_optimizer_value)

  if optimizer_type == 'adam_optimizer':
    config = optimizer_config.adam_optimizer
    learning_rate = _create_learning_rate(config.learning_rate)
    summary_vars.append(learning_rate)
    optimizer = tf.train.AdamOptimizer(
        learning_rate, beta1=config.beta1, beta2=config.beta2)

  if optimizer_type == 'adamw_optimizer':
    config = optimizer_config.adamw_optimizer
    learning_rate = _create_learning_rate(config.learning_rate)
    summary_vars.append(learning_rate)
    logging.info('adamw_optimizer weight_decay = %.8f' % config.weight_decay)
    optimizer = weight_decay_optimizers.AdamWOptimizer(
        weight_decay=config.weight_decay,
        learning_rate=learning_rate,
        beta1=config.beta1,
        beta2=config.beta2)

  if optimizer_type == 'adam_asyncw_optimizer':
    config = optimizer_config.adam_asyncw_optimizer
    learning_rate = _create_learning_rate(config.learning_rate)
    summary_vars.append(learning_rate)
    logging.info('adam_asyncw_optimizer weight_decay = %.8f' %
                 config.weight_decay)
    optimizer = weight_decay_optimizers.AdamAsyncWOptimizer(
        weight_decay=config.weight_decay,
        learning_rate=learning_rate,
        beta1=config.beta1,
        beta2=config.beta2)

  if optimizer_type == 'lazy_adam_optimizer':
    config = optimizer_config.lazy_adam_optimizer
    learning_rate = _create_learning_rate(config.learning_rate)
    summary_vars.append(learning_rate)
    from easy_rec.python.compat.adam_s import AdamOptimizerS
    optimizer = AdamOptimizerS(
        learning_rate=learning_rate, beta1=config.beta1, beta2=config.beta2)

  if optimizer_type == 'momentumw_optimizer':
    config = optimizer_config.momentumw_optimizer
    learning_rate = _create_learning_rate(config.learning_rate)
    summary_vars.append(learning_rate)
    logging.info('momentumw_optimizer weight_decay = %.8f' %
                 config.weight_decay)
    optimizer = weight_decay_optimizers.MomentumWOptimizer(
        weight_decay=config.weight_decay,
        learning_rate=learning_rate,
        momentum=config.momentum_optimizer_value)

  if optimizer_type == 'adagrad_optimizer':
    config = optimizer_config.adagrad_optimizer
    learning_rate = _create_learning_rate(config.learning_rate)
    summary_vars.append(learning_rate)
    optimizer = tf.train.AdagradOptimizer(
        learning_rate,
        initial_accumulator_value=config.initial_accumulator_value)

  if optimizer_type == 'adam_async_optimizer':
    config = optimizer_config.adam_async_optimizer
    learning_rate = _create_learning_rate(config.learning_rate)
    summary_vars.append(learning_rate)
    optimizer = tf.train.AdamAsyncOptimizer(
        learning_rate, beta1=config.beta1, beta2=config.beta2)

  if optimizer_type == 'ftrl_optimizer':
    config = optimizer_config.ftrl_optimizer
    learning_rate = _create_learning_rate(config.learning_rate)
    summary_vars.append(learning_rate)
    optimizer = tf.train.FtrlOptimizer(
        learning_rate=learning_rate,
        learning_rate_power=config.learning_rate_power,
        initial_accumulator_value=config.initial_accumulator_value,
        l1_regularization_strength=config.l1_reg,
        l2_regularization_strength=config.l2_reg,
        l2_shrinkage_regularization_strength=config.l2_shrinkage_reg)

  if optimizer is None:
    raise ValueError('Optimizer %s not supported.' % optimizer_type)

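  # Optionally wrap the optimizer to maintain exponential moving averages of the trained variables.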
  if optimizer_config.use_moving_average:
    optimizer = tf.contrib.opt.MovingAverageOptimizer(
        optimizer, average_decay=optimizer_config.moving_average_decay)

  return optimizer, summary_vars
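

A minimal usage sketch: build an Optimizer config in text format and pass it to build(). The proto module path (easy_rec.python.protos.optimizer_pb2) and the constant_learning_rate layout are assumptions inferred from the fields referenced above, not verified against the repository's optimizer.proto.

from google.protobuf import text_format

from easy_rec.python.builders import optimizer_builder
from easy_rec.python.protos import optimizer_pb2  # assumed module path

# Hypothetical text-format config; field names mirror those read by build().
config_text = """
adam_optimizer {
  learning_rate {
    constant_learning_rate { learning_rate: 0.001 }
  }
  beta1: 0.9
  beta2: 0.999
}
use_moving_average: false
"""

optimizer_config = text_format.Parse(config_text, optimizer_pb2.Optimizer())

# build() returns the configured optimizer and the learning-rate tensors
# that callers typically add to TensorBoard summaries.
optimizer, summary_vars = optimizer_builder.build(optimizer_config)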