kfac/python/ops/layer_collection.py [1656:1695]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  def register_multi_bernoulli_predictive_distribution(self,
                                                       logits,
                                                       seed=None,
                                                       targets=None,
                                                       name=None,
                                                       coeff=1.0,
                                                       reuse=VARIABLE_SCOPE):
    """Registers a multi-Bernoulli predictive distribution.

    Corresponds to losses computed using
    tf.nn.sigmoid_cross_entropy_with_logits.

    Note that this is distinct from
    register_categorical_predictive_distribution and should not be confused
    with it.
    Args:
      logits: The logits of the distribution (i.e. its parameters). The first
        dimension must be the batch size.
      seed: The seed for the RNG (for debugging). (Default: None)
      targets: (OPTIONAL) The targets for the loss function. Only required if
        one wants to use the "empirical Fisher" instead of the true Fisher
        (which is controlled by the 'estimation_mode' argument to the
        optimizer). (Default: None)
      name: (OPTIONAL) str or None. Unique name for this loss function. If None,
        a new name is generated. (Default: None)
      coeff: (OPTIONAL) a float or TF scalar. A coefficient to multiply the
        log prob loss associated with this distribution. The Fisher will be
        multiplied by the corresponding factor. This is NOT equivalent to
        changing the temperature of the distribution since we don't renormalize
        the log prob in the objective function. (Default: 1.0)
      reuse: (OPTIONAL) bool or str.  If True, this adds 'logits' as an
        additional mini-batch/tower of inputs to the loss-function/predictive
        distribution (which must have already been registered). If
        "VARIABLE_SCOPE", use tf.get_variable_scope().reuse.
        (Default: "VARIABLE_SCOPE")
    """
    loss = lf.MultiBernoulliNegativeLogProbLoss(logits, targets=targets,
                                                seed=seed)
    self._register_loss_function(loss, logits,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
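
A minimal usage sketch (not part of this file), assuming the standalone kfac package exposing `kfac.LayerCollection` and a TF1-style graph; the model and variable names here are illustrative:

```python
import tensorflow as tf
import kfac  # assumes the standalone tensorflow/kfac package

# A toy model whose outputs parameterize independent Bernoullis
# (e.g. multi-label classification), matching losses computed with
# tf.nn.sigmoid_cross_entropy_with_logits.
inputs = tf.placeholder(tf.float32, shape=[None, 784])
logits = tf.layers.dense(inputs, units=10)

lc = kfac.LayerCollection()
# Omitting targets lets K-FAC sample targets from the model's own
# predictive distribution (the "true" Fisher); passing targets= instead
# enables the empirical Fisher, subject to the optimizer's
# estimation_mode.
lc.register_multi_bernoulli_predictive_distribution(logits)
```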



kfac/python/ops/layer_collection.py [1700:1735]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  def register_sigmoid_cross_entropy_loss(self,
                                          logits,
                                          seed=None,
                                          targets=None,
                                          name=None,
                                          coeff=1.0,
                                          reuse=VARIABLE_SCOPE):
    """Registers a sigmoid cross-entropy loss function.

    Corresponds to losses computed using
    tf.nn.sigmoid_cross_entropy_with_logits.

    Note that this is distinct from register_softmax_cross_entropy_loss and
    should not be confused with it. It is similar to
    register_multi_bernoulli_predictive_distribution but without the explicit
    probabilistic interpretation. It behaves identically for now.

    Args:
      logits: The logits tensor. The first dimension must be the batch size.
      seed: The seed for the RNG (for debugging). (Default: None)
      targets: (OPTIONAL) The targets for the loss function. Only required if
        one wants to use the "empirical Fisher" instead of the true Fisher
        (which is controlled by the 'estimation_mode' argument to the
        optimizer). (Default: None)
      name: (OPTIONAL) str or None. Unique name for this loss function. If None,
        a new name is generated. (Default: None)
      coeff: (OPTIONAL) a float or TF scalar. A coefficient to multiply the
        loss function by. (Default: 1.0)
      reuse: (OPTIONAL) bool or str.  If True, this adds 'logits' as an
        additional mini-batch/tower of inputs to the loss-function/predictive
        distribution (which must have already been registered). If
        "VARIABLE_SCOPE", use tf.get_variable_scope().reuse.
        (Default: "VARIABLE_SCOPE")
    """
    loss = lf.MultiBernoulliNegativeLogProbLoss(logits, targets=targets,
                                                seed=seed)
    self._register_loss_function(loss, logits,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
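
A sketch of the `reuse` semantics for this registration form, again illustrative rather than taken from the library: the first call registers the loss, and later calls with `reuse=True` append additional towers of logits to it. `tower_logits_list` is an assumed list of per-tower logits tensors computed with shared variables:

```python
for i, tower_logits in enumerate(tower_logits_list):
  lc.register_sigmoid_cross_entropy_loss(
      tower_logits,
      name="sigmoid_xent",  # same name ties all towers to one loss
      reuse=(i > 0))        # False registers; True appends a tower
```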



