tensorflow_ranking/python/keras/losses.py [491:548]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  r"""Computes Softmax cross-entropy loss between `y_true` and `y_pred`.

  For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:

  ```
  loss = - sum_i y_i * log(softmax(s_i))
  ```

  Standalone usage:

  >>> y_true = [[1., 0.]]
  >>> y_pred = [[0.6, 0.8]]
  >>> loss = tfr.keras.losses.SoftmaxLoss()
  >>> loss(y_true, y_pred).numpy()
  0.7981389

  >>> # Using ragged tensors
  >>> y_true = tf.ragged.constant([[1., 0.], [0., 1., 0.]])
  >>> y_pred = tf.ragged.constant([[0.6, 0.8], [0.5, 0.8, 0.4]])
  >>> loss = tfr.keras.losses.SoftmaxLoss(ragged=True)
  >>> loss(y_true, y_pred).numpy()
  0.83911896

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd', loss=tfr.keras.losses.SoftmaxLoss())
  ```

  Definition:

  $$
  \mathcal{L}(\{y\}, \{s\}) =
  - \sum_i y_i \cdot \log\left(\frac{\exp(s_i)}{\sum_j \exp(s_j)}\right)
  $$
  """

  def __init__(self,
               reduction=tf.losses.Reduction.AUTO,
               name=None,
               lambda_weight=None,
               temperature=1.0,
               ragged=False):
    """Softmax cross-entropy loss.

    Args:
      reduction: (Optional) The `tf.keras.losses.Reduction` to use (see
        `tf.keras.losses.Loss`).
      name: (Optional) The name for the op.
      lambda_weight: (Optional) A lambda weight to apply to the loss. Can be
        one of `tfr.keras.losses.DCGLambdaWeight`,
        `tfr.keras.losses.NDCGLambdaWeight`, or
        `tfr.keras.losses.PrecisionLambdaWeight`.
      temperature: (Optional) The temperature to use for scaling the logits.
      ragged: (Optional) If True, this loss will accept ragged tensors. If
        False, this loss will accept dense tensors.
    """
    super().__init__(reduction, name, lambda_weight, temperature, ragged)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
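
As a sanity check on the definition above, the loss for the dense standalone
example can be reproduced directly with core TensorFlow ops. This is a minimal
sketch, not part of the library, assuming eager execution:

```python
import tensorflow as tf

y_true = tf.constant([[1., 0.]])
y_pred = tf.constant([[0.6, 0.8]])

# loss = - sum_i y_i * log(softmax(s_i)), computed per list.
log_softmax = tf.nn.log_softmax(y_pred, axis=-1)
manual_loss = -tf.reduce_sum(y_true * log_softmax, axis=-1)
print(manual_loss.numpy())  # ~[0.7981389], matching SoftmaxLoss above.
```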



tensorflow_ranking/python/keras/losses.py [628:703]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  r"""Computes ListMLE loss between `y_true` and `y_pred`.

  Implements ListMLE loss ([Xia et al., 2008][xia2008]). For each list of scores
  `s` in `y_pred` and list of labels `y` in `y_true`:

  ```
  loss = - log P(permutation_y | s)
  P(permutation_y | s) = Plackett-Luce probability of permutation_y given s
  permutation_y = permutation of items sorted by decreasing labels y
  ```

  NOTE: This loss is stochastic because ties in `y_true` are broken randomly,
  so it may return different values for identical inputs.

  Standalone usage:

  >>> tf.random.set_seed(42)
  >>> y_true = [[1., 0.]]
  >>> y_pred = [[0.6, 0.8]]
  >>> loss = tfr.keras.losses.ListMLELoss()
  >>> loss(y_true, y_pred).numpy()
  0.7981389

  >>> # Using ragged tensors
  >>> tf.random.set_seed(42)
  >>> y_true = tf.ragged.constant([[1., 0.], [0., 1., 0.]])
  >>> y_pred = tf.ragged.constant([[0.6, 0.8], [0.5, 0.8, 0.4]])
  >>> loss = tfr.keras.losses.ListMLELoss(ragged=True)
  >>> loss(y_true, y_pred).numpy()
  1.1613163

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd', loss=tfr.keras.losses.ListMLELoss())
  ```

  Definition:

  $$
  \mathcal{L}(\{y\}, \{s\}) = - \log(P(\pi_y | s))
  $$

  where $P(\pi_y | s)$ is the Plackett-Luce probability of a permutation
  $\pi_y$ conditioned on scores $s$. Here $\pi_y$ represents a permutation of
  items sorted by decreasing relevance labels $y$, with ties broken randomly.

  References:
    - [Listwise approach to learning to rank: theory and algorithm, Xia et al.,
       2008][xia2008]

  [xia2008]: https://dl.acm.org/doi/10.1145/1390156.1390306
  """

  def __init__(self,
               reduction=tf.losses.Reduction.AUTO,
               name=None,
               lambda_weight=None,
               temperature=1.0,
               ragged=False):
    """ListMLE loss.

    Args:
      reduction: (Optional) The `tf.keras.losses.Reduction` to use (see
        `tf.keras.losses.Loss`).
      name: (Optional) The name for the op.
      lambda_weight: (Optional) A lambda weight to apply to the loss. Can be
        one of `tfr.keras.losses.DCGLambdaWeight`,
        `tfr.keras.losses.NDCGLambdaWeight`,
        `tfr.keras.losses.PrecisionLambdaWeight`, or
        `tfr.keras.losses.ListMLELambdaWeight`.
      temperature: (Optional) The temperature to use for scaling the logits.
      ragged: (Optional) If True, this loss will accept ragged tensors. If
        False, this loss will accept dense tensors.
    """
    super().__init__(reduction, name, lambda_weight, temperature, ragged)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
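
For intuition, the Plackett-Luce probability in the definition above can be
unrolled by hand for the dense standalone example. This is a minimal sketch,
not part of the library; since the labels `[1., 0.]` contain no ties, the
result here is deterministic:

```python
import tensorflow as tf

scores = tf.constant([0.6, 0.8])
perm = [0, 1]  # Items sorted by decreasing labels [1., 0.].

# log P(pi_y | s) = sum_k [ s_{pi_k} - logsumexp(s_{pi_k}, ..., s_{pi_n}) ]
log_prob = 0.0
for k in range(len(perm)):
    remaining = tf.gather(scores, perm[k:])
    log_prob += scores[perm[k]] - tf.reduce_logsumexp(remaining)
print((-log_prob).numpy())  # ~0.7981389, matching ListMLELoss above.
```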



