def _compute_epsilon()

in tensorflow_privacy/privacy/analysis/rdp_privacy_accountant.py


import math

import numpy as np


def _compute_epsilon(orders, rdp, delta):
  """Compute epsilon given a list of RDP values and target delta.

  Args:
    orders: An array of orders.
    rdp: An array of RDP guarantees.
    delta: The target delta. Must be >= 0.

  Returns:
    Optimal epsilon.

  Raises:
    ValueError: If input is malformed.
  """
  if delta < 0:
    raise ValueError(f'Delta cannot be negative. Found {delta}.')

  if delta == 0:
    if all(r == 0 for r in rdp):
      return 0
    else:
      return np.inf

  if len(orders) != len(rdp):
    raise ValueError('Input lists must have the same length.')

  # Basic bound (see https://arxiv.org/abs/1702.07476 Proposition 3 in v3):
  #   epsilon = min( rdp - math.log(delta) / (orders - 1) )

  # Improved bound from https://arxiv.org/abs/2004.00010 Proposition 12 (in v4).
  # Also appears in https://arxiv.org/abs/2001.05990 Equation 20 (in v1).
  eps = []
  for (a, r) in zip(orders, rdp):
    if a < 1:
      raise ValueError(f'Renyi divergence order must be at least 1. Found {a}.')
    if r < 0:
      raise ValueError(f'Renyi divergence cannot be negative. Found {r}.')

    if delta**2 + math.expm1(-r) > 0:
      # In this case, we can simply bound via KL divergence:
      # delta <= sqrt(1-exp(-KL)).
      epsilon = 0  # No need to try further computation if we have epsilon = 0.
    elif a > 1.01:
      # This bound is not numerically stable as alpha -> 1, so we require a
      # minimum value of alpha. The bound is also not useful for small alpha,
      # so this restriction costs nothing.
      epsilon = r + math.log1p(-1 / a) - math.log(delta * a) / (a - 1)
    else:
      # In this case we can't do anything. E.g., asking for delta = 0.
      epsilon = np.inf
    eps.append(epsilon)

  return max(0, np.min(eps))
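
A minimal usage sketch, assuming the RDP values come from a Gaussian mechanism with noise multiplier sigma, whose RDP at order alpha is alpha / (2 * sigma**2); the sigma, orders grid, and delta below are illustrative choices, not values prescribed by the library.

sigma = 1.1  # Hypothetical noise multiplier, chosen for illustration.
orders = [1.5, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0]
# Closed-form RDP of the Gaussian mechanism with sensitivity 1.
rdp = [a / (2 * sigma**2) for a in orders]
eps = _compute_epsilon(orders, rdp, delta=1e-5)
print(f'epsilon = {eps:.4f} at delta = 1e-5')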
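
For comparison, a sketch of the basic conversion quoted in the comment above (Proposition 3 of https://arxiv.org/abs/1702.07476); the helper name is hypothetical, and it assumes every order is strictly greater than 1.

def _compute_epsilon_basic(orders, rdp, delta):
  # Basic RDP-to-DP conversion:
  #   eps = min over alpha of ( rdp + log(1/delta) / (alpha - 1) ).
  return max(0, np.min([r + math.log(1 / delta) / (a - 1)
                        for a, r in zip(orders, rdp)]))

# For orders comfortably above 1, the improved per-order bound used by
# _compute_epsilon is never larger than this basic one.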