tensorflow_similarity/losses/pn_loss.py [34:97]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            embeddings: FloatTensor,
            distance: Callable,
            positive_mining_strategy: str = 'hard',
            negative_mining_strategy: str = 'semi-hard',
            soft_margin: bool = False,
            margin: float = 1.0) -> Any:
    """Positive Negative loss computations

    Based on the pn loss used in IVIS.

    Args:
        labels: Labels associated with the embeddings.

        embeddings: Embedded examples.

        distance: Which distance function to use to compute the pairwise
        distances between embeddings. Defaults to 'cosine'.

        positive_mining_strategy: What mining strategy to use to select an
        embedding from the same class. Defaults to 'hard'.
        Available: {'easy', 'hard'}

        negative_mining_strategy: What mining strategy to use to select an
        embedding from a different class. Defaults to 'semi-hard'.
        Available: {'hard', 'semi-hard', 'easy'}

        soft_margin: Use a soft margin instead of an explicit one.
        Defaults to False.

        margin: Use an explicit value for the margin term. Defaults to 1.0.

    Returns:
        Loss: The loss value for the current batch.
    """

    # [Label]
    # ! Weirdness to be investigated
    # Do not remove this code; it is needed in specific situations.
    # Reshape label tensor to [batch_size, 1] if not already in that format.
    # labels = tf.reshape(labels, (labels.shape[0], 1))
    batch_size = tf.size(labels)

    # [distances]
    pairwise_distances = distance(embeddings)

    # [masks]
    positive_mask, negative_mask = build_masks(labels, batch_size)

    # [Positive distance computation]
    pos_distances, pos_idxs = positive_distances(
            positive_mining_strategy,
            pairwise_distances,
            positive_mask,
    )

    # [Negative distances computation]
    neg_distances, neg_idxs = negative_distances(
            negative_mining_strategy,
            pairwise_distances,
            negative_mask,
            positive_mask,
            batch_size,
    )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
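
Both excerpts lean on build_masks to pair up same-class and different-class
examples. As a rough illustration of what those masks plausibly contain, here
is a hypothetical stand-in (build_masks_sketch is not the library's
implementation; it only mirrors the semantics the call sites rely on):

import tensorflow as tf

def build_masks_sketch(labels, batch_size):
    # Hypothetical stand-in for build_masks.
    # positive_mask[i, j]: i and j share a label and i != j.
    # negative_mask[i, j]: i and j have different labels.
    labels = tf.reshape(labels, (-1, 1))
    same_label = tf.math.equal(labels, tf.transpose(labels))
    diagonal = tf.cast(tf.eye(batch_size), tf.bool)
    positive_mask = tf.math.logical_and(same_label,
                                        tf.math.logical_not(diagonal))
    negative_mask = tf.math.logical_not(same_label)
    return positive_mask, negative_mask

For labels [1, 1, 2], positive_mask only pairs indices 0 and 1, while
negative_mask pairs each of them with index 2.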

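The mining helpers themselves fall outside the excerpt. As an illustration of
what the strategy names conventionally mean (the library's positive_distances
and negative_distances also return the selected indices and may break ties
differently), a hypothetical per-row reduction:

import tensorflow as tf

def mine_sketch(pairwise, positive_mask, negative_mask):
    # Hypothetical illustration of the mining strategy names only.
    pos = tf.cast(positive_mask, pairwise.dtype)
    neg = tf.cast(negative_mask, pairwise.dtype)
    inf = tf.reduce_max(pairwise) + 1.0  # larger than any real distance

    # 'hard' positive: farthest same-class example in each row.
    hard_pos = tf.reduce_max(pairwise * pos, axis=1)
    # 'easy' positive: closest same-class example in each row.
    easy_pos = tf.reduce_min(pairwise + inf * (1.0 - pos), axis=1)
    # 'hard' negative: closest different-class example in each row.
    hard_neg = tf.reduce_min(pairwise + inf * (1.0 - neg), axis=1)
    # 'semi-hard' negative: closest different-class example still farther
    # away than the mined positive (degrades to inf when none exists).
    semi = tf.cast(pairwise > hard_pos[:, None], pairwise.dtype) * neg
    semi_hard_neg = tf.reduce_min(pairwise + inf * (1.0 - semi), axis=1)
    return hard_pos, easy_pos, hard_neg, semi_hard_neg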


tensorflow_similarity/losses/triplet_loss.py [33:93]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                 embeddings: FloatTensor,
                 distance: Callable,
                 positive_mining_strategy: str = 'hard',
                 negative_mining_strategy: str = 'semi-hard',
                 soft_margin: bool = False,
                 margin: float = 1.0) -> Any:
    """Triplet loss computations

    Args:
        labels: Labels associated with the embeddings.

        embeddings: Embedded examples.

        distance: Which distance function to use to compute the pairwise
        distances between embeddings. Defaults to 'cosine'.

        positive_mining_strategy: What mining strategy to use to select an
        embedding from the same class. Defaults to 'hard'.
        Available: {'easy', 'hard'}

        negative_mining_strategy: What mining strategy to use to select an
        embedding from a different class. Defaults to 'semi-hard'.
        Available: {'hard', 'semi-hard', 'easy'}

        soft_margin: Use a soft margin instead of an explicit one.
        Defaults to False.

        margin: Use an explicit value for the margin term. Defaults to 1.0.

    Returns:
        Loss: The loss value for the current batch.
    """

    # [Label]
    # ! Weirdness to be investigated
    # Do not remove this code; it is needed in specific situations.
    # Reshape label tensor to [batch_size, 1] if not already in that format.
    # labels = tf.reshape(labels, (labels.shape[0], 1))
    batch_size = tf.size(labels)

    # [distances]
    pairwise_distances = distance(embeddings)

    # [masks]
    positive_mask, negative_mask = build_masks(labels, batch_size)

    # [Positive distance computation]
    pos_distances, pos_idxs = positive_distances(
            positive_mining_strategy,
            pairwise_distances,
            positive_mask,
    )

    # [Negative distances computation]
    neg_distances, neg_idxs = negative_distances(
            negative_mining_strategy,
            pairwise_distances,
            negative_mask,
            positive_mask,
            batch_size,
    )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
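
Both excerpts cut off before pos_distances and neg_distances are combined, but
the soft_margin and margin arguments point at the standard triplet
formulation. A minimal sketch under that assumption (not the library's exact
code):

import tensorflow as tf

def combine_sketch(pos_distances, neg_distances,
                   soft_margin=False, margin=1.0):
    # Pull positives closer to the anchor than negatives.
    diff = pos_distances - neg_distances
    if soft_margin:
        # Soft margin: ln(1 + exp(d_pos - d_neg)), a smooth hinge.
        per_example = tf.math.log1p(tf.math.exp(diff))
    else:
        # Explicit margin: max(d_pos - d_neg + margin, 0).
        per_example = tf.maximum(diff + margin, 0.0)
    return tf.reduce_mean(per_example)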




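For reference, assuming the public wrappers in tensorflow_similarity.losses
mirror the function signatures above (verify against the installed version),
a typical usage looks like:

import tensorflow as tf
import tensorflow_similarity as tfsim

# Assumes tfsim.losses.TripletLoss exposes the same arguments as the
# triplet_loss function excerpted above.
loss = tfsim.losses.TripletLoss(
    distance='cosine',
    positive_mining_strategy='hard',
    negative_mining_strategy='semi-hard',
    soft_margin=False,
    margin=1.0,
)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(32,)),
    tf.keras.layers.Dense(16),  # embedding output
])
model.compile(optimizer='adam', loss=loss)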