# make_token_graph() — extracted from transducer.py

def make_token_graph(token_list, blank="none", allow_repeats=True):
    """
    Build the graph of per-token transition models.

    Node layout (fixed by insertion order): node 0 is the start/accept
    state, nodes 1..len(token_list) are the per-token states, and — when
    a blank is used — node len(token_list) + 1 is the blank state.

    Args:
        token_list: the tokens; only its length is used here. The blank
            label index is assumed to be len(token_list) (i.e. last).
        blank: one of "none" (no blank state), "optional" (blank may
            occur between tokens), or "forced" (tokens are only entered
            via the blank state).
        allow_repeats: whether a token may immediately follow itself.
            Disallowing repeats requires blank="optional".

    Returns:
        A gtn.Graph encoding the token transitions.

    Raises:
        ValueError: if allow_repeats is False and blank != "optional".
    """
    if not allow_repeats and blank != "optional":
        raise ValueError("Must use blank='optional' if disallowing repeats.")

    num_tokens = len(token_list)
    blank_idx = num_tokens       # blank label is assumed to be last
    blank_node = num_tokens + 1  # node id the blank state will receive
    graph = gtn.Graph(False)

    # Nodes: start/accept state first, then one state per token. A token
    # state can absorb one or more consecutive word pieces per emission,
    # e.g. [ab, ab, ab] transduces to [ab]. Token states are accepting
    # unless a trailing blank is forced.
    graph.add_node(True, True)
    for _ in range(num_tokens):
        graph.add_node(False, blank != "forced")
    if blank != "none":
        graph.add_node()  # blank state, node id == blank_node

    # Arcs between the start state and the blank state.
    if blank != "none":
        graph.add_arc(0, blank_node, blank_idx, gtn.epsilon)
        graph.add_arc(blank_node, 0, gtn.epsilon)

    for tok in range(num_tokens):
        # Under blank="forced" a token is entered only from the blank
        # state; otherwise directly from the start state.
        entry = blank_node if blank == "forced" else 0
        graph.add_arc(entry, tok + 1, tok)
        # Self-loop: extra consecutive copies of the word piece emit
        # nothing further.
        graph.add_arc(tok + 1, tok + 1, tok, gtn.epsilon)
        if allow_repeats:
            if blank == "forced":
                # Allow transition from token to blank only.
                graph.add_arc(tok + 1, blank_node, blank_idx, gtn.epsilon)
            else:
                # Allow transition from token to blank and all other
                # tokens (via the start state).
                graph.add_arc(tok + 1, 0, gtn.epsilon)
        else:
            # Allow transitions to blank and to every token except the
            # same token.
            graph.add_arc(tok + 1, blank_node, blank_idx, gtn.epsilon)
            for other in range(num_tokens):
                if other != tok:
                    graph.add_arc(tok + 1, other + 1, other, other)
    return graph