Excerpt: the `parallelize` method (IPU pipeline-stage assignment), taken from optimum/graphcore/models/bart/modeling_bart.py.

    def parallelize(self, **kwargs):
        """
        Prepare the model for pipelined execution on IPUs.

        Splits the model into pipeline stages (embedding modules, encoder
        layers, decoder layers, classification head) via ``poptorch.BeginBlock``
        and registers recomputation-checkpoint hooks between layers when
        enabled in the IPU config.

        Recommended usage:
        ```
        model = PipelinedBartForSequenceClassification(config).parallelize().half()
        ```

        Returns the model itself so calls can be chained.
        """
        super().parallelize()

        # Swap in the shared-embedding BART variant and the IPU-friendly
        # encoder/decoder/attention implementations; optionally group-quantize
        # the linear layers.
        self.model.__class__ = _BartModelWithSharedEmbedding
        self.model.encoder_and_decoder_embeddings_computation(use_shared_embedding=True)
        self.model.change_bart_encoder_and_decoder_classes(restore=False)
        self.model.change_bart_attention_class(restore=False)
        self.model.quantize_linear_layers(restore=not kwargs.get("use_group_quantized_linears", False), num_groups=16)

        logger.info("-------------------- Device Allocation --------------------")
        logger.info("Embedding --> IPU 0")
        # Every embedding-related module is pinned to IPU 0.
        self.model.shared = poptorch.BeginBlock(self.model.shared, "Embedding", ipu_id=0)
        self.model.encoder.embed_positions = poptorch.BeginBlock(
            self.model.encoder.embed_positions, "Embedding", ipu_id=0
        )
        self.model.encoder.layernorm_embedding = poptorch.BeginBlock(
            self.model.encoder.layernorm_embedding, "Embedding", ipu_id=0
        )

        # One IPU assignment per transformer layer: encoder layers first,
        # then decoder layers.
        total_layers = len(self.model.encoder.layers) + len(self.model.decoder.layers)
        layer_ipu = get_layer_ipu(self.ipu_config, total_layers)

        for idx, encoder_layer in enumerate(self.model.encoder.layers):
            target_ipu = layer_ipu[idx]
            # NOTE(review): the "last layer" test uses config.num_hidden_layers
            # rather than len(encoder.layers) — presumably equivalent for BART;
            # confirm against the config's attribute mapping.
            if self.ipu_config.recompute_checkpoint_every_layer and idx != self.config.num_hidden_layers - 1:
                self._hooks.append(recomputation_checkpoint(encoder_layer))
            self.model.encoder.layers[idx] = poptorch.BeginBlock(encoder_layer, f"Encoder{idx}", ipu_id=target_ipu)
            logger.info(f"Encoder {idx:<2} --> IPU {target_ipu}")

        self.model.decoder.embed_positions = poptorch.BeginBlock(
            self.model.decoder.embed_positions, "Embedding", ipu_id=0
        )
        self.model.decoder.layernorm_embedding = poptorch.BeginBlock(
            self.model.decoder.layernorm_embedding, "Embedding", ipu_id=0
        )

        # Decoder layers follow the encoder layers in layer_ipu, hence the offset.
        encoder_layer_count = len(self.model.encoder.layers)
        for idx, decoder_layer in enumerate(self.model.decoder.layers):
            target_ipu = layer_ipu[idx + encoder_layer_count]
            if self.ipu_config.recompute_checkpoint_every_layer and idx != self.config.num_hidden_layers - 1:
                self._hooks.append(recomputation_checkpoint(decoder_layer))
            self.model.decoder.layers[idx] = poptorch.BeginBlock(decoder_layer, f"Decoder{idx}", ipu_id=target_ipu)
            logger.info(f"Decoder {idx:<2} --> IPU {target_ipu}")

        # The classification head shares the IPU of the final decoder layer.
        last_ipu = layer_ipu[-1]
        logger.info(f"Classification Head Output --> IPU {last_ipu}")
        self.classification_head = poptorch.BeginBlock(
            self.classification_head, "Classification Head Output", ipu_id=last_ipu
        )
        logger.info("-----------------------------------------------------------")
        return self