def _load_textual_embeddings()

in docker_images/diffusers/app/lora.py


    def _load_textual_embeddings(self, adapter, model_data):
        if self._is_pivotal_tuning_lora(model_data):
            # Pivotal-tuning LoRAs ship their learned token embeddings in a
            # separate file in the adapter repo; fetch it from the Hub.
            embedding_path = self._hub_repo_file(
                repo_id=adapter,
                filename="embeddings.safetensors"
                if self._is_safetensors_pivotal(model_data)
                else "embeddings.pti",
                repo_type="model",
            )

            # Load the embeddings file into a dict of tensors keyed by text encoder.
            embeddings = load_file(embedding_path)
            # Newer exports key the embeddings as "text_encoders_0"/"text_encoders_1";
            # older exports use "clip_l"/"clip_g".
            state_dict_clip_l = (
                embeddings.get("text_encoders_0")
                if "text_encoders_0" in embeddings
                else embeddings.get("clip_l", None)
            )
            state_dict_clip_g = (
                embeddings.get("text_encoders_1")
                if "text_encoders_1" in embeddings
                else embeddings.get("clip_g", None)
            )
            # Each text encoder must contribute the same, non-zero number of tokens.
            tokens_to_add = 0 if state_dict_clip_l is None else len(state_dict_clip_l)
            tokens_to_add_2 = 0 if state_dict_clip_g is None else len(state_dict_clip_g)
            if tokens_to_add == tokens_to_add_2 and tokens_to_add > 0:
                if state_dict_clip_l is not None and len(state_dict_clip_l) > 0:
                    # Register one placeholder token ("<s0>", "<s1>", ...) per
                    # embedding row with the first text encoder and tokenizer.
                    token_list = [f"<s{i}>" for i in range(tokens_to_add)]
                    self.ldm.load_textual_inversion(
                        state_dict_clip_l,
                        token=token_list,
                        text_encoder=self.ldm.text_encoder,
                        tokenizer=self.ldm.tokenizer,
                    )

                if state_dict_clip_g is not None and len(state_dict_clip_g) > 0:
                    # Register the same placeholder tokens with the second text
                    # encoder and tokenizer.
                    token_list = [f"<s{i}>" for i in range(tokens_to_add_2)]
                    self.ldm.load_textual_inversion(
                        state_dict_clip_g,
                        token=token_list,
                        text_encoder=self.ldm.text_encoder_2,
                        tokenizer=self.ldm.tokenizer_2,
                    )
                logger.info("Text embeddings loaded for adapter %s", adapter)
            else:
                logger.info(
                    "No text embeddings were loaded due to invalid embeddings or a mismatch of token sizes "
                    "for adapter %s",
                    adapter,
                )
            # Record the number of tokens added for this adapter.
            self.current_tokens_loaded = tokens_to_add
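
For context, a minimal sketch of how such a pivotal-tuning embeddings file can be inspected with safetensors. This is illustrative only, not part of lora.py: the local file path is a placeholder, and the key layout simply mirrors the lookups in the method above.

    # Illustrative sketch, not part of lora.py: inspect an embeddings file with the
    # key layout the loader above expects ("text_encoders_0"/"text_encoders_1" or
    # "clip_l"/"clip_g"). The file path is a placeholder.
    from safetensors.torch import load_file

    embeddings = load_file("embeddings.safetensors")

    clip_l = embeddings.get("text_encoders_0", embeddings.get("clip_l"))
    clip_g = embeddings.get("text_encoders_1", embeddings.get("clip_g"))

    for name, tensor in (("clip_l", clip_l), ("clip_g", clip_g)):
        if tensor is not None:
            # One "<s{i}>" placeholder token per embedding row, matching the
            # token_list built in _load_textual_embeddings.
            tokens = [f"<s{i}>" for i in range(len(tensor))]
            print(name, tuple(tensor.shape), tokens)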