fastchat/model/model_adapter.py [1601:1619]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
        revision = from_pretrained_kwargs.get("revision", "main")
        model = AutoModel.from_pretrained(
            model_path,
            **from_pretrained_kwargs,
        )
        tokenizer = AutoTokenizer.from_pretrained(
            model_path, trust_remote_code=True, revision=revision
        )
        if hasattr(model.config, "max_position_embeddings") and hasattr(
            tokenizer, "model_max_length"
        ):
            model.config.max_sequence_length = min(
                model.config.max_position_embeddings, tokenizer.model_max_length
            )
        return model, tokenizer

    def get_default_conv_template(self, model_path: str) -> Conversation:
        return get_conv_template("one_shot")
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
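
For reference, the method body above is standard Hugging Face loading plus a
context-length clamp. A minimal standalone sketch of the same pattern,
runnable outside FastChat (the checkpoint name and the kwargs dict are
illustrative, not taken from the excerpt):

    from transformers import AutoModel, AutoTokenizer

    model_path = "bert-base-uncased"  # illustrative checkpoint
    from_pretrained_kwargs = {"revision": "main"}  # illustrative kwargs

    revision = from_pretrained_kwargs.get("revision", "main")
    model = AutoModel.from_pretrained(model_path, **from_pretrained_kwargs)
    tokenizer = AutoTokenizer.from_pretrained(
        model_path, trust_remote_code=True, revision=revision
    )

    # Mirror the excerpt: cap the usable context length at the smaller of the
    # model's positional-embedding limit and the tokenizer's declared maximum.
    if hasattr(model.config, "max_position_embeddings") and hasattr(
        tokenizer, "model_max_length"
    ):
        model.config.max_sequence_length = min(
            model.config.max_position_embeddings, tokenizer.model_max_length
        )

    print(model.config.max_sequence_length)  # 512 for bert-base-uncased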



fastchat/model/model_adapter.py [1630:1648]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
        revision = from_pretrained_kwargs.get("revision", "main")
        model = AutoModel.from_pretrained(
            model_path,
            **from_pretrained_kwargs,
        )
        tokenizer = AutoTokenizer.from_pretrained(
            model_path, trust_remote_code=True, revision=revision
        )
        if hasattr(model.config, "max_position_embeddings") and hasattr(
            tokenizer, "model_max_length"
        ):
            model.config.max_sequence_length = min(
                model.config.max_position_embeddings, tokenizer.model_max_length
            )
        return model, tokenizer

    def get_default_conv_template(self, model_path: str) -> Conversation:
        return get_conv_template("one_shot")
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
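
Both excerpts fall back to the same "one_shot" conversation template. A rough
usage sketch, assuming FastChat is installed (the appended user message is
illustrative):

    from fastchat.conversation import get_conv_template

    conv = get_conv_template("one_shot")
    # Add one user turn plus an empty slot for the model's reply, then render
    # the prompt string, which includes the template's built-in example turn.
    conv.append_message(conv.roles[0], "What is the capital of France?")
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())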
