chatlearn/models/vllm_module_v2.py [108:127]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # the configured tokenizer path doubles as the model path (HF checkpoint dir)
            model=self.model_args['tokenizer'],
            tokenizer=self.model_args['tokenizer'],
            max_seq_len_to_capture=self.model_args.get("max_seq_len_to_capture", 32768),
            seed=seed,
            # load format: 'dummy' for Megatron checkpoints or mock weights; other formats load HF checkpoints.
            load_format=load_format,
            model_loader_extra_config=model_loader_extra_config,
            # parallelism strategy
            tensor_parallel_size=self.module_args.tensor_model_parallel_size,
            pipeline_parallel_size=self.module_args.pipeline_model_parallel_size,
            dtype=dtype,
            # scheduling strategy
            max_num_seqs=self.module_args.generation_batch_size,
            max_num_batched_tokens=self.model_args.get("max_num_batched_tokens", None),
            num_scheduler_steps=self.model_args.get("num_scheduler_steps", 1),
            gpu_memory_utilization=self.model_args.get("gpu_memory_utilization", 0.90),
            # logger
            disable_log_requests=self.model_args.get("disable_log_requests", True),
            disable_log_stats=self.model_args.get("disable_log_stats", True),
            trust_remote_code=True,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



chatlearn/models/vllm_module_v2.py [175:194]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            # the configured tokenizer path doubles as the model path (HF checkpoint dir)
            model=self.model_args['tokenizer'],
            tokenizer=self.model_args['tokenizer'],
            max_seq_len_to_capture=self.model_args.get("max_seq_len_to_capture", 32768),
            seed=seed,
            # load format: 'dummy' for Megatron checkpoints or mock weights; other formats load HF checkpoints.
            load_format=load_format,
            model_loader_extra_config=model_loader_extra_config,
            # parallelism strategy
            tensor_parallel_size=self.module_args.tensor_model_parallel_size,
            pipeline_parallel_size=self.module_args.pipeline_model_parallel_size,
            dtype=dtype,
            # scheduling strategy
            max_num_seqs=self.module_args.generation_batch_size,
            max_num_batched_tokens=self.model_args.get("max_num_batched_tokens", None),
            num_scheduler_steps=self.model_args.get("num_scheduler_steps", 1),
            gpu_memory_utilization=self.model_args.get("gpu_memory_utilization", 0.90),
            # logger
            disable_log_requests=self.model_args.get("disable_log_requests", True),
            disable_log_stats=self.model_args.get("disable_log_stats", True),
            trust_remote_code=True,
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



