in web_tool/SessionHandler.py
def create_session(self, session_id, dataset_key, model_key, checkpoint_idx):
    if session_id in self._SESSION_MAP:
        raise ValueError("session_id %s has already been created" % (session_id))

    if not is_valid_dataset(dataset_key):
        raise ValueError("%s is not a valid dataset, check the keys in datasets.json and datasets.mine.json" % (dataset_key))
    if model_key not in self.model_configs:
        raise ValueError("%s is not a valid model, check the keys in models.json and models.mine.json" % (model_key))

    try:
        worker = self._WORKER_POOL.get_nowait()
    except Empty:
        # By convention, a GPU id of -1 means we should use the CPU.
        # We fall back to this when there are no resources left in the worker pool.
        worker = {"type": "local", "gpu_id": -1}

    if worker["type"] == "local":
        gpu_id = worker["gpu_id"]

        # Create a local worker process and a ModelSession object to pass to the Session()
        random_port = get_free_tcp_port()
        process = self._spawn_local_worker(random_port, gpu_id, model_key)

        if checkpoint_idx > -1:
            checkpoints = Checkpoints.list_checkpoints()
            model = ModelSessionRPC(gpu_id, session_id=session_id, port=random_port, load_dir=checkpoints[checkpoint_idx]["directory"])
        else:
            model = ModelSessionRPC(gpu_id, session_id=session_id, port=random_port)

        # Create Session object
        session = Session(session_id, model)

        # Associate the front-end session with the Session and Worker
        self._SESSION_MAP[session_id] = session
        self._SESSION_INFO[session_id] = {
            "worker": worker,
            "process": process
        }

        if gpu_id == -1:
            LOGGER.info("Created a local worker for (%s) on CPU" % (session_id))
        else:
            LOGGER.info("Created a local worker for (%s) on GPU %s" % (session_id, str(gpu_id)))
    elif worker["type"] == "remote":
        raise NotImplementedError("Remote workers aren't implemented yet")
    else:
        raise ValueError("Worker type %s isn't recognized" % (worker["type"]))
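For reference, a minimal sketch of how a helper like get_free_tcp_port() is commonly implemented; this is an assumption for illustration, not the project's actual code. Binding to port 0 asks the OS to pick any unused port, which the spawned worker can then listen on.

# Hypothetical sketch of a free-port helper (assumption, not the project's implementation).
import socket

def get_free_tcp_port():
    # Bind to port 0 so the OS assigns an arbitrary unused TCP port,
    # then report that port number back to the caller.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("", 0))
        return s.getsockname()[1]

Note the inherent race in this pattern: the port is released when the probe socket closes, so another process could grab it before the worker binds it. That window is usually acceptable for a tool like this, but it is a design trade-off rather than a guarantee.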