# utils/symbol/score.py
def score(model, dataset, metrics, gpus, batch_size, rgb_mean, network,
          data_shape, epoch, scale=0.0167):
    """Evaluate a saved MXNet checkpoint on a RecordIO dataset.

    Loads ``{model}-symbol.json`` / ``{model}-{epoch:04d}.params``, runs one
    forward pass over every batch in ``dataset`` and updates ``metrics`` in
    place. Progress (metric value and cumulative ms/image) is logged roughly
    every 10,000 images.

    Parameters
    ----------
    model : str
        Checkpoint prefix passed to ``mx.model.load_checkpoint``.
    dataset : str
        Path to the ``.rec`` file to score.
    metrics : mx.metric.EvalMetric or list of mx.metric.EvalMetric
        Metric(s) updated in place; inspect them after the call for results.
    gpus : str
        Comma-separated GPU ids (e.g. ``'0,1'``); falsy means CPU.
    batch_size : int
        Evaluation batch size.
    rgb_mean : str
        Comma-separated per-channel mean to subtract, e.g. ``'123,117,104'``.
    network : object
        Unused; kept for backward compatibility with existing callers.
    data_shape : str
        Comma-separated input shape, e.g. ``'3,224,224'``.
    epoch : int
        Checkpoint epoch number to load.
    scale : float, optional
        Multiplicative pixel scale applied after mean subtraction
        (default 0.0167, i.e. roughly 1/60 — TODO confirm intended std).
    """
    # create data iterator
    rgb_mean = [float(i) for i in rgb_mean.split(',')]
    data_shape = tuple(int(i) for i in data_shape.split(','))
    data = mx.io.ImageRecordIter(
        data_name='data',
        label_name='softmax_label',
        # ------------------------------------
        path_imgrec=dataset,
        label_width=1,
        data_shape=data_shape,
        preprocess_threads=16,
        # ------------------------------------
        batch_size=batch_size,
        # ------------------------------------
        mean_r=rgb_mean[0],
        mean_g=rgb_mean[1],
        mean_b=rgb_mean[2],
        scale=scale,
        # ------------------------------------
        rand_crop=False,
        resize=256,
        inter_method=2  # bicubic
    )
    # load parameters
    sym, arg_params, aux_params = mx.model.load_checkpoint(model, epoch)
    logging.info('loading {}-{:04d}.params'.format(model, epoch))
    # FIX: the format string has one placeholder; the extra `epoch` arg was dead.
    logging.info('loading {}-symbol.json'.format(model))
    # bind
    devs = mx.cpu() if not gpus else [mx.gpu(int(i)) for i in gpus.split(',')]
    mod = mx.mod.Module(symbol=sym, context=devs)
    mod.bind(for_training=False,
             data_shapes=data.provide_data,
             label_shapes=data.provide_label)
    mod.set_params(arg_params, aux_params)
    if not isinstance(metrics, list):
        metrics = [metrics]
    # testing
    num = 0
    next_log = 10000  # FIX: log per ~10k images even when batch_size doesn't divide 10000
    tic = time.time()  # FIX: start once so ms/im is cumulative, not last-batch-only
    for batch in data:
        mod.forward(batch, is_train=False)
        for m in reversed(metrics):
            mod.update_metric(m, batch.label)
        num += batch_size
        if num >= next_log:
            next_log += 10000
            # cumulative wall-clock cost per image, in milliseconds
            cost = 1000 * (time.time() - tic) / num
            # original code logged the loop leftover `m` (== metrics[0] after
            # reversed iteration); make that explicit
            logging.info('{}: {}, {:.1f} ms/im'.format(num, metrics[0].get(), cost))
    return