def load_dataset()

in data_loader.py

Builds the TF 1.x tf.data input pipeline: collects image, landmark, and mask file paths for the selected datasets and returns a one-shot iterator over batched samples.


import glob
import os

import tensorflow as tf
from tensorflow.contrib.data import map_and_batch, prefetch_to_device, shuffle_and_repeat


def load_dataset(opt, train=True):
	# Select the training or validation image folders from the options object.
	if train:
		data_path = opt.data_path
	else:
		data_path = opt.val_data_path

	image_path_all = []
	lm_path_all = []
	mask_path_all = []

	for dataset in data_path:
		# All PNG images sitting directly in the dataset folder.
		image_path = glob.glob(dataset + '/' + '*.png')
		image_path.sort()
		# Landmark .txt files live in <dataset>/lm and share the image basenames.
		lm_path_ = [os.path.join(dataset, 'lm', os.path.basename(f).replace('png', 'txt')) for f in image_path]
		lm_path_.sort()
		# Masks live in <dataset>/mask under the same file names as the images.
		mask_path = [os.path.join(dataset, 'mask', os.path.basename(f)) for f in image_path]
		mask_path.sort()

		# Make sure the binary landmark files in <dataset>/lm_bin exist.
		check_lm_bin(dataset, lm_path_)

		lm_path = [os.path.join(dataset, 'lm_bin', os.path.basename(f).replace('png', 'bin')) for f in image_path]
		lm_path.sort()

		# Accumulate paths across all dataset folders.
		image_path_all += image_path
		mask_path_all += mask_path
		lm_path_all += lm_path

	dataset_num = len(image_path_all)  # total number of samples across all folders

	# Build the TF 1.x input pipeline: shuffle/repeat, parse and batch, then prefetch to the GPU.
	dataset = tf.data.Dataset.from_tensor_slices((image_path_all, lm_path_all, mask_path_all))
	dataset = dataset \
		.apply(shuffle_and_repeat(dataset_num)) \
		.apply(map_and_batch(_parse_function, opt.batch_size, num_parallel_batches=4, drop_remainder=True)) \
		.apply(prefetch_to_device('/gpu:0', None))  # buffer_size=None lets prefetch_to_device pick the buffer size automatically

	# One-shot iterator over the batched samples produced by _parse_function.
	inputs_iterator = dataset.make_one_shot_iterator()
	return inputs_iterator
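
For reference, a minimal usage sketch of the returned iterator in a TF 1.x session. The tuple structure of each batch depends on _parse_function; (image, landmark, mask) is assumed here based on the paths fed into the pipeline, and `opt` stands for whatever options object carries data_path and batch_size.

# Hedged usage sketch: assumes _parse_function yields (image, landmark, mask)
# tensors and that a TF 1.x graph/session is being used.
inputs_iterator = load_dataset(opt, train=True)
images, landmarks, masks = inputs_iterator.get_next()

with tf.Session() as sess:
	image_batch, lm_batch, mask_batch = sess.run([images, landmarks, masks])
	# image_batch holds opt.batch_size samples; exact shapes depend on _parse_function.
	print(image_batch.shape, lm_batch.shape, mask_batch.shape)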