in easycv/predictors/feature_extractor.py [0:0]
def predict(self, input_data_list, batch_size=-1):
    """Run batched face-attribute extraction over a list of images.

    Args:
        input_data_list: a list of numpy arrays, each array is one image
            to be predicted.
        batch_size: batch size passed by the caller; when <= 0 all samples
            are processed in a single batch.

    Returns:
        result: a list of dicts, one per image that had at least one face
            detected. Each dict maps ``'face_<attr_name>'`` to a list of
            per-face attribute values and ``'face_bbox'`` to the matching
            list of face bounding boxes.
            NOTE(review): images where no face is detected produce no output
            entry, so the result may be shorter than the input list —
            confirm callers expect this.

    Raises:
        AssertionError: if ``input_data_list`` is empty.
        ValueError: if an entry of ``self.attr_method`` is neither
            ``'softmax'`` nor ``'distribute_sum'``.
    """
    num_image = len(input_data_list)
    assert num_image > 0, 'input images should not be an empty list'
    if batch_size > 0:
        num_batches = int(math.ceil(float(num_image) / batch_size))
    else:
        num_batches = 1
        batch_size = num_image
    image_list = input_data_list

    outputs_list = []
    for batch_idx in range(num_batches):
        batch_image_list = image_list[batch_idx * batch_size:min(
            len(image_list), (batch_idx + 1) * batch_size)]
        face_image_list = []  # aligned face crops, flattened over the batch
        face_bbox_list = []  # bbox for each entry of face_image_list
        # image index -> list of indices into face_image_list
        faceidx_by_imageidx = {}
        for img_idx, img in enumerate(batch_image_list):
            # safe_detect returns empty results instead of raising when no
            # face is found
            bbox, ld = self.detector.safe_detect(img)
            if len(bbox) == 0:
                print('batch %d , %dth image has no face detected' %
                      (batch_idx, img_idx))
                continue
            if len(bbox) > 1:
                print('batch %d , %dth image has more than %d face' %
                      (batch_idx, img_idx, len(bbox)))
            # Keep only detections above the confidence threshold; the last
            # element of each bbox is assumed to be the detection score.
            kept = [(box, landmark) for box, landmark in zip(bbox, ld)
                    if box[-1] > self.face_threshold]
            # Multiple faces may be detected in one image: record which
            # face indices belong to this image.
            # BUG FIX: the inner filtering loop previously reused the name
            # `idx`, clobbering the image index, so results were keyed by
            # the last bbox index instead of the image index.
            faceidx_by_imageidx[img_idx] = []
            for face_box, landmark in kept:
                face_image_list.append(glint360k_align(img, landmark))
                face_bbox_list.append(face_box)
                faceidx_by_imageidx[img_idx].append(len(face_image_list) - 1)

        if len(face_image_list) > 0:
            image_tensor_list = self.predictor.preprocess(face_image_list)
            input_data = self.batch(image_tensor_list)
            outputs = self.predictor.predict_batch(
                input_data, mode='extract')
            # One neck head per attribute; reduce each head's logits to a
            # single per-face value.
            neck_output_dict = {}
            for neck_idx, attr_method in enumerate(self.attr_method):
                neck_output = outputs['neck_%d_0' % neck_idx]
                neck_output = torch.nn.Softmax(dim=1)(neck_output)
                if attr_method == 'softmax':
                    # hard class prediction per face
                    neck_output = torch.argmax(neck_output, dim=1)
                elif attr_method == 'distribute_sum':
                    # expected value over the class distribution
                    n, c = neck_output.size()
                    distribute = torch.arange(0, c).repeat(n, 1).to(
                        neck_output.device)
                    neck_output = (distribute * neck_output).sum(dim=1)
                else:
                    raise ValueError(
                        'TorchFaceAttrExtractor for neck %d only support attr_method softmax/distributed sum'
                        % (neck_idx))
                # BUG FIX: removed a stray second `argmax(dim=1)` that ran
                # after both branches; neck_output is 1-D here, so it would
                # raise IndexError.
                neck_output_dict[neck_idx] = neck_output.cpu().numpy()

            # Regroup per-face outputs back into one result dict per image.
            for imgidx, face_indices in faceidx_by_imageidx.items():
                single_result = {}
                for k in neck_output_dict:
                    single_result['face_' + self.attr_name[k]] = []
                single_result['face_bbox'] = []
                for faceidx in face_indices:
                    for k in neck_output_dict:
                        out = np.squeeze(neck_output_dict[k][faceidx])
                        if self.attr_method[k] == 'softmax':
                            # map the predicted class index to its label
                            label_map = getattr(
                                self, '%s_map' % self.attr_name[k])
                            out = label_map[out]
                        single_result['face_' +
                                      self.attr_name[k]].append(out)
                    single_result['face_bbox'].append(
                        face_bbox_list[faceidx])
                outputs_list.append(single_result)
    return outputs_list