in lib/roi_data/fast_rcnn_rel.py [0:0]
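# NOTE: this excerpt omits the module preamble. The function below needs at
# least numpy and the global config; the import paths here are assumptions,
# not copied from the original file. The helpers _augment_gt_boxes_by_perturbation,
# _sample_rois_triplet_yall and _sample_rois_softmax_yall are defined elsewhere
# in this module.
import numpy as np

from core.config import cfg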
def add_fast_rcnn_blobs(
        blobs, im_scales, landb, roidb, roidb_inds, proposals, split,
        low_shot_helper):
    """Add blobs needed for training Fast R-CNN style models."""
    # Sample training RoIs from each image and append them to the blob lists
    if split != 'train':
        assert proposals is not None, \
            'proposals should not be None during val/test.'
        for im_i, entry in enumerate(roidb):
            scale = im_scales[im_i]
            # labels start from 1
            sbj_gt_labels = entry['sbj_max_classes']
            obj_gt_labels = entry['obj_max_classes']
            rel_gt_labels = entry['rel_max_classes']
            # scale ground-truth boxes to the network input resolution
            sbj_gt_boxes = entry['sbj_boxes'] * scale
            obj_gt_boxes = entry['obj_boxes'] * scale
            rel_gt_boxes = entry['rel_boxes'] * scale
            num_proposals = proposals[im_i]['boxes_sbj'].shape[0]
            # logger.info('num_proposals: {}'.format(num_proposals))
            if num_proposals > 0:
                # RoIs are stored as (batch_idx, x1, y1, x2, y2); the batch
                # index in column 0 is left at zero here
                all_sbj_rois = np.zeros((num_proposals, 5), dtype=np.float32)
                all_obj_rois = np.zeros((num_proposals, 5), dtype=np.float32)
                all_rel_rois = np.zeros((num_proposals, 5), dtype=np.float32)
                all_sbj_rois[:, 1:5] = proposals[im_i]['boxes_sbj'] * scale
                all_obj_rois[:, 1:5] = proposals[im_i]['boxes_obj'] * scale
                all_rel_rois[:, 1:5] = proposals[im_i]['boxes_rel'] * scale
            else:  # create dummy rois
                # a single 1x1 box at the origin keeps downstream ops valid
                all_sbj_rois = np.zeros((1, 5), dtype=np.float32)
                all_obj_rois = np.zeros((1, 5), dtype=np.float32)
                all_rel_rois = np.zeros((1, 5), dtype=np.float32)
                all_sbj_rois[:, 3:5] = 1.0
                all_obj_rois[:, 3:5] = 1.0
                all_rel_rois[:, 3:5] = 1.0
            subbatch_id = proposals[im_i]['subbatch_id']
            frcn_blobs = {}
            frcn_blobs['sbj_rois'] = all_sbj_rois
            frcn_blobs['obj_rois'] = all_obj_rois
            frcn_blobs['rel_rois_sbj'] = all_sbj_rois
            frcn_blobs['rel_rois_obj'] = all_obj_rois
            frcn_blobs['rel_rois_prd'] = all_rel_rois
            frcn_blobs['sbj_pos_labels_int32'] = sbj_gt_labels.astype(np.int32)
            frcn_blobs['obj_pos_labels_int32'] = obj_gt_labels.astype(np.int32)
            frcn_blobs['rel_pos_labels_int32'] = rel_gt_labels.astype(np.int32)
            frcn_blobs['sbj_gt_boxes'] = sbj_gt_boxes.astype(np.float32)
            frcn_blobs['obj_gt_boxes'] = obj_gt_boxes.astype(np.float32)
            frcn_blobs['rel_gt_boxes'] = rel_gt_boxes.astype(np.float32)
            frcn_blobs['image_idx'] = \
                np.array(roidb_inds[im_i])[np.newaxis].astype(np.int32)
            # the numeric part of the file name is used as the image id,
            # e.g. '000123.jpg' -> 123.0
            frcn_blobs['image_id'] = \
                np.array(float(entry['image'].split('/')[-1][:-4]))[np.newaxis].astype(np.float32)
            frcn_blobs['image_scale'] = \
                np.array(scale)[np.newaxis].astype(np.float32)
            frcn_blobs['subbatch_id'] = \
                np.array(subbatch_id)[np.newaxis].astype(np.float32)
            frcn_blobs['num_proposals'] = \
                np.array(num_proposals)[np.newaxis].astype(np.float32)
            for k, v in frcn_blobs.items():
                blobs[k].append(v)
        # Concat the per-image blob lists into single tensors
        for k, v in blobs.items():
            if isinstance(v, list) and len(v) > 0:
                blobs[k] = np.concatenate(v)
        return True
    else:
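        # Training path: for each image, build ground-truth subject/object
        # boxes in (batch_idx, x1, y1, x2, y2, class) format, gather candidate
        # RoIs (from precomputed proposals or by perturbing the unique GT
        # boxes), and let the loss-specific sampler assemble the final blobs.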
        for im_i, entry in enumerate(roidb):
            scale = im_scales[im_i]
            sbj_gt_inds = np.where((entry['gt_sbj_classes'] > 0))[0]
            obj_gt_inds = np.where((entry['gt_obj_classes'] > 0))[0]
            sbj_gt_rois = entry['sbj_boxes'][sbj_gt_inds, :] * scale
            obj_gt_rois = entry['obj_boxes'][obj_gt_inds, :] * scale
            sbj_gt_rois = sbj_gt_rois.astype(np.float32)
            obj_gt_rois = obj_gt_rois.astype(np.float32)
            # GT boxes are stored as (batch_idx, x1, y1, x2, y2, class)
            sbj_gt_boxes = np.zeros((len(sbj_gt_inds), 6), dtype=np.float32)
            sbj_gt_boxes[:, 0] = im_i  # batch inds
            sbj_gt_boxes[:, 1:5] = sbj_gt_rois
            sbj_gt_boxes[:, 5] = entry['gt_sbj_classes'][sbj_gt_inds]
            obj_gt_boxes = np.zeros((len(obj_gt_inds), 6), dtype=np.float32)
            obj_gt_boxes[:, 0] = im_i  # batch inds
            obj_gt_boxes[:, 1:5] = obj_gt_rois
            obj_gt_boxes[:, 5] = entry['gt_obj_classes'][obj_gt_inds]
            # labels start from 1
            sbj_gt_labels = entry['sbj_max_classes']
            obj_gt_labels = entry['obj_max_classes']
            rel_gt_labels = entry['rel_max_classes']
            # label vectors for subjects, objects and predicates
            sbj_gt_vecs = entry['sbj_vecs']
            obj_gt_vecs = entry['obj_vecs']
            rel_gt_vecs = entry['prd_vecs']
            if proposals is None:
                # Get unique boxes
                rows = set()
                unique_sbj_gt_inds = []
                for idx, row in enumerate(sbj_gt_boxes):
                    if tuple(row) not in rows:
                        rows.add(tuple(row))
                        unique_sbj_gt_inds.append(idx)
                unique_sbj_gt_boxes = sbj_gt_boxes[unique_sbj_gt_inds, :]
                rows = set()
                unique_obj_gt_inds = []
                for idx, row in enumerate(obj_gt_boxes):
                    if tuple(row) not in rows:
                        rows.add(tuple(row))
                        unique_obj_gt_inds.append(idx)
                unique_obj_gt_boxes = obj_gt_boxes[unique_obj_gt_inds, :]
                # use better sampling by default: generate extra candidate
                # RoIs by perturbing the unique GT boxes (the scaled image
                # size is passed so perturbed boxes can stay inside the image)
                im_width = entry['width'] * scale
                im_height = entry['height'] * scale
                _rois_sbj = _augment_gt_boxes_by_perturbation(
                    unique_sbj_gt_boxes[:, 1:5], im_width, im_height)
                rois_sbj = np.zeros((_rois_sbj.shape[0], 5), dtype=np.float32)
                rois_sbj[:, 0] = im_i
                rois_sbj[:, 1:5] = _rois_sbj
                _rois_obj = _augment_gt_boxes_by_perturbation(
                    unique_obj_gt_boxes[:, 1:5], im_width, im_height)
                rois_obj = np.zeros((_rois_obj.shape[0], 5), dtype=np.float32)
                rois_obj[:, 0] = im_i
                rois_obj[:, 1:5] = _rois_obj
                # de-duplicate the perturbed RoIs as well
                rows = set()
                unique_sbj_rois_inds = []
                for idx, row in enumerate(rois_sbj):
                    if tuple(row) not in rows:
                        rows.add(tuple(row))
                        unique_sbj_rois_inds.append(idx)
                unique_rois_sbj = rois_sbj[unique_sbj_rois_inds, :]
                rows = set()
                unique_obj_rois_inds = []
                for idx, row in enumerate(rois_obj):
                    if tuple(row) not in rows:
                        rows.add(tuple(row))
                        unique_obj_rois_inds.append(idx)
                unique_rois_obj = rois_obj[unique_obj_rois_inds, :]
                # candidate RoIs = perturbed boxes + GT boxes (class column dropped)
                unique_all_rois_sbj = \
                    np.vstack((unique_rois_sbj, unique_sbj_gt_boxes[:, :-1]))
                unique_all_rois_obj = \
                    np.vstack((unique_rois_obj, unique_obj_gt_boxes[:, :-1]))
                sbj_gt_boxes = sbj_gt_boxes[:, 1:]  # strip off batch index
                obj_gt_boxes = obj_gt_boxes[:, 1:]
                unique_sbj_gt_boxes = unique_sbj_gt_boxes[:, 1:]  # strip off batch index
                unique_obj_gt_boxes = unique_obj_gt_boxes[:, 1:]
                unique_sbj_gt_vecs = sbj_gt_vecs[unique_sbj_gt_inds]
                unique_obj_gt_vecs = obj_gt_vecs[unique_obj_gt_inds]
                unique_sbj_gt_labels = sbj_gt_labels[unique_sbj_gt_inds]
                unique_obj_gt_labels = obj_gt_labels[unique_obj_gt_inds]
            else:
                # use precomputed proposals: rescale them and overwrite the
                # batch index column with this image's index in the minibatch
                unique_all_rois_sbj = proposals[im_i]['unique_all_rois_sbj'] * scale
                unique_all_rois_obj = proposals[im_i]['unique_all_rois_obj'] * scale
                unique_all_rois_sbj[:, 0] = im_i
                unique_all_rois_obj[:, 0] = im_i
                unique_sbj_gt_inds = proposals[im_i]['unique_sbj_gt_inds']
                unique_obj_gt_inds = proposals[im_i]['unique_obj_gt_inds']
                sbj_gt_boxes = sbj_gt_boxes[:, 1:]  # strip off batch index
                obj_gt_boxes = obj_gt_boxes[:, 1:]
                unique_sbj_gt_boxes = sbj_gt_boxes[unique_sbj_gt_inds, :]
                unique_obj_gt_boxes = obj_gt_boxes[unique_obj_gt_inds, :]
                unique_sbj_gt_vecs = sbj_gt_vecs[unique_sbj_gt_inds]
                unique_obj_gt_vecs = obj_gt_vecs[unique_obj_gt_inds]
                unique_sbj_gt_labels = sbj_gt_labels[unique_sbj_gt_inds]
                unique_obj_gt_labels = obj_gt_labels[unique_obj_gt_inds]
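            # Dispatch to the loss-specific RoI sampler, which pairs the
            # candidate subject/object RoIs with ground truth and builds the
            # final per-image training blobs.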
            if cfg.MODEL.LOSS_TYPE.find('TRIPLET') >= 0:
                frcn_blobs = _sample_rois_triplet_yall(
                    unique_all_rois_sbj, unique_all_rois_obj,
                    unique_sbj_gt_boxes, unique_obj_gt_boxes,
                    unique_sbj_gt_vecs, unique_obj_gt_vecs,
                    unique_sbj_gt_labels, unique_obj_gt_labels,
                    sbj_gt_boxes, obj_gt_boxes,
                    sbj_gt_vecs, obj_gt_vecs, rel_gt_vecs,
                    rel_gt_labels,
                    low_shot_helper)
            elif cfg.MODEL.LOSS_TYPE == 'SOFTMAX':
                frcn_blobs = _sample_rois_softmax_yall(
                    unique_all_rois_sbj, unique_all_rois_obj,
                    unique_sbj_gt_boxes, unique_obj_gt_boxes,
                    unique_sbj_gt_vecs, unique_obj_gt_vecs,
                    unique_sbj_gt_labels, unique_obj_gt_labels,
                    sbj_gt_boxes, obj_gt_boxes,
                    sbj_gt_vecs, obj_gt_vecs, rel_gt_vecs,
                    rel_gt_labels,
                    low_shot_helper)
            else:
                raise KeyError('Unknown loss type: {}'.format(cfg.MODEL.LOSS_TYPE))
            for k, v in frcn_blobs.items():
                blobs[k].append(v)
        # Concat the training blob lists into tensors
        for k, v in blobs.items():
            if isinstance(v, list) and len(v) > 0:
                blobs[k] = np.concatenate(v)
        return True
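

# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a minimal call into the
# val/test path above. The roidb keys, the proposal keys ('boxes_sbj',
# 'boxes_obj', 'boxes_rel', 'subbatch_id') and the blob names mirror what the
# function reads and writes; the concrete values and the defaultdict-based
# `blobs` container are illustrative only.
if __name__ == '__main__':
    from collections import defaultdict

    # one fake image entry with two ground-truth relationship triplets
    entry = {
        'sbj_max_classes': np.array([1, 2]),
        'obj_max_classes': np.array([3, 1]),
        'rel_max_classes': np.array([5, 7]),
        'sbj_boxes': np.array([[10., 10., 50., 50.], [20., 20., 60., 60.]]),
        'obj_boxes': np.array([[30., 30., 80., 80.], [40., 40., 90., 90.]]),
        'rel_boxes': np.array([[10., 10., 80., 80.], [20., 20., 90., 90.]]),
        'image': '/fake/path/000123.jpg',
    }
    proposals = [{
        'boxes_sbj': entry['sbj_boxes'].copy(),
        'boxes_obj': entry['obj_boxes'].copy(),
        'boxes_rel': entry['rel_boxes'].copy(),
        'subbatch_id': 0,
    }]
    blobs = defaultdict(list)
    add_fast_rcnn_blobs(
        blobs, im_scales=[1.0], landb=None, roidb=[entry], roidb_inds=[0],
        proposals=proposals, split='val', low_shot_helper=None)
    print(blobs['sbj_rois'].shape)  # (2, 5)
    print(blobs['image_id'])        # [123.]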