in evaluation/tiny_benchmark/third/Cityscapes/evaluation/eval_script/origin_eval_MR_multisetup.py [0:0]
def accumulate(self, p=None):
    '''
    Accumulate per image evaluation results and store the result in self.eval.

    Builds a (T, R, K, M) recall table indexed by IoU threshold, FPPI
    (false-positives-per-image) threshold, category, and max-detections
    setting; each cell holds the best recall achievable at that FPPI level
    (-1 where no data exists).

    :param p: input params for evaluation (defaults to self.params)
    :return: None (result stored in self.eval)
    '''
    print('Accumulating evaluation results...')
    tic = time.time()
    if not self.evalImgs:
        # NOTE(review): execution continues and will fail downstream if
        # evaluate() was never run — kept to match the reference behavior.
        print('Please run evaluate() first')
    # allows input customized parameters
    if p is None:
        p = self.params
    p.catIds = p.catIds if p.useCats == 1 else [-1]
    T = len(p.iouThrs)   # number of IoU thresholds
    R = len(p.fppiThrs)  # number of FPPI thresholds
    K = len(p.catIds) if p.useCats else 1
    M = len(p.maxDets)
    ys = -np.ones((T, R, K, M))  # -1 for the recall of absent categories
    # create dictionary for future indexing
    _pe = self._paramsEval
    catIds = [1]  # _pe.catIds if _pe.useCats else [-1]
    setK = set(catIds)
    setM = set(_pe.maxDets)
    setI = set(_pe.imgIds)
    # get inds (into the params used at evaluate() time) to accumulate over
    k_list = [n for n, k in enumerate(p.catIds) if k in setK]
    m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
    i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
    I0 = len(_pe.imgIds)
    # retrieve E at each category and max number of detections
    for k, k0 in enumerate(k_list):
        # evalImgs is laid out category-major: block k0 spans I0 images
        Nk = k0 * I0
        for m, maxDet in enumerate(m_list):
            E = [self.evalImgs[Nk + i] for i in i_list]
            E = [e for e in E if e is not None]
            if len(E) == 0:
                continue
            dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])
            # different sorting method generates slightly different results.
            # mergesort is used to be consistent as Matlab implementation.
            inds = np.argsort(-dtScores, kind='mergesort')
            dtm = np.concatenate([e['dtMatches'][:, 0:maxDet] for e in E], axis=1)[:, inds]
            dtIg = np.concatenate([e['dtIgnore'][:, 0:maxDet] for e in E], axis=1)[:, inds]
            gtIg = np.concatenate([e['gtIgnore'] for e in E])
            npig = np.count_nonzero(gtIg == 0)  # number of non-ignored GTs
            if npig == 0:
                continue
            tps = np.logical_and(dtm, np.logical_not(dtIg))
            fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg))
            # drop ignored detections before accumulating TP/FP counts
            inds = np.where(dtIg == 0)[1]
            tps = tps[:, inds]
            fps = fps[:, inds]
            # np.float was removed in NumPy 1.24; builtin float is the same dtype
            tp_sum = np.cumsum(tps, axis=1).astype(dtype=float)
            fp_sum = np.cumsum(fps, axis=1).astype(dtype=float)
            for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
                tp = np.array(tp)
                fppi = np.array(fp) / I0  # false positives per image
                nd = len(tp)
                recall = tp / npig
                q = np.zeros((R,))
                # numpy is slow without cython optimization for accessing elements
                # use python array gets significant speed improvement
                recall = recall.tolist()
                q = q.tolist()
                # make recall monotonically non-decreasing from right to left
                for i in range(nd - 1, 0, -1):
                    if recall[i] < recall[i - 1]:
                        recall[i - 1] = recall[i]
                # index of last operating point whose fppi <= threshold;
                # pi may be -1 (threshold below smallest fppi), which picks
                # the final recall — matches the reference implementation
                inds = np.searchsorted(fppi, p.fppiThrs, side='right') - 1
                try:
                    for ri, pi in enumerate(inds):
                        q[ri] = recall[pi]
                except IndexError:
                    # no valid operating point; leave remaining q entries at 0
                    pass
                ys[t, :, k, m] = np.array(q)
    self.eval = {
        'params': p,
        'counts': [T, R, K, M],
        'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        'TP': ys,
    }
    toc = time.time()
    print('DONE (t={:0.2f}s).'.format(toc - tic))