lib/utils/misc.py
import numpy as np

from caffe2.python import workspace
from core.config import config as cfg  # project config; exact import path may vary


def get_flops_params(model):
    """Count FLOPs and parameters of Conv/FC/BatchMatMul ops on the master GPU."""
    model_ops = model.net.Proto().op
    master_gpu = 'gpu_{}'.format(cfg.ROOT_GPU_ID)
    param_ops = []
    for op in model_ops:
        # Only count compute-heavy ops whose first input lives on the master GPU.
        if op.type in ['Conv', 'FC', 'BatchMatMul'] and \
                op.input[0].find(master_gpu) >= 0:
            param_ops.append(op)
    num_flops = 0
    num_params = 0
    for op in param_ops:
        op_type = op.type
        op_inputs = op.input
        op_output = op.output[0]
        layer_flops = 0
        layer_params = 0
        if op_type == 'Conv':
            for op_input in op_inputs:
                if '_w' in op_input:
                    param_blob = op_input
                    param_shape = np.array(
                        workspace.FetchBlob(str(param_blob))).shape
                    layer_params = np.prod(param_shape)
            # FLOPs = weight params x spatial (or spatio-temporal) output size.
            output_shape = np.array(
                workspace.FetchBlob(str(op_output))).shape
            layer_flops = layer_params * np.prod(output_shape[2:])
        elif op_type == 'FC':
            for op_input in op_inputs:
                if '_w' in op_input:
                    param_blob = op_input
                    param_shape = np.array(
                        workspace.FetchBlob(str(param_blob))).shape
                    layer_params = np.prod(param_shape)
            # For FC, FLOPs equal the number of weight params (one MAC each).
            layer_flops = layer_params
        elif op_type == 'BatchMatMul':
            # Skip gradient and shared (weight-tied) matmuls before fetching blobs.
            if 'grad' in op_inputs[0] or 'grad' in op_inputs[1]:
                continue
            if 'shared' in op_inputs[0] or 'shared' in op_inputs[1]:
                continue
            blob_dims = []
            for op_input in op_inputs:
                # Inputs are 3D blobs: (batch, M, K) x (batch, K, N).
                blob_shape = np.array(
                    workspace.FetchBlob(str(op_input))).shape
                blob_dims.append(blob_shape[1])
                blob_dims.append(blob_shape[2])
            blob_dims = np.unique(np.array(blob_dims))
            if len(blob_dims) == 3:
                # Distinct M, K, N: (M x K) x (K x N) costs M * K * N MACs.
                layer_flops = blob_dims[0] * blob_dims[1] * blob_dims[2]
            elif len(blob_dims) == 1:
                # Square matmul: all dims equal.
                layer_flops = blob_dims[0] ** 3
            else:
                layer_flops = 0
                print('Ambiguous BatchMatMul dims; skipping FLOP count for this op')
        num_flops += layer_flops
        num_params += layer_params
    return num_flops, num_params
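
A minimal usage sketch (assumptions: `model` is a Caffe2 model helper whose net has been built and whose param_init_net has already run, so all blobs exist in the workspace; the GFLOP/M-param scaling is for readability only):

# Hypothetical call site, e.g. right after model creation in a training script.
num_flops, num_params = get_flops_params(model)
print('Total Conv/FC/BatchMatMul FLOPs: {:.2f}G, params: {:.2f}M'.format(
    num_flops / 1e9, num_params / 1e6))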