in d2go/modeling/meta_arch/rcnn.py [0:0]
def _apply_eager_mode_quant(cfg, model):
    """Wrap each quantizable part of ``model`` in-place so that Quant/DeQuant
    stubs are inserted for eager-mode quantization.

    Args:
        cfg: D2 config node; ``cfg.MODEL.RPN.IN_FEATURES`` determines how many
            feature maps the RPN head consumes (and 2x that many outputs —
            matching the ``n_outputs`` computation below).
        model: the model to modify in-place; must be a ``GeneralizedRCNN``.

    Returns:
        The same ``model`` object, mutated so that its quantizable submodules
        are replaced with quant-wrapped versions.

    Raises:
        NotImplementedError: if ``model`` is not a ``GeneralizedRCNN``.
    """
    if isinstance(model, GeneralizedRCNN):
        # Wrap backbone and proposal_generator
        if isinstance(model.backbone, FPN):
            # HACK: currently the quantization won't pick up D2's Conv2d, which
            # is used by D2's default FPN (same as FBNetV2FPN); this causes
            # problems if we wrap the entire backbone as a whole. The current
            # solution is to only quantize bottom_up and leave other parts
            # un-quantized. TODO (T109761730): However we need to re-visit this
            # if using another (fbnet-based) FPN module, since the new FPN
            # module might be picked up by quantization.
            model.backbone.bottom_up = wrap_quant_subclass(
                model.backbone.bottom_up,
                n_inputs=1,
                n_outputs=len(model.backbone.bottom_up._out_features),
            )
        else:
            model.backbone = wrap_quant_subclass(
                model.backbone, n_inputs=1, n_outputs=len(model.backbone._out_features)
            )
        # RPN head: one input per IN_FEATURES entry, two outputs per entry
        # (hence the `* 2` below).
        model.proposal_generator.rpn_head = wrap_quant_subclass(
            model.proposal_generator.rpn_head,
            n_inputs=len(cfg.MODEL.RPN.IN_FEATURES),
            n_outputs=len(cfg.MODEL.RPN.IN_FEATURES) * 2,
        )
        # Wrap the roi_heads; box_pooler is not quantized
        if hasattr(model.roi_heads, "box_head"):
            model.roi_heads.box_head = wrap_quant_subclass(
                model.roi_heads.box_head,
                n_inputs=1,
                n_outputs=1,
            )
        # for faster_rcnn_R_50_C4
        if hasattr(model.roi_heads, "res5"):
            model.roi_heads.res5 = wrap_quant_subclass(
                model.roi_heads.res5,
                n_inputs=1,
                n_outputs=1,
            )
        model.roi_heads.box_predictor = wrap_quant_subclass(
            model.roi_heads.box_predictor, n_inputs=1, n_outputs=2
        )
        # Optionally wrap keypoint and mask heads; poolers are not quantized
        if hasattr(model.roi_heads, "keypoint_head"):
            model.roi_heads.keypoint_head = wrap_quant_subclass(
                model.roi_heads.keypoint_head,
                n_inputs=1,
                n_outputs=1,
                wrapped_method_name="layers",
            )
        if hasattr(model.roi_heads, "mask_head"):
            model.roi_heads.mask_head = wrap_quant_subclass(
                model.roi_heads.mask_head,
                n_inputs=1,
                n_outputs=1,
                wrapped_method_name="layers",
            )
        # StandardROIHeadsWithSubClass uses a subclass head
        if hasattr(model.roi_heads, "subclass_head"):
            q_subclass_head = QuantWrapper(model.roi_heads.subclass_head)
            model.roi_heads.subclass_head = q_subclass_head
    else:
        raise NotImplementedError(
            "Eager mode for {} is not supported".format(type(model))
        )
    # TODO: wrap the normalizer and make it quantizable
    # NOTE: GN is not quantizable; assuming all GN follows a quantized conv,
    # wrap them with dequant-quant
    model = wrap_non_quant_group_norm(model)
    return model