def compute_updated_feature(input_feature)

in classification/data_dict/shape_and_feature/update_demo.py

import numpy as np
import torch
import torch.optim as optim

# The remaining names used below (model, argopt, classify_criterion,
# smooth_criterion, get_mask, manually_update_feature, decide_which_part,
# StopCriterion, InputFeature, type_part_dict, swapped_partID, swapped_type,
# SHAPE_FEAT_NUM, TEXTURE_FEAT_NUM, PART_NUM) are defined elsewhere in
# update_demo.py.

def compute_updated_feature(input_feature):
    ''' Use the pre-trained classifier as the editing module and optimize
        input_feature towards the positive direction.
        Args: input_feature (InputFeature): the outfit's original feature before editing.
        Returns: updated_feature (list of InputFeature): snapshots taken over the iterative updates.
    '''
    global swapped_partID
    global swapped_type
    auto_swapped_partID = -1
    auto_swapped_type = -1
    loss = None
    updated_feature = []
    n_iter = 0
    # Prepare mask, original_feat for computing smooth term; opt_feat for actual update
    mask = torch.from_numpy(get_mask(input_feature.feature)).cuda().bool() # boolean mask; byte-mask indexing is deprecated
    opt_feat = torch.from_numpy(input_feature.feature).cuda().float()
    opt_feat.requires_grad = True
    original_feat = opt_feat.detach().clone() # detached copy used as the fixed smoothness target
    # Note: input_feature.feature -- numpy array used to keep parts unchanged;
    #       original_feat -- fixed tensor used to compute the smooth term;
    #       opt_feat -- tensor that is actually updated.
    # Use the positive label (1) as the target direction for the update
    label = torch.tensor([1]).cuda()
    # Prepare optimizer that optimizes the features
    optimizer = optim.SGD([opt_feat], lr=argopt.lr, momentum=0.9)
    # Prepare for stop criterion
    initial_loss = None
    meet_stop_criterion = StopCriterion(argopt.stop_criterion).delegate_child()
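    # Note: StopCriterion appears to act as a factory here -- delegate_child()
    # returns a callable for the chosen criterion (e.g. 'deltaloss'), which is
    # invoked each iteration below with the current loss value.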
    while n_iter <= argopt.max_iter_hr:
        if loss is not None: # Update is computed
            if swapped_partID >= 0: # Which part to swap out is either specified or computed
                #### Manual update ####
                opt_feat = manually_update_feature(opt_feat, input_feature, swapped_partID)
            else: # The part to swap out has not been specified or computed yet
                #### Auto-decide which part to update ####
                auto_swapped_partID = decide_which_part(opt_feat, input_feature.feature)
                swapped_partID = auto_swapped_partID
                swapped_type = type_part_dict[swapped_partID]
                print('automatically decided to swap out part %d' % swapped_partID)
                #### Manual update ####
                opt_feat = manually_update_feature(opt_feat, input_feature, swapped_partID)
        # Rebuild the optimizer each iteration: manually_update_feature may have
        # replaced opt_feat with a new tensor that the old optimizer would not track.
        optimizer = optim.SGD([opt_feat], lr=argopt.lr, momentum=0.9)
        optimizer.zero_grad()
        ###### Forward ######
        output = model(opt_feat.unsqueeze(0)) # Add a batch dimension
        classify_loss = classify_criterion(output, label)
        smooth_loss = 0.0
        if argopt.lambda_smooth > 0:
            smooth_loss = smooth_criterion(opt_feat[mask], original_feat[mask]) # MSELoss(input, target); the target must have requires_grad=False
        loss = classify_loss + argopt.lambda_smooth * smooth_loss
        if (argopt.stop_criterion == 'deltaloss') and (initial_loss is None):
            initial_loss = loss.item()
            meet_stop_criterion.set_initial_loss(initial_loss)

        if meet_stop_criterion(loss.item()):
            break
        ##### Backward #####
        loss.backward()
        if n_iter % argopt.display_freq == (argopt.display_freq-1):
            print('Iteration: %d, loss: %f' % (n_iter+1, loss.item()))
            # Snapshot as float64: InputFeature's toTensor converts float64 to
            # float32, and a float32 array cannot go through that conversion again.
            updated_feature.append(InputFeature(SHAPE_FEAT_NUM, TEXTURE_FEAT_NUM,
                                                PART_NUM, np.float64(opt_feat.data.cpu().numpy())))
        n_iter += 1
        optimizer.step()
    return updated_feature
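
For context, a minimal sketch of how this routine might be driven. It assumes the module-level model, criteria, and argopt are already configured as in update_demo.py; the input file name and raw-feature shape are hypothetical placeholders, not part of the repo.

# Usage sketch -- 'outfit_feature.npy' is a hypothetical input file.
feat = np.load('outfit_feature.npy')
input_feature = InputFeature(SHAPE_FEAT_NUM, TEXTURE_FEAT_NUM,
                             PART_NUM, np.float64(feat))
snapshots = compute_updated_feature(input_feature)
print('collected %d intermediate features' % len(snapshots))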
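
Stripped of the part-swapping logic, the update reduces to standard classifier-guided feature editing: freeze the classifier, treat the feature tensor as the optimized parameter, and descend on the classification loss toward the positive label plus a masked smoothness penalty. Below is a self-contained sketch under those assumptions (cross-entropy for classify_criterion and MSE for smooth_criterion, per the inline comment); every name in it is illustrative, not the repo's API.

import torch
import torch.nn as nn
import torch.optim as optim

def edit_feature(model, feat, mask, lr=0.01, lambda_smooth=1.0, steps=100):
    model.eval()                                    # freeze the classifier
    for p in model.parameters():
        p.requires_grad_(False)
    opt_feat = feat.clone().requires_grad_(True)    # the feature is the parameter
    original = feat.detach().clone()                # fixed smoothness target
    target = torch.tensor([1], device=feat.device)  # positive direction
    ce, mse = nn.CrossEntropyLoss(), nn.MSELoss()
    optimizer = optim.SGD([opt_feat], lr=lr, momentum=0.9)
    for _ in range(steps):
        optimizer.zero_grad()
        loss = ce(model(opt_feat.unsqueeze(0)), target)
        loss = loss + lambda_smooth * mse(opt_feat[mask], original[mask])
        loss.backward()
        optimizer.step()
    return opt_feat.detach()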