in generation/decode_clothing_features.py [0:0]
def generation_from_decoded_mask(epoch, save_path, fname, features, checkpoints_dir, \
color_mode, netG, model_type, classname, feat_num=3, \
original_mask=False, update=True, debug=True, from_avg=False, remove_background=False):
    ''' Generate final output image from decoded mask and texture features.

    Args:
        epoch (int): edit-module result at this epoch.
        save_path (str): directory the output image is saved to.
        fname (str): output image filename stem.
        features: texture feature tensor handed to the generator.
        checkpoints_dir (str): directory to load generator weights from.
        color_mode (str): 'Lab' selects the Lab color space (used for stability).
        netG (str): 'local' for 256x256 output; 'global' higher-res is not supported.
        model_type (str): only pix2pixHD variants supported here.
        classname (str): label taxonomy from the dataset classname.
        feat_num (int): latent code feature dimension.
        original_mask (boolean): whether to use the original mask instead of the
            decoded one; not supported, to avoid confusion.
        update (boolean): use updated decoded mask; False is not supported.
        debug (boolean): deprecated, unused.
        from_avg (boolean): use average feature values for missing parts.
        remove_background (boolean): use white background.
    '''
    gan_opt = initialize_option(classname, decode = not original_mask)
    gan_opt.checkpoints_dir = checkpoints_dir
    gan_opt.model = model_type
    gan_opt.feat_num = feat_num
    # VAE-based generators need the variational path enabled.
    if model_type.startswith('cvae') or model_type.startswith('bicycle'):
        gan_opt.use_vae = True
    if netG == 'local':
        gan_opt.netG = 'local'
        print('local')
    gan_util.mkdirs(save_path)
    model = gan_create_model(gan_opt)
    if from_avg:
        # Per-class average features precomputed on the training set.
        with open(os.path.join(checkpoints_dir, classname, 'train_avg_features.p'), 'rb') as readfile:
            avg_features = pickle.load(readfile)
        model.set_avg_features(avg_features)
    # Guard clauses for the unsupported configurations.
    if original_mask:
        raise NotImplementedError
    if not update:
        raise NotImplementedError
    # The decoded mask comes from the separate-VAE stage; mirror its directory layout.
    path = os.path.join(os.path.abspath(save_path).replace('generation', 'separate_vae'), '%s_%s.png' % (epoch, fname))
    # BUGFIX: the original checked `if label:` AFTER Image.open(path), which could
    # never trigger — Image.open raises FileNotFoundError first, and an opened PIL
    # image is always truthy. Test for the file's existence up front instead.
    if not os.path.isfile(path):
        print('%s not exist!' % fname)
        exit()
    # Context manager closes the underlying file handle (previously leaked).
    with Image.open(path) as label:
        params = get_params(gan_opt, label.size)
        transform_label = get_transform(gan_opt, params, method=Image.NEAREST, normalize=False)
        label_tensor = transform_label(label) * 255.0
        inst_tensor = transform_label(label)
    generated = model.inference_given_feature(label_tensor.unsqueeze(0), inst_tensor.unsqueeze(0), features, from_avg=from_avg)
    out_path = os.path.join(save_path, '%s_%s.jpg' % (epoch, fname))
    if color_mode == 'Lab':
        if remove_background:
            gan_util.save_image(gan_util.tensor2LABim_nobackground(generated.data[0], label_tensor.data), out_path)
        else:
            gan_util.save_image(gan_util.tensor2LABim(generated.data[0]), out_path)
    else:
        gan_util.save_image(gan_util.tensor2im(generated.data[0]), out_path)