in run_nerf_helpers.py (ray_bending.__init__)
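# Assumed module-level imports for this excerpt (they are used below but sit at the
# top of run_nerf_helpers.py, outside the snippet):
#   import numpy as np
#   import torch
#   import torch.nn as nn
#   import torch.nn.functional as F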
def __init__(self, input_ch, ray_bending_latent_size, ray_bending_mode, embed_fn):
    super(ray_bending, self).__init__()
    self.use_positionally_encoded_input = False
    self.input_ch = input_ch if self.use_positionally_encoded_input else 3
    self.output_ch = 3  # don't change
    self.ray_bending_latent_size = ray_bending_latent_size
    self.ray_bending_mode = ray_bending_mode
    self.embed_fn = embed_fn
    self.use_rigidity_network = True
    # simple scene editing. set to None during training.
    self.rigidity_test_time_cutoff = None
    self.test_time_scaling = None
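    # The bending network below maps a 3D point (raw xyz here, since
    # use_positionally_encoded_input is False) concatenated with a ray-bending
    # latent code to a 3D offset (output_ch = 3).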
    if self.ray_bending_mode == "simple_neural":
        self.activation_function = F.relu  # F.relu, torch.sin
        self.hidden_dimensions = 64  # 32
        self.network_depth = 5  # 3 # at least 2: input -> hidden -> output
        self.skips = []  # do not include 0 and do not include depth-1
        use_last_layer_bias = False
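        # Entries in self.skips are hidden-layer indices that get the network input
        # concatenated back in (NeRF-style skip connections); see the
        # `i + 1 in self.skips` branch below.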
        self.network = nn.ModuleList(
            [
                nn.Linear(
                    self.input_ch + self.ray_bending_latent_size,
                    self.hidden_dimensions,
                )
            ]
            + [
                nn.Linear(
                    self.input_ch + self.hidden_dimensions, self.hidden_dimensions
                )
                if i + 1 in self.skips
                else nn.Linear(self.hidden_dimensions, self.hidden_dimensions)
                for i in range(self.network_depth - 2)
            ]
            + [
                nn.Linear(
                    self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias
                )
            ]
        )
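        # With the defaults above (input_ch = 3, hidden_dimensions = 64,
        # network_depth = 5, no skips) this builds:
        #   Linear(3 + latent, 64) -> 3 x Linear(64, 64) -> Linear(64, 3, bias=False)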
        # initialize weights
        with torch.no_grad():
            for i, layer in enumerate(self.network[:-1]):
                if self.activation_function.__name__ == "sin":
                    # SIREN (Implicit Neural Representations with Periodic Activation
                    # Functions, https://arxiv.org/pdf/2006.09661.pdf, Sec. 3.2)
                    if type(layer) == nn.Linear:
                        a = (
                            1.0 / layer.in_features
                            if i == 0
                            else np.sqrt(6.0 / layer.in_features)
                        )
                        layer.weight.uniform_(-a, a)
                elif self.activation_function.__name__ == "relu":
                    torch.nn.init.kaiming_uniform_(
                        layer.weight, a=0, mode="fan_in", nonlinearity="relu"
                    )
                    torch.nn.init.zeros_(layer.bias)

            # initialize final layer to zero weights to start out with straight rays
            self.network[-1].weight.data *= 0.0
            if use_last_layer_bias:
                self.network[-1].bias.data *= 0.0
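            # Zero final-layer weights (and no bias) make the predicted offsets exactly
            # zero at initialization, i.e. bending starts as the identity ("straight
            # rays"); the final layer's own weight gradients are nonzero, so the offsets
            # can move away from zero during training.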
    if self.use_rigidity_network:
        self.rigidity_activation_function = F.relu  # F.relu, torch.sin
        self.rigidity_hidden_dimensions = 32  # 32
        self.rigidity_network_depth = 3  # 3 # at least 2: input -> hidden -> output
        self.rigidity_skips = []  # do not include 0 and do not include depth-1
        use_last_layer_bias = True
        self.rigidity_tanh = nn.Tanh()
        self.rigidity_network = nn.ModuleList(
            [nn.Linear(self.input_ch, self.rigidity_hidden_dimensions)]
            + [
                nn.Linear(
                    self.input_ch + self.rigidity_hidden_dimensions,
                    self.rigidity_hidden_dimensions,
                )
                if i + 1 in self.rigidity_skips
                else nn.Linear(
                    self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions
                )
                for i in range(self.rigidity_network_depth - 2)
            ]
            + [
                nn.Linear(
                    self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias
                )
            ]
        )
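        # With the defaults above this is Linear(3, 32) -> Linear(32, 32) -> Linear(32, 1).
        # It takes only the 3D point (no latent code) and outputs one scalar;
        # self.rigidity_tanh is presumably applied to that scalar in forward() to squash
        # it into a per-point rigidity score (forward() is not shown in this excerpt).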
        # initialize weights
        with torch.no_grad():
            for i, layer in enumerate(self.rigidity_network[:-1]):
                if self.rigidity_activation_function.__name__ == "sin":
                    # SIREN (Implicit Neural Representations with Periodic Activation
                    # Functions, https://arxiv.org/pdf/2006.09661.pdf, Sec. 3.2)
                    if type(layer) == nn.Linear:
                        a = (
                            1.0 / layer.in_features
                            if i == 0
                            else np.sqrt(6.0 / layer.in_features)
                        )
                        layer.weight.uniform_(-a, a)
                elif self.rigidity_activation_function.__name__ == "relu":
                    torch.nn.init.kaiming_uniform_(
                        layer.weight, a=0, mode="fan_in", nonlinearity="relu"
                    )
                    torch.nn.init.zeros_(layer.bias)

            # initialize final layer to zero weights
            self.rigidity_network[-1].weight.data *= 0.0
            if use_last_layer_bias:
                self.rigidity_network[-1].bias.data *= 0.0
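For intuition, here is a small self-contained sketch (not part of run_nerf_helpers.py) of the zero-initialization trick used for both final layers above: with the last Linear's weights zeroed and its bias disabled, the predicted offsets are exactly zero at the start, so the deformation begins as the identity. The layer sizes mirror the defaults above; the latent size of 32 and the toy layer names are arbitrary choices for illustration.

import torch
import torch.nn as nn

# toy offset head mirroring the zero-initialized final layer above
hidden = nn.Linear(3 + 32, 64)      # 3D point + assumed 32-dim latent code
out = nn.Linear(64, 3, bias=False)  # offset head, no bias
with torch.no_grad():
    out.weight.zero_()              # same effect as `weight.data *= 0.0`

x = torch.randn(8, 3 + 32)
offsets = out(torch.relu(hidden(x)))
print(torch.allclose(offsets, torch.zeros_like(offsets)))  # True: identity warp at init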