in source/render/RigScene.cpp [65:312]
void RigScene::createPrograms() {
  // input is depth
  // texVar is computed from the instance id and scale and offset
  // position is computed by a lookup in the direction texture
  const std::string cameraVS = R"(
    #version 330 core
    uniform mat4 transform;
    uniform vec3 camera;
    uniform int modulo;
    uniform vec2 scale;
    uniform sampler2D directions;
    in vec2 offset; // per-vertex offset
    uniform bool isDepth;
    in float depth; // per-instance depth
    out vec2 texVar;
    uniform float kIPD = 0; // positive for left eye, negative for right eye
    const float kPi = 3.1415926535897932384626433832795;
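    // ipd() fades the interpupillary distance smoothly from ~kIPD near the
    // equator to 0 at the poles (lat -> +-pi/2), so the stereo separation
    // rolls off instead of breaking at the zenith and nadir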
    float ipd(const float lat) {
      const float kA = 25;
      const float kB = 0.17;
      return kIPD * exp(
        -exp(kA * (kB - 0.5 - lat / kPi))
        -exp(kA * (kB - 0.5 + lat / kPi)));
    }
    float sq(const float x) { return x * x; }
    float sq(const vec2 x) { return dot(x, x); }
    float error(const vec2 xy, const float z, const float dEst) {
      // xy^2 = (ipd(atan(z/dEst))/2)^2 + dEst^2 + error <=>
      return sq(xy) - sq(ipd(atan(z / dEst)) / 2) - sq(dEst);
    }
    float solve(const vec3 p) {
      // for the initial estimate, assume lat = atan(z/d) ~ atan(z/xy):
      // p.xy^2 = ipd(atan(z/d))^2 + d^2 ~
      // p.xy^2 = ipd(atan(z/xy))^2 + d^2 <=>
      float d0 = sqrt(sq(p.xy) - sq(ipd(atan(p.z / length(p.xy)))));
      // refine with a few iterations of newton-raphson:
      // two iterations get the error below 2.4e-07 radians at 0.2 m,
      // one iteration gets the same result at 0.7 m,
      // and no iterations are required beyond 6.3 meters
      const int iterations = 2;
      for (int i = 0; i < iterations; ++i) {
        const float kSmidgen = 1e-3;
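        // approximate d(error)/d(dEst) with a forward difference over a
        // relative step of kSmidgen instead of an analytic derivative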
        float d1 = (1 + kSmidgen) * d0;
        float e0 = error(p.xy, p.z, d0);
        float e1 = error(p.xy, p.z, d1);
        float de = (e1 - e0) / (d1 - d0);
        d0 -= e0 / de;
      }
      return d0;
    }
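    // eye() finds the eye offset for omni-directional stereo: the point p
    // is assumed to be seen along a ray tangent to the circle of eye
    // positions (radius eNorm = ipd / 2), i.e. p.xy = e + dEst * t with t
    // the unit tangent at the eye position e, which is the 2x2 linear
    // system A * e = p.xy solved below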
    vec3 eye(const vec3 p) {
      float dEst = solve(p);
      float ipdEst = ipd(atan(p.z / dEst));
      float eNorm = ipdEst / 2;
      float k = -dEst / eNorm;
      mat2 A = mat2(1.0, k, -k, 1.0); // column major!
      return vec3(inverse(A) * p.xy, 0);
    }
    void main() {
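      // decode the 1D instance id into a (column, row) cell of a grid that
      // is modulo columns wide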
      ivec2 instance = ivec2(gl_InstanceID % modulo, gl_InstanceID / modulo);
      vec2 dirVar = scale * (instance + offset);
      texVar = scale * (instance + (isDepth ? vec2(0.5) : offset));
      // map dirVar in [0, 1] to texel centers of the direction texture: the
      // first and last directions sit at the centers of the first and last
      // of its kDirections texels, so scale by (kDirections - 1) / kDirections
      // and add the half-texel offset 0.5 / kDirections
      const float kDirections = 128;
      vec2 texVarScaled = (0.5 + dirVar * (kDirections - 1)) / kDirections;
      vec3 direction = texture(directions, texVarScaled).xyz;
      vec3 rig = camera + depth * direction;
      if (kIPD != 0) { // adjust rig when rendering stereo
        rig -= eye(rig);
      }
      gl_Position = transform * vec4(rig, 1);
    }
  )";
  // an error, e, orthogonal to the ray will result in an angular error of
  //   x ~ tan x = 1/d * e
  // if the error is parallel to the ray - and the viewer is r away from the
  // ray origin - it will result in an angular error of
  //   x ~ tan x ~ r / d^2 * e
  // we can simply do a mesh simplification with these error metrics:
  //   errors in depth are scaled by r / d^2
  //   errors orthogonal to depth are scaled by 1 / d
  // or we can do the mesh simplification in an equi-error space, which can
  // be optimized using standard mesh simplification
  // the actual mesh consists of points at direction(x, y) * depth(x, y)
  // approximation of the equi-error mesh:
  //   (a, b, c) = (x, y, k * r / depth(x, y))
  // real coordinates can be recovered as:
  //   x = a, y = b, depth = k * r / c
  // error introduced in (a, b) will be scaled on the direction sphere by
  //   1 / focal // increasingly inaccurate for large fov lenses
  // and when projected out to depth, (a, b) errors will be scaled by
  //   d / focal
  // error introduced in c will cause errors in depth of
  //   k * r / c^2 =   // using (1/c)' = -1/c^2
  //   d^2 / (k * r)   // using d = k * r / c <=> c = k * r / d
  // from this we can compute k: we want the angular error from the
  // orthogonal part to equal the angular error from the parallel part
  //   1 / d * ortho error = r / d^2 * parallel error <=>
  //   1 / d * d / focal = r / d^2 * d^2 / (k * r) <=>
  //   k = focal
  // input is a, b, c
  // s, t, and depth are recovered from a, b, c
  // position is computed by a lookup in the direction texture
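  // worked example (illustrative numbers, not from this file): with
  // k = focal = 600 and r = 1, a point at depth 3 packs to
  // c = 600 * 1 / 3 = 200 and unpacks to depth = 600 * 1 / 200 = 3;
  // an error of 0.5 in c perturbs depth by about
  // d^2 / (k * r) * 0.5 = 9 / 600 * 0.5 = 0.0075
  // the shader below receives the product k * r as the single uniform focalR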
  const std::string cameraMeshVS = R"(
    #version 330 core
    uniform mat4 transform;
    uniform vec3 camera;
    uniform float focalR;
    uniform vec2 scale;
    uniform sampler2D directions;
    uniform bool forceMono;
    in vec3 abc;
    out vec2 texVar;
    void main() {
      // recover (s, t) from (a, b)
      texVar = scale * abc.xy;
      // recover depth from c
      float depth = forceMono ? focalR / 50.0 : focalR / abc.z;
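      // forceMono ignores c and places every vertex at the constant depth
      // focalR / 50 (the depth that c == 50 would decode to)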
      // scale direction texture coordinates; see cameraVS for discussion
      const float kDirections = 128;
      vec2 texVarScaled = (0.5 + texVar * (kDirections - 1)) / kDirections;
      vec3 direction = texture(directions, texVarScaled).xyz;
      gl_Position = transform * vec4(camera + depth * direction, 1);
    }
  )";
  const std::string fullscreenVS = R"(
    #version 330 core
    in vec2 tex;
    out vec2 texVar;
    void main() {
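      // map tex in [0, 1]^2 to the full [-1, 1]^2 clip-space quad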
      gl_Position = vec4(2 * tex - 1, 0, 1);
      texVar = tex;
    }
  )";
  const std::string passthroughFS = R"(
    #version 330 core
    uniform sampler2D sampler;
    in vec2 texVar;
    out vec4 color;
    void main() {
      color = texture(sampler, texVar);
    }
  )";
  const std::string cameraFS = R"(
    #version 330 core
    uniform int debug;
    uniform sampler2D sampler;
    in vec2 texVar;
    out vec4 color;
    void main() {
      color = texture(sampler, texVar);
      // alpha is a cone: 1 in the center, epsilon at the edges
      const float eps = 1.0f / 255.0f; // one 8-bit quantization step
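      // a nonzero floor keeps even edge pixels at some weight, so the
      // divide by accumulated alpha in resolveFS stays well defined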
      float cone = max(eps, 1 - 2 * length(texVar - 0.5));
      color.a = cone;
    }
  )";
  const std::string effectFS = R"(
    #version 330 core
    uniform float effect;
    uniform sampler2D sampler;
    in vec2 texVar;
    out vec4 color;
    void main() {
      color = texture(sampler, texVar);
      vec4 cyan = vec4(0.5, 1.0, 1.0, 1.0);
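      // the two smoothsteps form a bump in gl_FragCoord.w (~ 1 / depth for
      // a perspective transform) centered at depth == effect and about one
      // unit of depth wide: a cyan ring that sweeps the scene as effect
      // changes; the first step is written as 1 - smoothstep because
      // calling smoothstep with edge0 >= edge1 is undefined in GLSL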
      color += cyan
          * (1 - smoothstep(1/effect, 1/(effect - 0.5), gl_FragCoord.w))
          * smoothstep(1/(effect + 0.5), 1/effect, gl_FragCoord.w);
      // alpha is a cone: 1 in the center, 0 at the edges
      float cone = max(0.0, 1 - 2 * length(texVar - 0.5));
      color.a = cone;
    }
  )";
  const std::string exponentialFS = R"(
    #version 330 core
    uniform sampler2D sampler;
    in vec2 texVar;
    out vec4 color;
    void main() {
      color = texture(sampler, texVar);
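      // exponential weighting: remap the cone alpha so near-center samples
      // dominate the weighted average later computed in resolveFS, while
      // edge samples (alpha near 0) contribute almost nothing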
      color.a = exp(30 * color.a) - 1;
    }
  )";
  const std::string resolveFS = R"(
    #version 330 core
    uniform float fade;
    uniform sampler2D sampler;
    in vec2 texVar;
    out vec4 color;
    void main() {
      vec4 premul = texture(sampler, texVar);
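      // premul accumulates weight * rgb in .rgb and the total weight in .a;
      // dividing un-premultiplies back to a weighted-average color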
      color.rgb = fade * premul.rgb / premul.a;
      color.a = premul.a;
    }
  )";
  cameraProgram = createProgram(cameraVS, cameraFS);
  cameraMeshProgram = createProgram(cameraMeshVS, cameraFS);
  effectMeshProgram = createProgram(cameraMeshVS, effectFS);
  updateProgram = createProgram(fullscreenVS, exponentialFS);
  resolveProgram = createProgram(fullscreenVS, resolveFS);
}