in src/quaternion.py [0:0]
import torch

import math_helper  # project-local helpers; adjust the import path if needed


def up_forward_to_quat(up: torch.Tensor, forward: torch.Tensor, normalize: bool = True):
    """Convert batched (N, 3) up/forward direction vectors into quaternions in (w, x, y, z) order."""
    # TODO: check whether this really works
    # Build an orthonormal frame: forward, right = up x forward, and a re-orthogonalized up.
    vector = math_helper.normalize_batch(forward, dim=1) if normalize else forward
    right = torch.cross(up, vector, dim=1)
    vector2 = math_helper.normalize_batch(right, dim=1) if normalize else right
    vector3 = torch.cross(vector, vector2, dim=1)
    # Lay the basis vectors out as matrix rows: m0* = right, m1* = up, m2* = forward.
    m00, m01, m02 = vector2[:, 0], vector2[:, 1], vector2[:, 2]
    m10, m11, m12 = vector3[:, 0], vector3[:, 1], vector3[:, 2]
    m20, m21, m22 = vector[:, 0], vector[:, 1], vector[:, 2]
    # Trace of the rotation matrix; decides which conversion branch is numerically stable.
    num8 = (m00 + m11) + m22
    q = torch.zeros((len(up), 4), dtype=up.dtype, device=up.device)
    # Branch 1: positive trace -- the standard, well-conditioned case.
    type1 = num8 > 0.0
    num = torch.sqrt(num8[type1] + 1.0)
    q[type1, 0] = num * 0.5                     # w
    num = 0.5 / num
    q[type1, 1] = (m12 - m21)[type1] * num      # x
    q[type1, 2] = (m20 - m02)[type1] * num      # y
    q[type1, 3] = (m01 - m10)[type1] * num      # z
    # Branch 2: m00 is the largest diagonal element.
    type2 = (m00 >= m11) & (m00 >= m22) & ~type1
    num7 = torch.sqrt((1.0 + m00 - m11 - m22)[type2])
    num4 = 0.5 / num7
    q[type2, 1] = 0.5 * num7
    q[type2, 2] = (m01 + m10)[type2] * num4
    q[type2, 3] = (m02 + m20)[type2] * num4
    q[type2, 0] = (m12 - m21)[type2] * num4
    # Branch 3: m11 is the largest diagonal element.
    type3 = (m11 > m22) & ~(type1 | type2)
    num6 = torch.sqrt((1.0 + m11 - m00 - m22)[type3])
    num3 = 0.5 / num6
    q[type3, 2] = 0.5 * num6
    q[type3, 1] = (m10 + m01)[type3] * num3
    q[type3, 3] = (m21 + m12)[type3] * num3
    q[type3, 0] = (m20 - m02)[type3] * num3
    # Branch 4: m22 is the largest diagonal element.
    type4 = ~(type1 | type2 | type3)
    num5 = torch.sqrt((1.0 + m22 - m00 - m11)[type4])
    num2 = 0.5 / num5
    q[type4, 3] = 0.5 * num5
    q[type4, 1] = (m20 + m02)[type4] * num2
    q[type4, 2] = (m21 + m12)[type4] * num2
    q[type4, 0] = (m01 - m10)[type4] * num2
    return math_helper.normalize_batch(q, dim=1) if normalize else q
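
# ---------------------------------------------------------------------------
# Hypothetical sanity check (not part of the original module; the function
# name and test directions are made up): rotates the local +Z axis by the
# returned quaternions and compares the result against the normalized forward
# inputs. Assumes the (w, x, y, z) layout produced above and rotates a vector
# v by a unit quaternion (w, u) via v' = v + 2 * u x (u x v + w * v).
# ---------------------------------------------------------------------------
def _check_up_forward_to_quat(atol: float = 1e-5) -> bool:
    forward = torch.nn.functional.normalize(
        torch.tensor([[1.0, 0.0, 0.0],
                      [0.0, 0.0, 1.0],
                      [0.0, 0.0, -1.0],
                      [1.0, 2.0, 3.0],
                      [-1.0, 0.5, -2.0]]), dim=1)
    up = torch.tensor([[0.0, 1.0, 0.0]]).repeat(len(forward), 1)

    q = up_forward_to_quat(up, forward)
    w, u = q[:, :1], q[:, 1:]                   # scalar / vector parts
    v = torch.tensor([[0.0, 0.0, 1.0]]).repeat(len(forward), 1)
    rotated = v + 2.0 * torch.cross(u, torch.cross(u, v, dim=1) + w * v, dim=1)
    return bool(torch.allclose(rotated, forward, atol=atol))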