/*
 * bgc-c/basic-geometry/rigid-pose3.h
 * (491 lines, 21 KiB, C)
 */
#ifndef _BGC_RIGID_POSE3_H_INCLUDED_
#define _BGC_RIGID_POSE3_H_INCLUDED_
#include <math.h>
#include "types.h"
#include "affine3.h"
#include "quaternion.h"
#include "dual-quaternion.h"
// ==================== Reset =================== //
/* Sets the pose to identity: a unit real part (no rotation) and a zero
 * dual part (no translation). */
inline void bgc_fp32_rigid_pose3_reset(BGC_FP32_RigidPose3* pose)
{
    BGC_FP32_Quaternion* const real = &pose->_versor.real_part;
    BGC_FP32_Quaternion* const dual = &pose->_versor.dual_part;
    real->s0 = 1.0f;
    real->x1 = 0.0f;
    real->x2 = 0.0f;
    real->x3 = 0.0f;
    dual->s0 = 0.0f;
    dual->x1 = 0.0f;
    dual->x2 = 0.0f;
    dual->x3 = 0.0f;
}
/* Sets the pose to identity: a unit real part (no rotation) and a zero
 * dual part (no translation). */
inline void bgc_fp64_rigid_pose3_reset(BGC_FP64_RigidPose3* pose)
{
    BGC_FP64_Quaternion* const real = &pose->_versor.real_part;
    BGC_FP64_Quaternion* const dual = &pose->_versor.dual_part;
    real->s0 = 1.0;
    real->x1 = 0.0;
    real->x2 = 0.0;
    real->x3 = 0.0;
    dual->s0 = 0.0;
    dual->x1 = 0.0;
    dual->x2 = 0.0;
    dual->x3 = 0.0;
}
// ================= Normalize ================== //
// Restores the invariants of the internal dual quaternion so it stays a
// valid rigid-transform versor: unit-length real part and a dual part
// orthogonal to it (<real, dual> = 0).
inline void _bgc_fp32_rigid_pose3_normalize(BGC_FP32_RigidPose3* pose)
{
const float square_magnitude = bgc_fp32_quaternion_get_square_magnitude(&pose->_versor.real_part);
// Degenerate rotation part (near-zero or NaN): fall back to the identity pose.
if (square_magnitude <= BGC_FP32_SQUARE_EPSILON || isnan(square_magnitude)) {
bgc_fp32_rigid_pose3_reset(pose);
return;
}
// Rescale both parts only when the real part has drifted off unit length.
if (!bgc_fp32_is_square_unit(square_magnitude)) {
const float multiplier = sqrtf(1.0f / square_magnitude);
bgc_fp32_dual_quaternion_multiply_by_real_number(&pose->_versor, &pose->_versor, multiplier);
}
// Gram-Schmidt step: remove the component of the dual part that lies
// along the real part, enforcing orthogonality.
const float dot_product = bgc_fp32_quaternion_get_dot_product(&pose->_versor.real_part, &pose->_versor.dual_part);
bgc_fp32_quaternion_subtract_scaled(&pose->_versor.dual_part, &pose->_versor.dual_part, &pose->_versor.real_part, dot_product);
}
// Restores the invariants of the internal dual quaternion so it stays a
// valid rigid-transform versor: unit-length real part and a dual part
// orthogonal to it (<real, dual> = 0).
inline void _bgc_fp64_rigid_pose3_normalize(BGC_FP64_RigidPose3* pose)
{
const double square_magnitude = bgc_fp64_quaternion_get_square_magnitude(&pose->_versor.real_part);
// Degenerate rotation part (near-zero or NaN): fall back to the identity pose.
if (square_magnitude <= BGC_FP64_SQUARE_EPSILON || isnan(square_magnitude)) {
bgc_fp64_rigid_pose3_reset(pose);
return;
}
// Rescale both parts only when the real part has drifted off unit length.
if (!bgc_fp64_is_square_unit(square_magnitude)) {
const double multiplier = sqrt(1.0 / square_magnitude);
bgc_fp64_dual_quaternion_multiply_by_real_number(&pose->_versor, &pose->_versor, multiplier);
}
// Gram-Schmidt step: remove the component of the dual part that lies
// along the real part, enforcing orthogonality.
const double dot_product = bgc_fp64_quaternion_get_dot_product(&pose->_versor.real_part, &pose->_versor.dual_part);
bgc_fp64_quaternion_subtract_scaled(&pose->_versor.dual_part, &pose->_versor.dual_part, &pose->_versor.real_part, dot_product);
}
// ============ Get Dual Quaternion ============= //
/* Copies the pose's internal unit dual quaternion into `quaternion`. */
inline void bgc_fp32_rigid_pose3_get_dual_quaternion(BGC_FP32_DualQuaternion* quaternion, const BGC_FP32_RigidPose3* const pose)
{
    *quaternion = pose->_versor;
}
/* Copies the pose's internal unit dual quaternion into `quaternion`. */
inline void bgc_fp64_rigid_pose3_get_dual_quaternion(BGC_FP64_DualQuaternion* quaternion, const BGC_FP64_RigidPose3* const pose)
{
    *quaternion = pose->_versor;
}
// ============ Set Dual Quaternion ============= //
/* Installs `quaternion` as the pose's versor, then re-normalizes so the
 * stored value satisfies the rigid-pose invariants. */
inline void bgc_fp32_rigid_pose3_set_dual_quaternion(BGC_FP32_RigidPose3* pose, const BGC_FP32_DualQuaternion* const quaternion)
{
    pose->_versor = *quaternion;
    _bgc_fp32_rigid_pose3_normalize(pose);
}
/* Installs `quaternion` as the pose's versor, then re-normalizes so the
 * stored value satisfies the rigid-pose invariants. */
inline void bgc_fp64_rigid_pose3_set_dual_quaternion(BGC_FP64_RigidPose3* pose, const BGC_FP64_DualQuaternion* const quaternion)
{
    pose->_versor = *quaternion;
    _bgc_fp64_rigid_pose3_normalize(pose);
}
// =============== Get Real Part ================ //
/* Extracts the real (rotation) part of the pose's versor. */
inline void bgc_fp32_rigid_pose3_get_real_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* const pose)
{
    *quaternion = pose->_versor.real_part;
}
/* Extracts the real (rotation) part of the pose's versor. */
inline void bgc_fp64_rigid_pose3_get_real_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* const pose)
{
    *quaternion = pose->_versor.real_part;
}
// =============== Get Dual Part ================ //
/* Extracts the dual (translation-carrying) part of the pose's versor. */
inline void bgc_fp32_rigid_pose3_get_dual_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* const pose)
{
    *quaternion = pose->_versor.dual_part;
}
/* Extracts the dual (translation-carrying) part of the pose's versor. */
inline void bgc_fp64_rigid_pose3_get_dual_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* const pose)
{
    *quaternion = pose->_versor.dual_part;
}
// ==================== Copy ==================== //
/* Copies one pose into another; no re-normalization is needed because the
 * source already satisfies the invariants. */
inline void bgc_fp32_rigid_pose3_copy(BGC_FP32_RigidPose3* destination, const BGC_FP32_RigidPose3* const source)
{
    destination->_versor = source->_versor;
}
/* Copies one pose into another; no re-normalization is needed because the
 * source already satisfies the invariants. */
inline void bgc_fp64_rigid_pose3_copy(BGC_FP64_RigidPose3* destination, const BGC_FP64_RigidPose3* const source)
{
    destination->_versor = source->_versor;
}
// ==================== Swap ==================== //
/* Exchanges the contents of two poses. */
inline void bgc_fp32_rigid_pose3_swap(BGC_FP32_RigidPose3* pose1, BGC_FP32_RigidPose3* pose2)
{
    const BGC_FP32_DualQuaternion held = pose1->_versor;
    pose1->_versor = pose2->_versor;
    pose2->_versor = held;
}
/* Exchanges the contents of two poses. */
inline void bgc_fp64_rigid_pose3_swap(BGC_FP64_RigidPose3* pose1, BGC_FP64_RigidPose3* pose2)
{
    const BGC_FP64_DualQuaternion held = pose1->_versor;
    pose1->_versor = pose2->_versor;
    pose2->_versor = held;
}
// ================== Convert =================== //
// Widens an fp32 pose to fp64. The result is re-normalized because the
// source versor is unit-length only to fp32 precision.
inline void bgc_fp32_rigid_pose3_convert_to_fp64(BGC_FP64_RigidPose3* destination, const BGC_FP32_RigidPose3* const source)
{
bgc_fp32_dual_quaternion_convert_to_fp64(&destination->_versor, &source->_versor);
_bgc_fp64_rigid_pose3_normalize(destination);
}
// Narrows an fp64 pose to fp32. The result is re-normalized because the
// rounding introduced by the narrowing perturbs the unit-versor invariants.
inline void bgc_fp64_rigid_pose3_convert_to_fp32(BGC_FP32_RigidPose3* destination, const BGC_FP64_RigidPose3* const source)
{
bgc_fp64_dual_quaternion_convert_to_fp32(&destination->_versor, &source->_versor);
_bgc_fp32_rigid_pose3_normalize(destination);
}
// ================== Shorten =================== //
/* Canonicalizes the versor's sign in place: when the real scalar component
 * is negative, both parts are reverted. The two antipodal versors encode
 * the same pose, so the observable transform is unchanged. */
inline void bgc_fp32_rigid_pose3_shorten(BGC_FP32_RigidPose3* pose)
{
    if (pose->_versor.real_part.s0 >= 0.0f || isnan(pose->_versor.real_part.s0)) {
        return;
    }
    bgc_fp32_quaternion_revert(&pose->_versor.real_part);
    bgc_fp32_quaternion_revert(&pose->_versor.dual_part);
}
/* Canonicalizes the versor's sign in place: when the real scalar component
 * is negative, both parts are reverted. The two antipodal versors encode
 * the same pose, so the observable transform is unchanged. */
inline void bgc_fp64_rigid_pose3_shorten(BGC_FP64_RigidPose3* pose)
{
    if (pose->_versor.real_part.s0 >= 0.0 || isnan(pose->_versor.real_part.s0)) {
        return;
    }
    bgc_fp64_quaternion_revert(&pose->_versor.real_part);
    bgc_fp64_quaternion_revert(&pose->_versor.dual_part);
}
// =============== Get Shortened ================ //
/* Out-of-place variant of shorten(): writes into `shortened` the
 * sign-canonical representation of `pose` (real scalar non-negative). */
inline void bgc_fp32_rigid_pose3_get_shortened(BGC_FP32_RigidPose3* shortened, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_rigid_pose3_copy(shortened, pose);
    bgc_fp32_rigid_pose3_shorten(shortened);
}
/* Out-of-place variant of shorten(): writes into `shortened` the
 * sign-canonical representation of `pose` (real scalar non-negative). */
inline void bgc_fp64_rigid_pose3_get_shortened(BGC_FP64_RigidPose3* shortened, const BGC_FP64_RigidPose3* const pose)
{
    bgc_fp64_rigid_pose3_copy(shortened, pose);
    bgc_fp64_rigid_pose3_shorten(shortened);
}
// ================= Alternate ================== //
/* Unconditionally switches to the antipodal versor (both parts reverted).
 * Both representations encode the same rigid transform. */
inline void bgc_fp32_rigid_pose3_alternate(BGC_FP32_RigidPose3* pose)
{
    bgc_fp32_quaternion_revert(&pose->_versor.dual_part);
    bgc_fp32_quaternion_revert(&pose->_versor.real_part);
}
/* Unconditionally switches to the antipodal versor (both parts reverted).
 * Both representations encode the same rigid transform. */
inline void bgc_fp64_rigid_pose3_alternate(BGC_FP64_RigidPose3* pose)
{
    bgc_fp64_quaternion_revert(&pose->_versor.dual_part);
    bgc_fp64_quaternion_revert(&pose->_versor.real_part);
}
// ============== Get Alternative =============== //
/* Out-of-place variant of alternate(): writes the antipodal versor of
 * `pose` into `alternative`. */
inline void bgc_fp32_rigid_pose3_get_alternative(BGC_FP32_RigidPose3* alternative, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_rigid_pose3_copy(alternative, pose);
    bgc_fp32_rigid_pose3_alternate(alternative);
}
/* Out-of-place variant of alternate(): writes the antipodal versor of
 * `pose` into `alternative`. */
inline void bgc_fp64_rigid_pose3_get_alternative(BGC_FP64_RigidPose3* alternative, const BGC_FP64_RigidPose3* const pose)
{
    bgc_fp64_rigid_pose3_copy(alternative, pose);
    bgc_fp64_rigid_pose3_alternate(alternative);
}
// =================== Revert =================== //
/* Inverts the pose in place by conjugating both quaternion parts of the
 * versor (the dual-quaternion conjugate of a unit versor is its inverse). */
inline void bgc_fp32_rigid_pose3_revert(BGC_FP32_RigidPose3* pose)
{
    bgc_fp32_quaternion_conjugate(&pose->_versor.dual_part);
    bgc_fp32_quaternion_conjugate(&pose->_versor.real_part);
}
/* Inverts the pose in place by conjugating both quaternion parts of the
 * versor (the dual-quaternion conjugate of a unit versor is its inverse). */
inline void bgc_fp64_rigid_pose3_revert(BGC_FP64_RigidPose3* pose)
{
    bgc_fp64_quaternion_conjugate(&pose->_versor.dual_part);
    bgc_fp64_quaternion_conjugate(&pose->_versor.real_part);
}
// ================ Get Reverse ================= //
/* Out-of-place variant of revert(): writes the inverse of `pose` into
 * `reverse`. */
inline void bgc_fp32_rigid_pose3_get_reverse(BGC_FP32_RigidPose3* reverse, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_rigid_pose3_copy(reverse, pose);
    bgc_fp32_rigid_pose3_revert(reverse);
}
/* Out-of-place variant of revert(): writes the inverse of `pose` into
 * `reverse`. */
inline void bgc_fp64_rigid_pose3_get_reverse(BGC_FP64_RigidPose3* reverse, const BGC_FP64_RigidPose3* const pose)
{
    bgc_fp64_rigid_pose3_copy(reverse, pose);
    bgc_fp64_rigid_pose3_revert(reverse);
}
// ================== Combine =================== //
// Composes two poses into one. The versor product is `second * first`
// (quaternion products compose right-to-left), so the resulting pose
// presumably applies `first` and then `second` — the combination is then
// re-normalized to suppress accumulated rounding drift.
// NOTE(review): dual-quaternion multiplication is non-commutative; do not
// swap the operand order.
inline void bgc_fp32_rigid_pose3_combine(BGC_FP32_RigidPose3* combination, const BGC_FP32_RigidPose3* const first, const BGC_FP32_RigidPose3* const second)
{
bgc_fp32_dual_quaternion_multiply_by_dual_quaternion(&combination->_versor, &second->_versor, &first->_versor);
_bgc_fp32_rigid_pose3_normalize(combination);
}
// Composes two poses into one. The versor product is `second * first`
// (quaternion products compose right-to-left), so the resulting pose
// presumably applies `first` and then `second` — the combination is then
// re-normalized to suppress accumulated rounding drift.
// NOTE(review): dual-quaternion multiplication is non-commutative; do not
// swap the operand order.
inline void bgc_fp64_rigid_pose3_combine(BGC_FP64_RigidPose3* combination, const BGC_FP64_RigidPose3* const first, const BGC_FP64_RigidPose3* const second)
{
bgc_fp64_dual_quaternion_multiply_by_dual_quaternion(&combination->_versor, &second->_versor, &first->_versor);
_bgc_fp64_rigid_pose3_normalize(combination);
}
// ================== Exclude =================== //
// Computes the relative pose `difference = base * conjugate(excludant)`,
// i.e. the pose that, combined with `excludant`, reproduces `base`.
// The dual-quaternion product with a conjugate is expanded manually:
//   real = B_real * conj(E_real)
//   dual = B_real * conj(E_dual) + B_dual * conj(E_real)
// Temporaries are required because the real part of `difference` is
// written before the dual part is assembled.
inline void bgc_fp32_rigid_pose3_exclude(BGC_FP32_RigidPose3* difference, const BGC_FP32_RigidPose3* const base, const BGC_FP32_RigidPose3* const excludant)
{
BGC_FP32_Quaternion dual_part1, dual_part2;
_bgc_fp32_restrict_quaternion_multiply_by_conjugate(&dual_part1, &base->_versor.real_part, &excludant->_versor.dual_part);
_bgc_fp32_restrict_quaternion_multiply_by_conjugate(&dual_part2, &base->_versor.dual_part, &excludant->_versor.real_part);
bgc_fp32_quaternion_multiply_by_conjugate(&difference->_versor.real_part, &base->_versor.real_part, &excludant->_versor.real_part);
bgc_fp32_quaternion_add(&difference->_versor.dual_part, &dual_part1, &dual_part2);
_bgc_fp32_rigid_pose3_normalize(difference);
}
// Computes the relative pose `difference = base * conjugate(excludant)`,
// i.e. the pose that, combined with `excludant`, reproduces `base`.
// The dual-quaternion product with a conjugate is expanded manually:
//   real = B_real * conj(E_real)
//   dual = B_real * conj(E_dual) + B_dual * conj(E_real)
// Temporaries are required because the real part of `difference` is
// written before the dual part is assembled.
inline void bgc_fp64_rigid_pose3_exclude(BGC_FP64_RigidPose3* difference, const BGC_FP64_RigidPose3* const base, const BGC_FP64_RigidPose3* const excludant)
{
BGC_FP64_Quaternion dual_part1, dual_part2;
_bgc_fp64_restrict_quaternion_multiply_by_conjugate(&dual_part1, &base->_versor.real_part, &excludant->_versor.dual_part);
_bgc_fp64_restrict_quaternion_multiply_by_conjugate(&dual_part2, &base->_versor.dual_part, &excludant->_versor.real_part);
bgc_fp64_quaternion_multiply_by_conjugate(&difference->_versor.real_part, &base->_versor.real_part, &excludant->_versor.real_part);
bgc_fp64_quaternion_add(&difference->_versor.dual_part, &dual_part1, &dual_part2);
_bgc_fp64_rigid_pose3_normalize(difference);
}
// ============= Get Outward Matrix ============= //
/* Builds the rotation matrix of the pose's forward (outward) transform
 * from the real part of the versor. */
inline void bgc_fp32_rigid_pose3_get_outward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* const pose)
{
    const BGC_FP32_Quaternion* const rotation = &pose->_versor.real_part;
    _bgc_fp32_versor_get_rotation_matrix(matrix, rotation);
}
/* Builds the rotation matrix of the pose's forward (outward) transform
 * from the real part of the versor. */
inline void bgc_fp64_rigid_pose3_get_outward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* const pose)
{
    const BGC_FP64_Quaternion* const rotation = &pose->_versor.real_part;
    _bgc_fp64_versor_get_rotation_matrix(matrix, rotation);
}
// ============= Get Inward Matrix ============== //
/* Builds the rotation matrix of the pose's inverse (inward) transform
 * from the real part of the versor. */
inline void bgc_fp32_rigid_pose3_get_inward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* const pose)
{
    const BGC_FP32_Quaternion* const rotation = &pose->_versor.real_part;
    _bgc_fp32_versor_get_reverse_matrix(matrix, rotation);
}
/* Builds the rotation matrix of the pose's inverse (inward) transform
 * from the real part of the versor. */
inline void bgc_fp64_rigid_pose3_get_inward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* const pose)
{
    const BGC_FP64_Quaternion* const rotation = &pose->_versor.real_part;
    _bgc_fp64_versor_get_reverse_matrix(matrix, rotation);
}
// ============== Get Outer Shift =============== //
/* Extracts the translation of the forward transform:
 * t = 2 * vector(dual * conjugate(real)).
 * The term grouping is kept exactly as in the expanded product so the
 * floating-point rounding behavior is unchanged. */
inline void bgc_fp32_rigid_pose3_get_outer_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* const pose)
{
    const BGC_FP32_Quaternion* const r = &pose->_versor.real_part;
    const BGC_FP32_Quaternion* const d = &pose->_versor.dual_part;
    shift->x1 = 2.0f * ((d->x1 * r->s0 + d->x3 * r->x2) - (d->s0 * r->x1 + d->x2 * r->x3));
    shift->x2 = 2.0f * ((d->x2 * r->s0 + d->x1 * r->x3) - (d->s0 * r->x2 + d->x3 * r->x1));
    shift->x3 = 2.0f * ((d->x3 * r->s0 + d->x2 * r->x1) - (d->s0 * r->x3 + d->x1 * r->x2));
}
/* Extracts the translation of the forward transform:
 * t = 2 * vector(dual * conjugate(real)).
 * The term grouping is kept exactly as in the expanded product so the
 * floating-point rounding behavior is unchanged. */
inline void bgc_fp64_rigid_pose3_get_outer_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* const pose)
{
    const BGC_FP64_Quaternion* const r = &pose->_versor.real_part;
    const BGC_FP64_Quaternion* const d = &pose->_versor.dual_part;
    shift->x1 = 2.0 * ((d->x1 * r->s0 + d->x3 * r->x2) - (d->s0 * r->x1 + d->x2 * r->x3));
    shift->x2 = 2.0 * ((d->x2 * r->s0 + d->x1 * r->x3) - (d->s0 * r->x2 + d->x3 * r->x1));
    shift->x3 = 2.0 * ((d->x3 * r->s0 + d->x2 * r->x1) - (d->s0 * r->x3 + d->x1 * r->x2));
}
// ============== Get Inner Shift ============== //
/* Extracts the translation of the inverse transform:
 * t = 2 * vector(conjugate(dual) * real).
 * The term grouping is kept exactly as in the expanded product so the
 * floating-point rounding behavior is unchanged. */
inline void bgc_fp32_rigid_pose3_get_inner_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* const pose)
{
    const BGC_FP32_Quaternion* const r = &pose->_versor.real_part;
    const BGC_FP32_Quaternion* const d = &pose->_versor.dual_part;
    shift->x1 = 2.0f * ((d->s0 * r->x1 - d->x1 * r->s0) + (d->x3 * r->x2 - d->x2 * r->x3));
    shift->x2 = 2.0f * ((d->s0 * r->x2 - d->x2 * r->s0) + (d->x1 * r->x3 - d->x3 * r->x1));
    shift->x3 = 2.0f * ((d->s0 * r->x3 - d->x3 * r->s0) + (d->x2 * r->x1 - d->x1 * r->x2));
}
/* Extracts the translation of the inverse transform:
 * t = 2 * vector(conjugate(dual) * real).
 * The term grouping is kept exactly as in the expanded product so the
 * floating-point rounding behavior is unchanged. */
inline void bgc_fp64_rigid_pose3_get_inner_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* const pose)
{
    const BGC_FP64_Quaternion* const r = &pose->_versor.real_part;
    const BGC_FP64_Quaternion* const d = &pose->_versor.dual_part;
    shift->x1 = 2.0 * ((d->s0 * r->x1 - d->x1 * r->s0) + (d->x3 * r->x2 - d->x2 * r->x3));
    shift->x2 = 2.0 * ((d->s0 * r->x2 - d->x2 * r->s0) + (d->x1 * r->x3 - d->x3 * r->x1));
    shift->x3 = 2.0 * ((d->s0 * r->x3 - d->x3 * r->s0) + (d->x2 * r->x1 - d->x1 * r->x2));
}
// ============ Get Outward Affine3 ============= //
/* Exports the forward transform as an affine map: rotation matrix into
 * `distortion`, translation into `shift`. The two computations are
 * independent. */
inline void bgc_fp32_rigid_pose3_get_outward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_rigid_pose3_get_outer_shift(&affine_map->shift, pose);
    _bgc_fp32_versor_get_rotation_matrix(&affine_map->distortion, &pose->_versor.real_part);
}
/* Exports the forward transform as an affine map: rotation matrix into
 * `distortion`, translation into `shift`. The two computations are
 * independent. */
inline void bgc_fp64_rigid_pose3_get_outward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* const pose)
{
    bgc_fp64_rigid_pose3_get_outer_shift(&affine_map->shift, pose);
    _bgc_fp64_versor_get_rotation_matrix(&affine_map->distortion, &pose->_versor.real_part);
}
// ============= Get Inward Affine3 ============= //
/* Exports the inverse transform as an affine map: reverse rotation matrix
 * into `distortion`, inverse translation into `shift`. */
inline void bgc_fp32_rigid_pose3_get_inward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_rigid_pose3_get_inner_shift(&affine_map->shift, pose);
    _bgc_fp32_versor_get_reverse_matrix(&affine_map->distortion, &pose->_versor.real_part);
}
/* Exports the inverse transform as an affine map: reverse rotation matrix
 * into `distortion`, inverse translation into `shift`. */
inline void bgc_fp64_rigid_pose3_get_inward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* const pose)
{
    bgc_fp64_rigid_pose3_get_inner_shift(&affine_map->shift, pose);
    _bgc_fp64_versor_get_reverse_matrix(&affine_map->distortion, &pose->_versor.real_part);
}
// ============ Get Outer Position3 ============= //
/* Exports the forward transform as a turn + shift pair. The two
 * computations are independent. */
inline void bgc_fp32_rigid_pose3_get_outer_position(BGC_FP32_Position3* position, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_rigid_pose3_get_outer_shift(&position->shift, pose);
    bgc_fp32_quaternion_copy(&position->turn._versor, &pose->_versor.real_part);
}
/* Exports the forward transform as a turn + shift pair. The two
 * computations are independent. */
inline void bgc_fp64_rigid_pose3_get_outer_position(BGC_FP64_Position3* position, const BGC_FP64_RigidPose3* const pose)
{
    bgc_fp64_rigid_pose3_get_outer_shift(&position->shift, pose);
    bgc_fp64_quaternion_copy(&position->turn._versor, &pose->_versor.real_part);
}
// ============ Set Outer Position3 ============= //
/* Rebuilds the versor from a forward turn + shift pair:
 * real = q, dual = 0.5 * (0, t) * q, with the quaternion product expanded
 * term by term in the original grouping (rounding unchanged). */
inline void bgc_fp32_rigid_pose3_set_outer_position(BGC_FP32_RigidPose3* pose, const BGC_FP32_Position3* const position)
{
    const BGC_FP32_Quaternion* const q = &position->turn._versor;
    const BGC_FP32_Vector3* const t = &position->shift;
    bgc_fp32_quaternion_copy(&pose->_versor.real_part, q);
    pose->_versor.dual_part.s0 = -0.5f * (t->x1 * q->x1 + t->x2 * q->x2 + t->x3 * q->x3);
    pose->_versor.dual_part.x1 = -0.5f * (t->x3 * q->x2 - t->x2 * q->x3 - t->x1 * q->s0);
    pose->_versor.dual_part.x2 = -0.5f * (t->x1 * q->x3 - t->x3 * q->x1 - t->x2 * q->s0);
    pose->_versor.dual_part.x3 = -0.5f * (t->x2 * q->x1 - t->x1 * q->x2 - t->x3 * q->s0);
}
/* Rebuilds the versor from a forward turn + shift pair:
 * real = q, dual = 0.5 * (0, t) * q, with the quaternion product expanded
 * term by term in the original grouping (rounding unchanged). */
inline void bgc_fp64_rigid_pose3_set_outer_position(BGC_FP64_RigidPose3* pose, const BGC_FP64_Position3* const position)
{
    const BGC_FP64_Quaternion* const q = &position->turn._versor;
    const BGC_FP64_Vector3* const t = &position->shift;
    bgc_fp64_quaternion_copy(&pose->_versor.real_part, q);
    pose->_versor.dual_part.s0 = -0.5 * (t->x1 * q->x1 + t->x2 * q->x2 + t->x3 * q->x3);
    pose->_versor.dual_part.x1 = -0.5 * (t->x3 * q->x2 - t->x2 * q->x3 - t->x1 * q->s0);
    pose->_versor.dual_part.x2 = -0.5 * (t->x1 * q->x3 - t->x3 * q->x1 - t->x2 * q->s0);
    pose->_versor.dual_part.x3 = -0.5 * (t->x2 * q->x1 - t->x1 * q->x2 - t->x3 * q->s0);
}
// ============ Get Inner Position3 ============= //
/* Exports the inverse transform as a turn + shift pair: conjugated
 * rotation plus the inner translation. The two computations are
 * independent. */
inline void bgc_fp32_rigid_pose3_get_inner_position(BGC_FP32_Position3* position, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_rigid_pose3_get_inner_shift(&position->shift, pose);
    bgc_fp32_quaternion_get_conjugate(&position->turn._versor, &pose->_versor.real_part);
}
/* Exports the inverse transform as a turn + shift pair: conjugated
 * rotation plus the inner translation. The two computations are
 * independent. */
inline void bgc_fp64_rigid_pose3_get_inner_position(BGC_FP64_Position3* position, const BGC_FP64_RigidPose3* const pose)
{
    bgc_fp64_rigid_pose3_get_inner_shift(&position->shift, pose);
    bgc_fp64_quaternion_get_conjugate(&position->turn._versor, &pose->_versor.real_part);
}
// ============ Set Inner Position3 ============= //
/* Rebuilds the versor from an inverse turn + shift pair:
 * real = conj(q), dual = -0.5 * conj(q) * (0, t), with the quaternion
 * product expanded term by term in the original grouping (rounding
 * unchanged). */
inline void bgc_fp32_rigid_pose3_set_inner_position(BGC_FP32_RigidPose3* pose, const BGC_FP32_Position3* const position)
{
    const BGC_FP32_Quaternion* const q = &position->turn._versor;
    const BGC_FP32_Vector3* const t = &position->shift;
    bgc_fp32_quaternion_get_conjugate(&pose->_versor.real_part, q);
    pose->_versor.dual_part.s0 = -0.5f * (q->x1 * t->x1 + q->x2 * t->x2 + q->x3 * t->x3);
    pose->_versor.dual_part.x1 = -0.5f * (q->s0 * t->x1 + q->x3 * t->x2 - q->x2 * t->x3);
    pose->_versor.dual_part.x2 = -0.5f * (q->s0 * t->x2 + q->x1 * t->x3 - q->x3 * t->x1);
    pose->_versor.dual_part.x3 = -0.5f * (q->s0 * t->x3 + q->x2 * t->x1 - q->x1 * t->x2);
}
/* Rebuilds the versor from an inverse turn + shift pair:
 * real = conj(versor), dual = -0.5 * conj(versor) * (0, shift).
 * Fix: the coefficients were written as float literals (-0.5f) in this
 * double-precision routine — harmless only because -0.5 is exactly
 * representable in float, but inconsistent with the fp64 sibling
 * bgc_fp64_rigid_pose3_set_outer_position and a trap for future edits
 * with non-representable constants. Use double literals. */
inline void bgc_fp64_rigid_pose3_set_inner_position(BGC_FP64_RigidPose3* pose, const BGC_FP64_Position3* const position)
{
    const BGC_FP64_Quaternion* const versor = &position->turn._versor;
    const BGC_FP64_Vector3* const shift = &position->shift;
    bgc_fp64_quaternion_get_conjugate(&pose->_versor.real_part, versor);
    pose->_versor.dual_part.s0 = -0.5 * (versor->x1 * shift->x1 + versor->x2 * shift->x2 + versor->x3 * shift->x3);
    pose->_versor.dual_part.x1 = -0.5 * (versor->s0 * shift->x1 + versor->x3 * shift->x2 - versor->x2 * shift->x3);
    pose->_versor.dual_part.x2 = -0.5 * (versor->s0 * shift->x2 + versor->x1 * shift->x3 - versor->x3 * shift->x1);
    pose->_versor.dual_part.x3 = -0.5 * (versor->s0 * shift->x3 + versor->x2 * shift->x1 - versor->x1 * shift->x2);
}
#endif