/*
 * bgc-c/basic-geometry/rigid-pose3.h
 *
 * Rigid 3-D pose (rotation + translation) represented internally by a
 * unit dual quaternion (versor).
 */
#ifndef _BGC_RIGID_POSE3_H_INCLUDED_
#define _BGC_RIGID_POSE3_H_INCLUDED_
#include <math.h>
#include "types.h"
#include "affine3.h"
#include "quaternion.h"
#include "dual-quaternion.h"
// ==================== Reset =================== //
inline void bgc_fp32_rigid_pose3_reset(BGC_FP32_RigidPose3* pose)
{
    /* Sets the pose to identity: the internal versor becomes the identity
     * dual quaternion (identity rotation, zero translation). */
    BGC_FP32_Quaternion* real = &pose->_versor.real_part;
    BGC_FP32_Quaternion* dual = &pose->_versor.dual_part;

    real->s0 = 1.0f;
    real->x1 = 0.0f;
    real->x2 = 0.0f;
    real->x3 = 0.0f;

    dual->s0 = 0.0f;
    dual->x1 = 0.0f;
    dual->x2 = 0.0f;
    dual->x3 = 0.0f;
}
inline void bgc_fp64_rigid_pose3_reset(BGC_FP64_RigidPose3* pose)
{
    /* Sets the pose to identity: the internal versor becomes the identity
     * dual quaternion (identity rotation, zero translation). */
    BGC_FP64_Quaternion* real = &pose->_versor.real_part;
    BGC_FP64_Quaternion* dual = &pose->_versor.dual_part;

    real->s0 = 1.0;
    real->x1 = 0.0;
    real->x2 = 0.0;
    real->x3 = 0.0;

    dual->s0 = 0.0;
    dual->x1 = 0.0;
    dual->x2 = 0.0;
    dual->x3 = 0.0;
}
// ================= Normalize ================== //
inline void _bgc_fp32_rigid_pose3_normalize(BGC_FP32_RigidPose3* pose)
{
    /* Renormalizes the internal dual quaternion so it remains a valid
     * rigid-motion versor: unit-length real part, dual part orthogonal
     * to the real part. A degenerate (near-zero or NaN) real part resets
     * the pose to identity. */
    const float real_sq_mag = bgc_fp32_quaternion_get_square_magnitude(&pose->_versor.real_part);

    if (isnan(real_sq_mag) || real_sq_mag <= BGC_FP32_SQUARE_EPSILON) {
        bgc_fp32_rigid_pose3_reset(pose);
        return;
    }

    /* Rescale both parts only when the real part has drifted off unit length. */
    if (!bgc_fp32_is_square_unit(real_sq_mag)) {
        bgc_fp32_dual_quaternion_multiply_by_real_number(&pose->_versor, &pose->_versor, sqrtf(1.0f / real_sq_mag));
    }

    /* Remove the dual part's component parallel to the real part, restoring
     * the orthogonality constraint <real, dual> == 0. */
    const float parallel = bgc_fp32_quaternion_get_dot_product(&pose->_versor.real_part, &pose->_versor.dual_part);
    bgc_fp32_quaternion_subtract_scaled(&pose->_versor.dual_part, &pose->_versor.dual_part, &pose->_versor.real_part, parallel);
}
inline void _bgc_fp64_rigid_pose3_normalize(BGC_FP64_RigidPose3* pose)
{
    /* Renormalizes the internal dual quaternion so it remains a valid
     * rigid-motion versor: unit-length real part, dual part orthogonal
     * to the real part. A degenerate (near-zero or NaN) real part resets
     * the pose to identity. */
    const double real_sq_mag = bgc_fp64_quaternion_get_square_magnitude(&pose->_versor.real_part);

    if (isnan(real_sq_mag) || real_sq_mag <= BGC_FP64_SQUARE_EPSILON) {
        bgc_fp64_rigid_pose3_reset(pose);
        return;
    }

    /* Rescale both parts only when the real part has drifted off unit length. */
    if (!bgc_fp64_is_square_unit(real_sq_mag)) {
        bgc_fp64_dual_quaternion_multiply_by_real_number(&pose->_versor, &pose->_versor, sqrt(1.0 / real_sq_mag));
    }

    /* Remove the dual part's component parallel to the real part, restoring
     * the orthogonality constraint <real, dual> == 0. */
    const double parallel = bgc_fp64_quaternion_get_dot_product(&pose->_versor.real_part, &pose->_versor.dual_part);
    bgc_fp64_quaternion_subtract_scaled(&pose->_versor.dual_part, &pose->_versor.dual_part, &pose->_versor.real_part, parallel);
}
// ============ Get Dual Quaternion ============= //
/* Copies the pose's internal versor into `quaternion`. */
inline void bgc_fp32_rigid_pose3_get_dual_quaternion(BGC_FP32_DualQuaternion* quaternion, const BGC_FP32_RigidPose3* pose)
{
    bgc_fp32_dual_quaternion_copy(quaternion, &pose->_versor);
}
/* Copies the pose's internal versor into `quaternion`. */
inline void bgc_fp64_rigid_pose3_get_dual_quaternion(BGC_FP64_DualQuaternion* quaternion, const BGC_FP64_RigidPose3* pose)
{
    bgc_fp64_dual_quaternion_copy(quaternion, &pose->_versor);
}
// ============ Set Dual Quaternion ============= //
/* Adopts `quaternion` as the pose's versor, then renormalizes so the stored
 * value is a valid rigid-motion versor.
 * Fix: the normalize call referenced `destination`, an identifier that does
 * not exist in this function (copy-paste from the convert functions); the
 * intended argument is `pose`. */
inline void bgc_fp32_rigid_pose3_set_dual_quaternion(BGC_FP32_RigidPose3* pose, const BGC_FP32_DualQuaternion* quaternion)
{
    bgc_fp32_dual_quaternion_copy(&pose->_versor, quaternion);
    _bgc_fp32_rigid_pose3_normalize(pose);
}
/* Adopts `quaternion` as the pose's versor, then renormalizes so the stored
 * value is a valid rigid-motion versor.
 * Fix: the normalize call referenced `destination`, an identifier that does
 * not exist in this function (copy-paste from the convert functions); the
 * intended argument is `pose`. */
inline void bgc_fp64_rigid_pose3_set_dual_quaternion(BGC_FP64_RigidPose3* pose, const BGC_FP64_DualQuaternion* quaternion)
{
    bgc_fp64_dual_quaternion_copy(&pose->_versor, quaternion);
    _bgc_fp64_rigid_pose3_normalize(pose);
}
// =============== Get Real Part ================ //
/* Copies the rotation (real) part of the pose's versor into `quaternion`. */
inline void bgc_fp32_rigid_pose3_get_real_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* pose)
{
    bgc_fp32_quaternion_copy(quaternion, &pose->_versor.real_part);
}
/* Copies the rotation (real) part of the pose's versor into `quaternion`. */
inline void bgc_fp64_rigid_pose3_get_real_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* pose)
{
    bgc_fp64_quaternion_copy(quaternion, &pose->_versor.real_part);
}
// =============== Get Dual Part ================ //
/* Copies the translation-carrying (dual) part of the pose's versor into `quaternion`. */
inline void bgc_fp32_rigid_pose3_get_dual_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* pose)
{
    bgc_fp32_quaternion_copy(quaternion, &pose->_versor.dual_part);
}
/* Copies the translation-carrying (dual) part of the pose's versor into `quaternion`. */
inline void bgc_fp64_rigid_pose3_get_dual_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* pose)
{
    bgc_fp64_quaternion_copy(quaternion, &pose->_versor.dual_part);
}
// ==================== Copy ==================== //
/* Copies `source` into `destination` (no renormalization needed: the source
 * versor is already normalized by construction). */
inline void bgc_fp32_rigid_pose3_copy(BGC_FP32_RigidPose3* destination, const BGC_FP32_RigidPose3* source)
{
    bgc_fp32_dual_quaternion_copy(&destination->_versor, &source->_versor);
}
/* Copies `source` into `destination` (no renormalization needed: the source
 * versor is already normalized by construction). */
inline void bgc_fp64_rigid_pose3_copy(BGC_FP64_RigidPose3* destination, const BGC_FP64_RigidPose3* source)
{
    bgc_fp64_dual_quaternion_copy(&destination->_versor, &source->_versor);
}
// ==================== Swap ==================== //
/* Exchanges the contents of the two poses. */
inline void bgc_fp32_rigid_pose3_swap(BGC_FP32_RigidPose3* pose1, BGC_FP32_RigidPose3* pose2)
{
    bgc_fp32_dual_quaternion_swap(&pose1->_versor, &pose2->_versor);
}
/* Exchanges the contents of the two poses. */
inline void bgc_fp64_rigid_pose3_swap(BGC_FP64_RigidPose3* pose1, BGC_FP64_RigidPose3* pose2)
{
    bgc_fp64_dual_quaternion_swap(&pose1->_versor, &pose2->_versor);
}
// ================== Convert =================== //
/* Widens a single-precision pose to double precision, then renormalizes to
 * absorb the precision change. */
inline void bgc_fp32_rigid_pose3_convert_to_fp64(BGC_FP64_RigidPose3* destination, const BGC_FP32_RigidPose3* source)
{
    bgc_fp32_dual_quaternion_convert_to_fp64(&destination->_versor, &source->_versor);
    _bgc_fp64_rigid_pose3_normalize(destination);
}
/* Narrows a double-precision pose to single precision, then renormalizes to
 * absorb the rounding introduced by the conversion. */
inline void bgc_fp64_rigid_pose3_convert_to_fp32(BGC_FP32_RigidPose3* destination, const BGC_FP64_RigidPose3* source)
{
    bgc_fp64_dual_quaternion_convert_to_fp32(&destination->_versor, &source->_versor);
    _bgc_fp32_rigid_pose3_normalize(destination);
}
// =================== Revert =================== //
/* Inverts the pose in place by conjugating both parts of the versor.
 * For a normalized rigid-motion versor this yields the reverse transform
 * without any renormalization. */
inline void bgc_fp32_rigid_pose3_revert(BGC_FP32_RigidPose3* pose)
{
    bgc_fp32_quaternion_conjugate(&pose->_versor.real_part);
    bgc_fp32_quaternion_conjugate(&pose->_versor.dual_part);
}
/* Inverts the pose in place by conjugating both parts of the versor.
 * For a normalized rigid-motion versor this yields the reverse transform
 * without any renormalization. */
inline void bgc_fp64_rigid_pose3_revert(BGC_FP64_RigidPose3* pose)
{
    bgc_fp64_quaternion_conjugate(&pose->_versor.real_part);
    bgc_fp64_quaternion_conjugate(&pose->_versor.dual_part);
}
// ================ Get Reverse ================= //
/* Writes the inverse of `pose` into `reverse` (out-of-place variant of
 * bgc_fp32_rigid_pose3_revert): both versor parts are conjugated. */
inline void bgc_fp32_rigid_pose3_get_reverse(BGC_FP32_RigidPose3* reverse, const BGC_FP32_RigidPose3* pose)
{
    bgc_fp32_quaternion_get_conjugate(&reverse->_versor.real_part, &pose->_versor.real_part);
    bgc_fp32_quaternion_get_conjugate(&reverse->_versor.dual_part, &pose->_versor.dual_part);
}
/* Writes the inverse of `pose` into `reverse` (out-of-place variant of
 * bgc_fp64_rigid_pose3_revert): both versor parts are conjugated. */
inline void bgc_fp64_rigid_pose3_get_reverse(BGC_FP64_RigidPose3* reverse, const BGC_FP64_RigidPose3* pose)
{
    bgc_fp64_quaternion_get_conjugate(&reverse->_versor.real_part, &pose->_versor.real_part);
    bgc_fp64_quaternion_get_conjugate(&reverse->_versor.dual_part, &pose->_versor.dual_part);
}
// ================== Combine =================== //
/* Composes two poses into `combination`, then renormalizes to contain drift.
 * The versor product is second * first — presumably "apply `first`, then
 * `second`"; NOTE(review): confirm against the dual-quaternion multiply
 * convention. `combination` must not alias `first` or `second` unless the
 * underlying multiply supports in-place operands — TODO confirm. */
inline void bgc_fp32_rigid_pose3_combine(BGC_FP32_RigidPose3* combination, const BGC_FP32_RigidPose3* first, const BGC_FP32_RigidPose3* second)
{
    bgc_fp32_dual_quaternion_multiply_by_dual_quaternion(&combination->_versor, &second->_versor, &first->_versor);
    _bgc_fp32_rigid_pose3_normalize(combination);
}
/* Composes two poses into `combination`, then renormalizes to contain drift.
 * The versor product is second * first — presumably "apply `first`, then
 * `second`"; NOTE(review): confirm against the dual-quaternion multiply
 * convention. `combination` must not alias `first` or `second` unless the
 * underlying multiply supports in-place operands — TODO confirm. */
inline void bgc_fp64_rigid_pose3_combine(BGC_FP64_RigidPose3* combination, const BGC_FP64_RigidPose3* first, const BGC_FP64_RigidPose3* second)
{
    bgc_fp64_dual_quaternion_multiply_by_dual_quaternion(&combination->_versor, &second->_versor, &first->_versor);
    _bgc_fp64_rigid_pose3_normalize(combination);
}
// ================== Exclude =================== //
/* Computes `difference` = base composed with the inverse of `excludant`,
 * i.e. the versor product base * conj(excludant), expanded per part:
 *   real = base.real * conj(exc.real)
 *   dual = base.real * conj(exc.dual) + base.dual * conj(exc.real)
 * The two dual-part products go through temporaries so that `difference`
 * may alias `base` or `excludant`; a final renormalization contains drift. */
inline void bgc_fp32_rigid_pose3_exclude(BGC_FP32_RigidPose3* difference, const BGC_FP32_RigidPose3* base, const BGC_FP32_RigidPose3* excludant)
{
    BGC_FP32_Quaternion dual_part1, dual_part2;
    _bgc_fp32_restrict_quaternion_multiply_by_conjugate(&dual_part1, &base->_versor.real_part, &excludant->_versor.dual_part);
    _bgc_fp32_restrict_quaternion_multiply_by_conjugate(&dual_part2, &base->_versor.dual_part, &excludant->_versor.real_part);
    /* The real part is written last so the temporaries above were computed
     * from unmodified inputs even when outputs alias inputs. */
    bgc_fp32_quaternion_multiply_by_conjugate(&difference->_versor.real_part, &base->_versor.real_part, &excludant->_versor.real_part);
    bgc_fp32_quaternion_add(&difference->_versor.dual_part, &dual_part1, &dual_part2);
    _bgc_fp32_rigid_pose3_normalize(difference);
}
/* Computes `difference` = base composed with the inverse of `excludant`,
 * i.e. the versor product base * conj(excludant), expanded per part:
 *   real = base.real * conj(exc.real)
 *   dual = base.real * conj(exc.dual) + base.dual * conj(exc.real)
 * The two dual-part products go through temporaries so that `difference`
 * may alias `base` or `excludant`; a final renormalization contains drift. */
inline void bgc_fp64_rigid_pose3_exclude(BGC_FP64_RigidPose3* difference, const BGC_FP64_RigidPose3* base, const BGC_FP64_RigidPose3* excludant)
{
    BGC_FP64_Quaternion dual_part1, dual_part2;
    _bgc_fp64_restrict_quaternion_multiply_by_conjugate(&dual_part1, &base->_versor.real_part, &excludant->_versor.dual_part);
    _bgc_fp64_restrict_quaternion_multiply_by_conjugate(&dual_part2, &base->_versor.dual_part, &excludant->_versor.real_part);
    /* The real part is written last so the temporaries above were computed
     * from unmodified inputs even when outputs alias inputs. */
    bgc_fp64_quaternion_multiply_by_conjugate(&difference->_versor.real_part, &base->_versor.real_part, &excludant->_versor.real_part);
    bgc_fp64_quaternion_add(&difference->_versor.dual_part, &dual_part1, &dual_part2);
    _bgc_fp64_rigid_pose3_normalize(difference);
}
// ============= Get Outward Matrix ============= //
/* Writes the rotation matrix of the pose (local -> world direction) built
 * from the versor's real part. */
inline void bgc_fp32_rigid_pose3_get_outward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* pose)
{
    _bgc_fp32_versor_get_rotation_matrix(matrix, &pose->_versor.real_part);
}
/* Writes the rotation matrix of the pose (local -> world direction) built
 * from the versor's real part. */
inline void bgc_fp64_rigid_pose3_get_outward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* pose)
{
    _bgc_fp64_versor_get_rotation_matrix(matrix, &pose->_versor.real_part);
}
// ============= Get Inward Matrix ============== //
/* Writes the reverse rotation matrix of the pose (world -> local direction)
 * built from the versor's real part. */
inline void bgc_fp32_rigid_pose3_get_inward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* pose)
{
    _bgc_fp32_versor_get_reverse_matrix(matrix, &pose->_versor.real_part);
}
/* Writes the reverse rotation matrix of the pose (world -> local direction)
 * built from the versor's real part. */
inline void bgc_fp64_rigid_pose3_get_inward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* pose)
{
    _bgc_fp64_versor_get_reverse_matrix(matrix, &pose->_versor.real_part);
}
// ============ Get Outward Affine3 ============= //
/* Builds the forward affine map of the pose: `distortion` is the rotation
 * matrix of the real part, `shift` is the vector part of the quaternion
 * product dual * conj(real).
 * NOTE(review): the textbook convention t = 2 * dual * conj(real) carries a
 * factor of 2 that is absent here — presumably this library stores the dual
 * part pre-scaled; confirm against the dual-quaternion constructors. */
inline void bgc_fp32_rigid_pose3_get_outward_affine3(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* pose)
{
    _bgc_fp32_versor_get_rotation_matrix(&affine_map->distortion, &pose->_versor.real_part);
    const BGC_FP32_Quaternion * real = &pose->_versor.real_part;
    const BGC_FP32_Quaternion * dual = &pose->_versor.dual_part;
    /* Expanded vector part of dual * conj(real). */
    affine_map->shift.x1 = (dual->x1 * real->s0 + dual->x3 * real->x2) - (dual->s0 * real->x1 + dual->x2 * real->x3);
    affine_map->shift.x2 = (dual->x2 * real->s0 + dual->x1 * real->x3) - (dual->s0 * real->x2 + dual->x3 * real->x1);
    affine_map->shift.x3 = (dual->x3 * real->s0 + dual->x2 * real->x1) - (dual->s0 * real->x3 + dual->x1 * real->x2);
}
/* Builds the forward affine map of the pose: `distortion` is the rotation
 * matrix of the real part, `shift` is the vector part of the quaternion
 * product dual * conj(real).
 * NOTE(review): the textbook convention t = 2 * dual * conj(real) carries a
 * factor of 2 that is absent here — presumably this library stores the dual
 * part pre-scaled; confirm against the dual-quaternion constructors. */
inline void bgc_fp64_rigid_pose3_get_outward_affine3(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* pose)
{
    _bgc_fp64_versor_get_rotation_matrix(&affine_map->distortion, &pose->_versor.real_part);
    const BGC_FP64_Quaternion * real = &pose->_versor.real_part;
    const BGC_FP64_Quaternion * dual = &pose->_versor.dual_part;
    /* Expanded vector part of dual * conj(real). */
    affine_map->shift.x1 = (dual->x1 * real->s0 + dual->x3 * real->x2) - (dual->s0 * real->x1 + dual->x2 * real->x3);
    affine_map->shift.x2 = (dual->x2 * real->s0 + dual->x1 * real->x3) - (dual->s0 * real->x2 + dual->x3 * real->x1);
    affine_map->shift.x3 = (dual->x3 * real->s0 + dual->x2 * real->x1) - (dual->s0 * real->x3 + dual->x1 * real->x2);
}
// ============= Get Inward Affine3 ============= //
/* Builds the inverse (world -> local) affine map of the pose.
 * Fix: the original left a TODO and never wrote `affine_map->shift`,
 * returning it uninitialized. The reverse pose conjugates both versor parts
 * (see bgc_fp32_rigid_pose3_revert), so the shift is obtained by evaluating
 * the outward-shift formula (vector part of dual * conj(real), see
 * bgc_fp32_rigid_pose3_get_outward_affine3) with the vector components of
 * both `real` and `dual` negated, which yields the sign pattern below. */
inline void bgc_fp32_rigid_pose3_get_inward_affine3(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* pose)
{
    _bgc_fp32_versor_get_reverse_matrix(&affine_map->distortion, &pose->_versor.real_part);
    const BGC_FP32_Quaternion * real = &pose->_versor.real_part;
    const BGC_FP32_Quaternion * dual = &pose->_versor.dual_part;
    /* Vector part of conj(dual) * conj(conj(real)) for the reverse versor. */
    affine_map->shift.x1 = (dual->s0 * real->x1 + dual->x3 * real->x2) - (dual->x1 * real->s0 + dual->x2 * real->x3);
    affine_map->shift.x2 = (dual->s0 * real->x2 + dual->x1 * real->x3) - (dual->x2 * real->s0 + dual->x3 * real->x1);
    affine_map->shift.x3 = (dual->s0 * real->x3 + dual->x2 * real->x1) - (dual->x3 * real->s0 + dual->x1 * real->x2);
}
#endif