// Rigid 3-D pose utilities (dual-quaternion representation): normalize,
// reset, copy, swap, fp32/fp64 conversion, and pose composition.
#ifndef _BGC_RIGID_POSE3_H_INCLUDED_
|
|
#define _BGC_RIGID_POSE3_H_INCLUDED_
|
|
|
|
#include "types.h"
|
|
#include "quaternion.h"
|
|
|
|
// ================= Normalize ================== //
|
|
|
|
// Internal helpers, defined in the corresponding .c file.
// Called with the squared magnitude of the pose's real part when it is not
// already unit; presumably rescales both quaternion parts by the inverse
// magnitude — NOTE(review): definition not visible here, confirm in the .c.
void _bgc_fp32_rigid_pose3_normalize_parts(BGC_FP32_RigidPose3* pose, const float square_magnitude);

void _bgc_fp64_rigid_pose3_normalize_parts(BGC_FP64_RigidPose3* pose, const double square_magnitude);
|
|
|
|
// Re-normalizes a dual-quaternion pose in place: rescales so the real part
// has unit magnitude (skipped when it already does), then removes the
// real-part component from the dual part to keep the two parts orthogonal.
inline void _bgc_fp32_rigid_pose3_normalize(BGC_FP32_RigidPose3* pose)
{
    BGC_FP32_Quaternion* real = &pose->_real_part;
    BGC_FP32_Quaternion* dual = &pose->_dual_part;

    const float sq_mag = bgc_fp32_quaternion_get_square_magnitude(real);

    // Rescale only when the real part has drifted off unit length.
    if (!bgc_fp32_is_square_unit(sq_mag)) {
        _bgc_fp32_rigid_pose3_normalize_parts(pose, sq_mag);
    }

    // Orthogonalize: dual -= real * <real, dual>.
    bgc_fp32_quaternion_subtract_scaled(
        dual, dual, real, bgc_fp32_quaternion_get_dot_product(real, dual));
}
|
|
|
|
// Double-precision twin of _bgc_fp32_rigid_pose3_normalize: forces the real
// part to unit magnitude, then orthogonalizes the dual part against it.
inline void _bgc_fp64_rigid_pose3_normalize(BGC_FP64_RigidPose3* pose)
{
    BGC_FP64_Quaternion* real = &pose->_real_part;
    BGC_FP64_Quaternion* dual = &pose->_dual_part;

    const double sq_mag = bgc_fp64_quaternion_get_square_magnitude(real);

    // Rescale only when the real part has drifted off unit length.
    if (!bgc_fp64_is_square_unit(sq_mag)) {
        _bgc_fp64_rigid_pose3_normalize_parts(pose, sq_mag);
    }

    // Orthogonalize: dual -= real * <real, dual>.
    bgc_fp64_quaternion_subtract_scaled(
        dual, dual, real, bgc_fp64_quaternion_get_dot_product(real, dual));
}
|
|
|
|
// ==================== Reset =================== //
|
|
|
|
// Resets the pose to identity: real part = identity rotation (1, 0, 0, 0),
// dual part = zero translation (0, 0, 0, 0).
inline void bgc_fp32_rigid_pose3_reset(BGC_FP32_RigidPose3* pose)
{
    pose->_real_part = (BGC_FP32_Quaternion){ .s0 = 1.0f, .x1 = 0.0f, .x2 = 0.0f, .x3 = 0.0f };
    pose->_dual_part = (BGC_FP32_Quaternion){ .s0 = 0.0f, .x1 = 0.0f, .x2 = 0.0f, .x3 = 0.0f };
}
|
|
|
|
// Resets the pose to identity: real part = identity rotation (1, 0, 0, 0),
// dual part = zero translation (0, 0, 0, 0).
inline void bgc_fp64_rigid_pose3_reset(BGC_FP64_RigidPose3* pose)
{
    pose->_real_part = (BGC_FP64_Quaternion){ .s0 = 1.0, .x1 = 0.0, .x2 = 0.0, .x3 = 0.0 };
    pose->_dual_part = (BGC_FP64_Quaternion){ .s0 = 0.0, .x1 = 0.0, .x2 = 0.0, .x3 = 0.0 };
}
|
|
|
|
// ==================== Copy ==================== //
|
|
|
|
// Copies both quaternion parts of `source` into `destination`.
// The two copies are independent; order is irrelevant.
inline void bgc_fp32_rigid_pose3_copy(BGC_FP32_RigidPose3* destination, const BGC_FP32_RigidPose3* source)
{
    bgc_fp32_quaternion_copy(&destination->_dual_part, &source->_dual_part);
    bgc_fp32_quaternion_copy(&destination->_real_part, &source->_real_part);
}
|
|
|
|
// Copies both quaternion parts of `source` into `destination`.
// The two copies are independent; order is irrelevant.
inline void bgc_fp64_rigid_pose3_copy(BGC_FP64_RigidPose3* destination, const BGC_FP64_RigidPose3* source)
{
    bgc_fp64_quaternion_copy(&destination->_dual_part, &source->_dual_part);
    bgc_fp64_quaternion_copy(&destination->_real_part, &source->_real_part);
}
|
|
|
|
// ==================== Swap ==================== //
|
|
|
|
// Exchanges the contents of two poses part-by-part.
inline void bgc_fp32_rigid_pose3_swap(BGC_FP32_RigidPose3* pose1, BGC_FP32_RigidPose3* pose2)
{
    bgc_fp32_quaternion_swap(&pose1->_dual_part, &pose2->_dual_part);
    bgc_fp32_quaternion_swap(&pose1->_real_part, &pose2->_real_part);
}
|
|
|
|
// Exchanges the contents of two poses part-by-part.
inline void bgc_fp64_rigid_pose3_swap(BGC_FP64_RigidPose3* pose1, BGC_FP64_RigidPose3* pose2)
{
    bgc_fp64_quaternion_swap(&pose1->_dual_part, &pose2->_dual_part);
    bgc_fp64_quaternion_swap(&pose1->_real_part, &pose2->_real_part);
}
|
|
|
|
// ================== Convert =================== //
|
|
|
|
// Widens an fp32 pose into `destination` (fp64). The result is re-normalized
// afterwards so the destination satisfies the dual-quaternion invariants in
// double precision, not merely to fp32 tolerance.
inline void bgc_fp32_rigid_pose3_convert_to_fp64(BGC_FP64_RigidPose3* destination, const BGC_FP32_RigidPose3* source)
{
    bgc_fp32_quaternion_convert_to_fp64(&destination->_dual_part, &source->_dual_part);
    bgc_fp32_quaternion_convert_to_fp64(&destination->_real_part, &source->_real_part);

    _bgc_fp64_rigid_pose3_normalize(destination);
}
|
|
|
|
// Narrows an fp64 pose into `destination` (fp32). The result is
// re-normalized afterwards to repair the rounding introduced by narrowing.
inline void bgc_fp64_rigid_pose3_convert_to_fp32(BGC_FP32_RigidPose3* destination, const BGC_FP64_RigidPose3* source)
{
    bgc_fp64_quaternion_convert_to_fp32(&destination->_dual_part, &source->_dual_part);
    bgc_fp64_quaternion_convert_to_fp32(&destination->_real_part, &source->_real_part);

    _bgc_fp32_rigid_pose3_normalize(destination);
}
|
|
|
|
// ================== Combine =================== //
|
|
|
|
// Composes two poses as the dual-quaternion product `second * first`:
//   real = r2 * r1,  dual = r2 * d1 + d2 * r1.
// Both dual-part products are buffered in locals before any output field is
// written, so `combination` may alias `first` or `second`. Do not reorder
// the real-part write above the two buffered products for that reason.
inline void bgc_fp32_rigid_pose3_combine(BGC_FP32_RigidPose3* combination, const BGC_FP32_RigidPose3* first, const BGC_FP32_RigidPose3* second)
{
    BGC_FP32_Quaternion r2_d1;
    BGC_FP32_Quaternion d2_r1;

    _bgc_fp32_restrict_quaternion_multiply_by_quaternion(&r2_d1, &second->_real_part, &first->_dual_part);
    _bgc_fp32_restrict_quaternion_multiply_by_quaternion(&d2_r1, &second->_dual_part, &first->_real_part);

    bgc_fp32_quaternion_multiply_by_quaternion(&combination->_real_part, &second->_real_part, &first->_real_part);
    bgc_fp32_quaternion_add(&combination->_dual_part, &r2_d1, &d2_r1);

    // Counteract floating-point drift accumulated by the products.
    _bgc_fp32_rigid_pose3_normalize(combination);
}
|
|
|
|
// Composes two poses as the dual-quaternion product `second * first`:
//   real = r2 * r1,  dual = r2 * d1 + d2 * r1.
// Both dual-part products are buffered in locals before any output field is
// written, so `combination` may alias `first` or `second`. Do not reorder
// the real-part write above the two buffered products for that reason.
inline void bgc_fp64_rigid_pose3_combine(BGC_FP64_RigidPose3* combination, const BGC_FP64_RigidPose3* first, const BGC_FP64_RigidPose3* second)
{
    BGC_FP64_Quaternion r2_d1;
    BGC_FP64_Quaternion d2_r1;

    _bgc_fp64_restrict_quaternion_multiply_by_quaternion(&r2_d1, &second->_real_part, &first->_dual_part);
    _bgc_fp64_restrict_quaternion_multiply_by_quaternion(&d2_r1, &second->_dual_part, &first->_real_part);

    bgc_fp64_quaternion_multiply_by_quaternion(&combination->_real_part, &second->_real_part, &first->_real_part);
    bgc_fp64_quaternion_add(&combination->_dual_part, &r2_d1, &d2_r1);

    // Counteract floating-point drift accumulated by the products.
    _bgc_fp64_rigid_pose3_normalize(combination);
}
|
|
|
|
#endif
|