From 51fafe50c8644226d2130e63af2fa4a7fedc7147 Mon Sep 17 00:00:00 2001 From: Andrey Pokidov Date: Sun, 22 Mar 2026 23:08:07 +0700 Subject: [PATCH] =?UTF-8?q?=D0=94=D0=BE=D0=B1=D0=B0=D0=B2=D0=BB=D0=B5?= =?UTF-8?q?=D0=BD=D1=8B=20=D0=BE=D0=BF=D0=B5=D1=80=D0=B0=D1=86=D0=B8=D0=B8?= =?UTF-8?q?=20=D0=BF=D0=BE=D0=BB=D1=83=D1=87=D0=B5=D0=BD=D0=B8=D1=8F=20?= =?UTF-8?q?=D0=B8=20=D0=B7=D0=B0=D0=B4=D0=B0=D0=BD=D0=B8=D1=8F=20=D0=BF?= =?UTF-8?q?=D0=BE=D0=B7=D0=B8=D1=86=D0=B8=D0=B8=20=D0=B4=D0=BB=D1=8F=20Rig?= =?UTF-8?q?id=20Pose?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- basic-geometry/rigid-pose3.c | 80 ++++++++++-------- basic-geometry/rigid-pose3.h | 152 +++++++++++++++++++++++++++-------- 2 files changed, 164 insertions(+), 68 deletions(-) diff --git a/basic-geometry/rigid-pose3.c b/basic-geometry/rigid-pose3.c index e2613b3..ea66fef 100644 --- a/basic-geometry/rigid-pose3.c +++ b/basic-geometry/rigid-pose3.c @@ -6,65 +6,77 @@ extern inline void bgc_fp64_rigid_pose3_reset(BGC_FP64_RigidPose3* pose); extern inline void _bgc_fp32_rigid_pose3_normalize(BGC_FP32_RigidPose3* pose); extern inline void _bgc_fp64_rigid_pose3_normalize(BGC_FP64_RigidPose3* pose); -extern inline void bgc_fp32_rigid_pose3_get_dual_quaternion(BGC_FP32_DualQuaternion* quaternion, const BGC_FP32_RigidPose3* pose); -extern inline void bgc_fp64_rigid_pose3_get_dual_quaternion(BGC_FP64_DualQuaternion* quaternion, const BGC_FP64_RigidPose3* pose); +extern inline void bgc_fp32_rigid_pose3_get_dual_quaternion(BGC_FP32_DualQuaternion* quaternion, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_dual_quaternion(BGC_FP64_DualQuaternion* quaternion, const BGC_FP64_RigidPose3* const pose); -extern inline void bgc_fp32_rigid_pose3_set_dual_quaternion(BGC_FP32_RigidPose3* pose, const BGC_FP32_DualQuaternion* quaternion); -extern inline void bgc_fp64_rigid_pose3_set_dual_quaternion(BGC_FP64_RigidPose3* pose, const 
BGC_FP64_DualQuaternion* quaternion); +extern inline void bgc_fp32_rigid_pose3_set_dual_quaternion(BGC_FP32_RigidPose3* pose, const BGC_FP32_DualQuaternion* const quaternion); +extern inline void bgc_fp64_rigid_pose3_set_dual_quaternion(BGC_FP64_RigidPose3* pose, const BGC_FP64_DualQuaternion* const quaternion); -extern inline void bgc_fp32_rigid_pose3_get_real_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* pose); -extern inline void bgc_fp64_rigid_pose3_get_real_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* pose); +extern inline void bgc_fp32_rigid_pose3_get_real_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_real_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* const pose); -extern inline void bgc_fp32_rigid_pose3_get_dual_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* pose); -extern inline void bgc_fp64_rigid_pose3_get_dual_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* pose); +extern inline void bgc_fp32_rigid_pose3_get_dual_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_dual_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* const pose); -extern inline void bgc_fp32_rigid_pose3_copy(BGC_FP32_RigidPose3* destination, const BGC_FP32_RigidPose3* source); -extern inline void bgc_fp64_rigid_pose3_copy(BGC_FP64_RigidPose3* destination, const BGC_FP64_RigidPose3* source); +extern inline void bgc_fp32_rigid_pose3_copy(BGC_FP32_RigidPose3* destination, const BGC_FP32_RigidPose3* const source); +extern inline void bgc_fp64_rigid_pose3_copy(BGC_FP64_RigidPose3* destination, const BGC_FP64_RigidPose3* const source); extern inline void bgc_fp32_rigid_pose3_swap(BGC_FP32_RigidPose3* pose1, BGC_FP32_RigidPose3* pose2); extern inline void bgc_fp64_rigid_pose3_swap(BGC_FP64_RigidPose3* pose1, BGC_FP64_RigidPose3* pose2); 
-extern inline void bgc_fp32_rigid_pose3_convert_to_fp64(BGC_FP64_RigidPose3* destination, const BGC_FP32_RigidPose3* source); -extern inline void bgc_fp64_rigid_pose3_convert_to_fp32(BGC_FP32_RigidPose3* destination, const BGC_FP64_RigidPose3* source); +extern inline void bgc_fp32_rigid_pose3_convert_to_fp64(BGC_FP64_RigidPose3* destination, const BGC_FP32_RigidPose3* const source); +extern inline void bgc_fp64_rigid_pose3_convert_to_fp32(BGC_FP32_RigidPose3* destination, const BGC_FP64_RigidPose3* const source); extern inline void bgc_fp32_rigid_pose3_shorten(BGC_FP32_RigidPose3* pose); extern inline void bgc_fp64_rigid_pose3_shorten(BGC_FP64_RigidPose3* pose); -extern inline void bgc_fp32_rigid_pose3_get_shortened(BGC_FP32_RigidPose3* shortened, const BGC_FP32_RigidPose3* pose); -extern inline void bgc_fp64_rigid_pose3_get_shortened(BGC_FP64_RigidPose3* shortened, const BGC_FP64_RigidPose3* pose); +extern inline void bgc_fp32_rigid_pose3_get_shortened(BGC_FP32_RigidPose3* shortened, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_shortened(BGC_FP64_RigidPose3* shortened, const BGC_FP64_RigidPose3* const pose); extern inline void bgc_fp32_rigid_pose3_alternate(BGC_FP32_RigidPose3* pose); extern inline void bgc_fp64_rigid_pose3_alternate(BGC_FP64_RigidPose3* pose); -extern inline void bgc_fp32_rigid_pose3_get_alternative(BGC_FP32_RigidPose3* alternative, const BGC_FP32_RigidPose3* pose); -extern inline void bgc_fp64_rigid_pose3_get_alternative(BGC_FP64_RigidPose3* alternative, const BGC_FP64_RigidPose3* pose); +extern inline void bgc_fp32_rigid_pose3_get_alternative(BGC_FP32_RigidPose3* alternative, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_alternative(BGC_FP64_RigidPose3* alternative, const BGC_FP64_RigidPose3* const pose); extern inline void bgc_fp32_rigid_pose3_revert(BGC_FP32_RigidPose3* pose); extern inline void bgc_fp64_rigid_pose3_revert(BGC_FP64_RigidPose3* pose); -extern 
inline void bgc_fp32_rigid_pose3_get_reverse(BGC_FP32_RigidPose3* reverse, const BGC_FP32_RigidPose3* pose); -extern inline void bgc_fp64_rigid_pose3_get_reverse(BGC_FP64_RigidPose3* reverse, const BGC_FP64_RigidPose3* pose); +extern inline void bgc_fp32_rigid_pose3_get_reverse(BGC_FP32_RigidPose3* reverse, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_reverse(BGC_FP64_RigidPose3* reverse, const BGC_FP64_RigidPose3* const pose); -extern inline void bgc_fp32_rigid_pose3_combine(BGC_FP32_RigidPose3* combination, const BGC_FP32_RigidPose3* first, const BGC_FP32_RigidPose3* second); -extern inline void bgc_fp64_rigid_pose3_combine(BGC_FP64_RigidPose3* combination, const BGC_FP64_RigidPose3* first, const BGC_FP64_RigidPose3* second); +extern inline void bgc_fp32_rigid_pose3_combine(BGC_FP32_RigidPose3* combination, const BGC_FP32_RigidPose3* const first, const BGC_FP32_RigidPose3* const second); +extern inline void bgc_fp64_rigid_pose3_combine(BGC_FP64_RigidPose3* combination, const BGC_FP64_RigidPose3* const first, const BGC_FP64_RigidPose3* const second); -extern inline void bgc_fp32_rigid_pose3_exclude(BGC_FP32_RigidPose3* difference, const BGC_FP32_RigidPose3* base, const BGC_FP32_RigidPose3* excludant); -extern inline void bgc_fp64_rigid_pose3_exclude(BGC_FP64_RigidPose3* difference, const BGC_FP64_RigidPose3* base, const BGC_FP64_RigidPose3* excludant); +extern inline void bgc_fp32_rigid_pose3_exclude(BGC_FP32_RigidPose3* difference, const BGC_FP32_RigidPose3* const base, const BGC_FP32_RigidPose3* const excludant); +extern inline void bgc_fp64_rigid_pose3_exclude(BGC_FP64_RigidPose3* difference, const BGC_FP64_RigidPose3* const base, const BGC_FP64_RigidPose3* const excludant); -extern inline void bgc_fp32_rigid_pose3_get_outward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* pose); -extern inline void bgc_fp64_rigid_pose3_get_outward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* pose); +extern 
inline void bgc_fp32_rigid_pose3_get_outward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_outward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* const pose); -extern inline void bgc_fp32_rigid_pose3_get_inward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* pose); -extern inline void bgc_fp64_rigid_pose3_get_inward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* pose); +extern inline void bgc_fp32_rigid_pose3_get_inward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_inward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* const pose); -extern inline void bgc_fp32_rigid_pose3_get_outer_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* pose); -extern inline void bgc_fp64_rigid_pose3_get_outer_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* pose); +extern inline void bgc_fp32_rigid_pose3_get_outer_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_outer_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* const pose); -extern inline void bgc_fp32_rigid_pose3_get_inner_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* pose); -extern inline void bgc_fp64_rigid_pose3_get_inner_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* pose); +extern inline void bgc_fp32_rigid_pose3_get_inner_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_inner_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* const pose); -extern inline void bgc_fp32_rigid_pose3_get_outward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* pose); -extern inline void bgc_fp64_rigid_pose3_get_outward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* pose); +extern inline void 
bgc_fp32_rigid_pose3_get_outward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_outward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* const pose); -extern inline void bgc_fp32_rigid_pose3_get_inward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* pose); -extern inline void bgc_fp64_rigid_pose3_get_inward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* pose); +extern inline void bgc_fp32_rigid_pose3_get_inward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_inward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* const pose); + +extern inline void bgc_fp32_rigid_pose3_get_outer_position(BGC_FP32_Position3* position, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_outer_position(BGC_FP64_Position3* position, const BGC_FP64_RigidPose3* const pose); + +extern inline void bgc_fp32_rigid_pose3_set_outer_position(BGC_FP32_RigidPose3* pose, const BGC_FP32_Position3* const position); +extern inline void bgc_fp64_rigid_pose3_set_outer_position(BGC_FP64_RigidPose3* pose, const BGC_FP64_Position3* const position); + +extern inline void bgc_fp32_rigid_pose3_get_inner_position(BGC_FP32_Position3* position, const BGC_FP32_RigidPose3* const pose); +extern inline void bgc_fp64_rigid_pose3_get_inner_position(BGC_FP64_Position3* position, const BGC_FP64_RigidPose3* const pose); + +extern inline void bgc_fp32_rigid_pose3_set_inner_position(BGC_FP32_RigidPose3* pose, const BGC_FP32_Position3* const position); +extern inline void bgc_fp64_rigid_pose3_set_inner_position(BGC_FP64_RigidPose3* pose, const BGC_FP64_Position3* const position); diff --git a/basic-geometry/rigid-pose3.h b/basic-geometry/rigid-pose3.h index 80a4663..ce704d8 100644 --- a/basic-geometry/rigid-pose3.h +++ b/basic-geometry/rigid-pose3.h @@ -80,25 +80,25 @@ inline void 
_bgc_fp64_rigid_pose3_normalize(BGC_FP64_RigidPose3* pose) // ============ Get Dual Quaternion ============= // -inline void bgc_fp32_rigid_pose3_get_dual_quaternion(BGC_FP32_DualQuaternion* quaternion, const BGC_FP32_RigidPose3* pose) +inline void bgc_fp32_rigid_pose3_get_dual_quaternion(BGC_FP32_DualQuaternion* quaternion, const BGC_FP32_RigidPose3* const pose) { bgc_fp32_dual_quaternion_copy(quaternion, &pose->_versor); } -inline void bgc_fp64_rigid_pose3_get_dual_quaternion(BGC_FP64_DualQuaternion* quaternion, const BGC_FP64_RigidPose3* pose) +inline void bgc_fp64_rigid_pose3_get_dual_quaternion(BGC_FP64_DualQuaternion* quaternion, const BGC_FP64_RigidPose3* const pose) { bgc_fp64_dual_quaternion_copy(quaternion, &pose->_versor); } // ============ Get Dual Quaternion ============= // -inline void bgc_fp32_rigid_pose3_set_dual_quaternion(BGC_FP32_RigidPose3* pose, const BGC_FP32_DualQuaternion* quaternion) +inline void bgc_fp32_rigid_pose3_set_dual_quaternion(BGC_FP32_RigidPose3* pose, const BGC_FP32_DualQuaternion* const quaternion) { bgc_fp32_dual_quaternion_copy(&pose->_versor, quaternion); _bgc_fp32_rigid_pose3_normalize(pose); } -inline void bgc_fp64_rigid_pose3_set_dual_quaternion(BGC_FP64_RigidPose3* pose, const BGC_FP64_DualQuaternion* quaternion) +inline void bgc_fp64_rigid_pose3_set_dual_quaternion(BGC_FP64_RigidPose3* pose, const BGC_FP64_DualQuaternion* const quaternion) { bgc_fp64_dual_quaternion_copy(&pose->_versor, quaternion); _bgc_fp64_rigid_pose3_normalize(pose); @@ -106,36 +106,36 @@ inline void bgc_fp64_rigid_pose3_set_dual_quaternion(BGC_FP64_RigidPose3* pose, // =============== Get Real Part ================ // -inline void bgc_fp32_rigid_pose3_get_real_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* pose) +inline void bgc_fp32_rigid_pose3_get_real_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* const pose) { bgc_fp32_quaternion_copy(quaternion, &pose->_versor.real_part); } -inline void 
bgc_fp64_rigid_pose3_get_real_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* pose) +inline void bgc_fp64_rigid_pose3_get_real_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* const pose) { bgc_fp64_quaternion_copy(quaternion, &pose->_versor.real_part); } // =============== Get Dual Part ================ // -inline void bgc_fp32_rigid_pose3_get_dual_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* pose) +inline void bgc_fp32_rigid_pose3_get_dual_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* const pose) { bgc_fp32_quaternion_copy(quaternion, &pose->_versor.dual_part); } -inline void bgc_fp64_rigid_pose3_get_dual_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* pose) +inline void bgc_fp64_rigid_pose3_get_dual_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* const pose) { bgc_fp64_quaternion_copy(quaternion, &pose->_versor.dual_part); } // ==================== Copy ==================== // -inline void bgc_fp32_rigid_pose3_copy(BGC_FP32_RigidPose3* destination, const BGC_FP32_RigidPose3* source) +inline void bgc_fp32_rigid_pose3_copy(BGC_FP32_RigidPose3* destination, const BGC_FP32_RigidPose3* const source) { bgc_fp32_dual_quaternion_copy(&destination->_versor, &source->_versor); } -inline void bgc_fp64_rigid_pose3_copy(BGC_FP64_RigidPose3* destination, const BGC_FP64_RigidPose3* source) +inline void bgc_fp64_rigid_pose3_copy(BGC_FP64_RigidPose3* destination, const BGC_FP64_RigidPose3* const source) { bgc_fp64_dual_quaternion_copy(&destination->_versor, &source->_versor); } @@ -154,13 +154,13 @@ inline void bgc_fp64_rigid_pose3_swap(BGC_FP64_RigidPose3* pose1, BGC_FP64_Rigid // ================== Convert =================== // -inline void bgc_fp32_rigid_pose3_convert_to_fp64(BGC_FP64_RigidPose3* destination, const BGC_FP32_RigidPose3* source) +inline void bgc_fp32_rigid_pose3_convert_to_fp64(BGC_FP64_RigidPose3* destination, const BGC_FP32_RigidPose3* const source) { 
bgc_fp32_dual_quaternion_convert_to_fp64(&destination->_versor, &source->_versor); _bgc_fp64_rigid_pose3_normalize(destination); } -inline void bgc_fp64_rigid_pose3_convert_to_fp32(BGC_FP32_RigidPose3* destination, const BGC_FP64_RigidPose3* source) +inline void bgc_fp64_rigid_pose3_convert_to_fp32(BGC_FP32_RigidPose3* destination, const BGC_FP64_RigidPose3* const source) { bgc_fp64_dual_quaternion_convert_to_fp32(&destination->_versor, &source->_versor); _bgc_fp32_rigid_pose3_normalize(destination); @@ -186,7 +186,7 @@ inline void bgc_fp64_rigid_pose3_shorten(BGC_FP64_RigidPose3* pose) // =============== Get Shortened ================ // -inline void bgc_fp32_rigid_pose3_get_shortened(BGC_FP32_RigidPose3* shortened, const BGC_FP32_RigidPose3* pose) +inline void bgc_fp32_rigid_pose3_get_shortened(BGC_FP32_RigidPose3* shortened, const BGC_FP32_RigidPose3* const pose) { if (pose->_versor.real_part.s0 < 0.0f) { bgc_fp32_quaternion_get_reverse(&shortened->_versor.real_part, &pose->_versor.real_part); @@ -198,7 +198,7 @@ inline void bgc_fp32_rigid_pose3_get_shortened(BGC_FP32_RigidPose3* shortened, c } } -inline void bgc_fp64_rigid_pose3_get_shortened(BGC_FP64_RigidPose3* shortened, const BGC_FP64_RigidPose3* pose) +inline void bgc_fp64_rigid_pose3_get_shortened(BGC_FP64_RigidPose3* shortened, const BGC_FP64_RigidPose3* const pose) { if (pose->_versor.real_part.s0 < 0.0) { bgc_fp64_quaternion_get_reverse(&shortened->_versor.real_part, &pose->_versor.real_part); @@ -226,13 +226,13 @@ inline void bgc_fp64_rigid_pose3_alternate(BGC_FP64_RigidPose3* pose) // ============== Get Alternative =============== // -inline void bgc_fp32_rigid_pose3_get_alternative(BGC_FP32_RigidPose3* alternative, const BGC_FP32_RigidPose3* pose) +inline void bgc_fp32_rigid_pose3_get_alternative(BGC_FP32_RigidPose3* alternative, const BGC_FP32_RigidPose3* const pose) { bgc_fp32_quaternion_get_reverse(&alternative->_versor.real_part, &pose->_versor.real_part); 
bgc_fp32_quaternion_get_reverse(&alternative->_versor.dual_part, &pose->_versor.dual_part); } -inline void bgc_fp64_rigid_pose3_get_alternative(BGC_FP64_RigidPose3* alternative, const BGC_FP64_RigidPose3* pose) +inline void bgc_fp64_rigid_pose3_get_alternative(BGC_FP64_RigidPose3* alternative, const BGC_FP64_RigidPose3* const pose) { bgc_fp64_quaternion_get_reverse(&alternative->_versor.real_part, &pose->_versor.real_part); bgc_fp64_quaternion_get_reverse(&alternative->_versor.dual_part, &pose->_versor.dual_part); @@ -254,13 +254,13 @@ inline void bgc_fp64_rigid_pose3_revert(BGC_FP64_RigidPose3* pose) // ================ Get Reverse ================= // -inline void bgc_fp32_rigid_pose3_get_reverse(BGC_FP32_RigidPose3* reverse, const BGC_FP32_RigidPose3* pose) +inline void bgc_fp32_rigid_pose3_get_reverse(BGC_FP32_RigidPose3* reverse, const BGC_FP32_RigidPose3* const pose) { bgc_fp32_quaternion_get_conjugate(&reverse->_versor.real_part, &pose->_versor.real_part); bgc_fp32_quaternion_get_conjugate(&reverse->_versor.dual_part, &pose->_versor.dual_part); } -inline void bgc_fp64_rigid_pose3_get_reverse(BGC_FP64_RigidPose3* reverse, const BGC_FP64_RigidPose3* pose) +inline void bgc_fp64_rigid_pose3_get_reverse(BGC_FP64_RigidPose3* reverse, const BGC_FP64_RigidPose3* const pose) { bgc_fp64_quaternion_get_conjugate(&reverse->_versor.real_part, &pose->_versor.real_part); bgc_fp64_quaternion_get_conjugate(&reverse->_versor.dual_part, &pose->_versor.dual_part); @@ -268,13 +268,13 @@ inline void bgc_fp64_rigid_pose3_get_reverse(BGC_FP64_RigidPose3* reverse, const // ================== Combine =================== // -inline void bgc_fp32_rigid_pose3_combine(BGC_FP32_RigidPose3* combination, const BGC_FP32_RigidPose3* first, const BGC_FP32_RigidPose3* second) +inline void bgc_fp32_rigid_pose3_combine(BGC_FP32_RigidPose3* combination, const BGC_FP32_RigidPose3* const first, const BGC_FP32_RigidPose3* const second) { 
bgc_fp32_dual_quaternion_multiply_by_dual_quaternion(&combination->_versor, &second->_versor, &first->_versor); _bgc_fp32_rigid_pose3_normalize(combination); } -inline void bgc_fp64_rigid_pose3_combine(BGC_FP64_RigidPose3* combination, const BGC_FP64_RigidPose3* first, const BGC_FP64_RigidPose3* second) +inline void bgc_fp64_rigid_pose3_combine(BGC_FP64_RigidPose3* combination, const BGC_FP64_RigidPose3* const first, const BGC_FP64_RigidPose3* const second) { bgc_fp64_dual_quaternion_multiply_by_dual_quaternion(&combination->_versor, &second->_versor, &first->_versor); _bgc_fp64_rigid_pose3_normalize(combination); @@ -282,7 +282,7 @@ inline void bgc_fp64_rigid_pose3_combine(BGC_FP64_RigidPose3* combination, const // ================== Exclude =================== // -inline void bgc_fp32_rigid_pose3_exclude(BGC_FP32_RigidPose3* difference, const BGC_FP32_RigidPose3* base, const BGC_FP32_RigidPose3* excludant) +inline void bgc_fp32_rigid_pose3_exclude(BGC_FP32_RigidPose3* difference, const BGC_FP32_RigidPose3* const base, const BGC_FP32_RigidPose3* const excludant) { BGC_FP32_Quaternion dual_part1, dual_part2; @@ -295,7 +295,7 @@ inline void bgc_fp32_rigid_pose3_exclude(BGC_FP32_RigidPose3* difference, const _bgc_fp32_rigid_pose3_normalize(difference); } -inline void bgc_fp64_rigid_pose3_exclude(BGC_FP64_RigidPose3* difference, const BGC_FP64_RigidPose3* base, const BGC_FP64_RigidPose3* excludant) +inline void bgc_fp64_rigid_pose3_exclude(BGC_FP64_RigidPose3* difference, const BGC_FP64_RigidPose3* const base, const BGC_FP64_RigidPose3* const excludant) { BGC_FP64_Quaternion dual_part1, dual_part2; @@ -310,31 +310,31 @@ inline void bgc_fp64_rigid_pose3_exclude(BGC_FP64_RigidPose3* difference, const // ============= Get Outward Matrix ============= // -inline void bgc_fp32_rigid_pose3_get_outward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* pose) +inline void bgc_fp32_rigid_pose3_get_outward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* 
const pose) { _bgc_fp32_versor_get_rotation_matrix(matrix, &pose->_versor.real_part); } -inline void bgc_fp64_rigid_pose3_get_outward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* pose) +inline void bgc_fp64_rigid_pose3_get_outward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* const pose) { _bgc_fp64_versor_get_rotation_matrix(matrix, &pose->_versor.real_part); } // ============= Get Inward Matrix ============== // -inline void bgc_fp32_rigid_pose3_get_inward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* pose) +inline void bgc_fp32_rigid_pose3_get_inward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* const pose) { _bgc_fp32_versor_get_reverse_matrix(matrix, &pose->_versor.real_part); } -inline void bgc_fp64_rigid_pose3_get_inward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* pose) +inline void bgc_fp64_rigid_pose3_get_inward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* const pose) { _bgc_fp64_versor_get_reverse_matrix(matrix, &pose->_versor.real_part); } // ============== Get Outer Shift =============== // -inline void bgc_fp32_rigid_pose3_get_outer_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* pose) +inline void bgc_fp32_rigid_pose3_get_outer_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* const pose) { const BGC_FP32_Quaternion* const real = &pose->_versor.real_part; const BGC_FP32_Quaternion* const dual = &pose->_versor.dual_part; @@ -344,7 +344,7 @@ inline void bgc_fp32_rigid_pose3_get_outer_shift(BGC_FP32_Vector3* shift, const shift->x3 = 2.0f * ((dual->x3 * real->s0 + dual->x2 * real->x1) - (dual->s0 * real->x3 + dual->x1 * real->x2)); } -inline void bgc_fp64_rigid_pose3_get_outer_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* pose) +inline void bgc_fp64_rigid_pose3_get_outer_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* const pose) { const BGC_FP64_Quaternion* const real = &pose->_versor.real_part; const 
BGC_FP64_Quaternion* const dual = &pose->_versor.dual_part; @@ -356,7 +356,7 @@ inline void bgc_fp64_rigid_pose3_get_outer_shift(BGC_FP64_Vector3* shift, const // ============== Get Inner Shift ============== // -inline void bgc_fp32_rigid_pose3_get_inner_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* pose) +inline void bgc_fp32_rigid_pose3_get_inner_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* const pose) { const BGC_FP32_Quaternion* const real = &pose->_versor.real_part; const BGC_FP32_Quaternion* const dual = &pose->_versor.dual_part; @@ -366,7 +366,7 @@ inline void bgc_fp32_rigid_pose3_get_inner_shift(BGC_FP32_Vector3* shift, const shift->x3 = 2.0f * ((dual->s0 * real->x3 - dual->x3 * real->s0) + (dual->x2 * real->x1 - dual->x1 * real->x2)); } -inline void bgc_fp64_rigid_pose3_get_inner_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* pose) +inline void bgc_fp64_rigid_pose3_get_inner_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* const pose) { const BGC_FP64_Quaternion* const real = &pose->_versor.real_part; const BGC_FP64_Quaternion* const dual = &pose->_versor.dual_part; @@ -378,13 +378,13 @@ inline void bgc_fp64_rigid_pose3_get_inner_shift(BGC_FP64_Vector3* shift, const // ============ Get Outward Affine3 ============= // -inline void bgc_fp32_rigid_pose3_get_outward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* pose) +inline void bgc_fp32_rigid_pose3_get_outward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* const pose) { _bgc_fp32_versor_get_rotation_matrix(&affine_map->distortion, &pose->_versor.real_part); bgc_fp32_rigid_pose3_get_outer_shift(&affine_map->shift, pose); } -inline void bgc_fp64_rigid_pose3_get_outward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* pose) +inline void bgc_fp64_rigid_pose3_get_outward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* const pose) { _bgc_fp64_versor_get_rotation_matrix(&affine_map->distortion, 
&pose->_versor.real_part); bgc_fp64_rigid_pose3_get_outer_shift(&affine_map->shift, pose); @@ -392,16 +392,100 @@ inline void bgc_fp64_rigid_pose3_get_outward_affine(BGC_FP64_Affine3* affine_map // ============= Get Inward Affine3 ============= // -inline void bgc_fp32_rigid_pose3_get_inward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* pose) +inline void bgc_fp32_rigid_pose3_get_inward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* const pose) { _bgc_fp32_versor_get_reverse_matrix(&affine_map->distortion, &pose->_versor.real_part); bgc_fp32_rigid_pose3_get_inner_shift(&affine_map->shift, pose); } -inline void bgc_fp64_rigid_pose3_get_inward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* pose) +inline void bgc_fp64_rigid_pose3_get_inward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* const pose) { _bgc_fp64_versor_get_reverse_matrix(&affine_map->distortion, &pose->_versor.real_part); bgc_fp64_rigid_pose3_get_inner_shift(&affine_map->shift, pose); } +// ============ Get Outer Position3 ============= // + +inline void bgc_fp32_rigid_pose3_get_outer_position(BGC_FP32_Position3* position, const BGC_FP32_RigidPose3* const pose) +{ + bgc_fp32_quaternion_copy(&position->turn._versor, &pose->_versor.real_part); + bgc_fp32_rigid_pose3_get_outer_shift(&position->shift, pose); +} + +inline void bgc_fp64_rigid_pose3_get_outer_position(BGC_FP64_Position3* position, const BGC_FP64_RigidPose3* const pose) +{ + bgc_fp64_quaternion_copy(&position->turn._versor, &pose->_versor.real_part); + bgc_fp64_rigid_pose3_get_outer_shift(&position->shift, pose); +} + +// ============ Set Outer Position3 ============= // + +inline void bgc_fp32_rigid_pose3_set_outer_position(BGC_FP32_RigidPose3* pose, const BGC_FP32_Position3* const position) +{ + const BGC_FP32_Quaternion* const versor = &position->turn._versor; + const BGC_FP32_Vector3* const shift = &position->shift; + + bgc_fp32_quaternion_copy(&pose->_versor.real_part, 
versor); + + pose->_versor.dual_part.s0 = -0.5f * (shift->x1 * versor->x1 + shift->x2 * versor->x2 + shift->x3 * versor->x3); + pose->_versor.dual_part.x1 = -0.5f * (shift->x3 * versor->x2 - shift->x2 * versor->x3 - shift->x1 * versor->s0); + pose->_versor.dual_part.x2 = -0.5f * (shift->x1 * versor->x3 - shift->x3 * versor->x1 - shift->x2 * versor->s0); + pose->_versor.dual_part.x3 = -0.5f * (shift->x2 * versor->x1 - shift->x1 * versor->x2 - shift->x3 * versor->s0); +} + +inline void bgc_fp64_rigid_pose3_set_outer_position(BGC_FP64_RigidPose3* pose, const BGC_FP64_Position3* const position) +{ + const BGC_FP64_Quaternion* const versor = &position->turn._versor; + const BGC_FP64_Vector3* const shift = &position->shift; + + bgc_fp64_quaternion_copy(&pose->_versor.real_part, versor); + + pose->_versor.dual_part.s0 = -0.5 * (shift->x1 * versor->x1 + shift->x2 * versor->x2 + shift->x3 * versor->x3); + pose->_versor.dual_part.x1 = -0.5 * (shift->x3 * versor->x2 - shift->x2 * versor->x3 - shift->x1 * versor->s0); + pose->_versor.dual_part.x2 = -0.5 * (shift->x1 * versor->x3 - shift->x3 * versor->x1 - shift->x2 * versor->s0); + pose->_versor.dual_part.x3 = -0.5 * (shift->x2 * versor->x1 - shift->x1 * versor->x2 - shift->x3 * versor->s0); +} + +// ============ Get Inner Position3 ============= // + +inline void bgc_fp32_rigid_pose3_get_inner_position(BGC_FP32_Position3* position, const BGC_FP32_RigidPose3* const pose) +{ + bgc_fp32_quaternion_get_conjugate(&position->turn._versor, &pose->_versor.real_part); + bgc_fp32_rigid_pose3_get_inner_shift(&position->shift, pose); +} + +inline void bgc_fp64_rigid_pose3_get_inner_position(BGC_FP64_Position3* position, const BGC_FP64_RigidPose3* const pose) +{ + bgc_fp64_quaternion_get_conjugate(&position->turn._versor, &pose->_versor.real_part); + bgc_fp64_rigid_pose3_get_inner_shift(&position->shift, pose); +} + +// ============ Set Inner Position3 ============= // + +inline void 
bgc_fp32_rigid_pose3_set_inner_position(BGC_FP32_RigidPose3* pose, const BGC_FP32_Position3* const position) +{ + const BGC_FP32_Quaternion* const versor = &position->turn._versor; + const BGC_FP32_Vector3* const shift = &position->shift; + + bgc_fp32_quaternion_get_conjugate(&pose->_versor.real_part, versor); + + pose->_versor.dual_part.s0 = -0.5f * (versor->x1 * shift->x1 + versor->x2 * shift->x2 + versor->x3 * shift->x3); + pose->_versor.dual_part.x1 = -0.5f * (versor->s0 * shift->x1 + versor->x3 * shift->x2 - versor->x2 * shift->x3); + pose->_versor.dual_part.x2 = -0.5f * (versor->s0 * shift->x2 + versor->x1 * shift->x3 - versor->x3 * shift->x1); + pose->_versor.dual_part.x3 = -0.5f * (versor->s0 * shift->x3 + versor->x2 * shift->x1 - versor->x1 * shift->x2); +} + +inline void bgc_fp64_rigid_pose3_set_inner_position(BGC_FP64_RigidPose3* pose, const BGC_FP64_Position3* const position) +{ + const BGC_FP64_Quaternion* const versor = &position->turn._versor; + const BGC_FP64_Vector3* const shift = &position->shift; + + bgc_fp64_quaternion_get_conjugate(&pose->_versor.real_part, versor); + + pose->_versor.dual_part.s0 = -0.5 * (versor->x1 * shift->x1 + versor->x2 * shift->x2 + versor->x3 * shift->x3); + pose->_versor.dual_part.x1 = -0.5 * (versor->s0 * shift->x1 + versor->x3 * shift->x2 - versor->x2 * shift->x3); + pose->_versor.dual_part.x2 = -0.5 * (versor->s0 * shift->x2 + versor->x1 * shift->x3 - versor->x3 * shift->x1); + pose->_versor.dual_part.x3 = -0.5 * (versor->s0 * shift->x3 + versor->x2 * shift->x1 - versor->x1 * shift->x2); +} + #endif