Добавлены операции получения и задания позиции для Rigid Pose
This commit is contained in:
parent
84be068503
commit
51fafe50c8
2 changed files with 164 additions and 68 deletions
|
|
@ -6,65 +6,77 @@ extern inline void bgc_fp64_rigid_pose3_reset(BGC_FP64_RigidPose3* pose);
|
|||
extern inline void _bgc_fp32_rigid_pose3_normalize(BGC_FP32_RigidPose3* pose);
|
||||
extern inline void _bgc_fp64_rigid_pose3_normalize(BGC_FP64_RigidPose3* pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_dual_quaternion(BGC_FP32_DualQuaternion* quaternion, const BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_dual_quaternion(BGC_FP64_DualQuaternion* quaternion, const BGC_FP64_RigidPose3* pose);
|
||||
extern inline void bgc_fp32_rigid_pose3_get_dual_quaternion(BGC_FP32_DualQuaternion* quaternion, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_dual_quaternion(BGC_FP64_DualQuaternion* quaternion, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_set_dual_quaternion(BGC_FP32_RigidPose3* pose, const BGC_FP32_DualQuaternion* quaternion);
|
||||
extern inline void bgc_fp64_rigid_pose3_set_dual_quaternion(BGC_FP64_RigidPose3* pose, const BGC_FP64_DualQuaternion* quaternion);
|
||||
extern inline void bgc_fp32_rigid_pose3_set_dual_quaternion(BGC_FP32_RigidPose3* pose, const BGC_FP32_DualQuaternion* const quaternion);
|
||||
extern inline void bgc_fp64_rigid_pose3_set_dual_quaternion(BGC_FP64_RigidPose3* pose, const BGC_FP64_DualQuaternion* const quaternion);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_real_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_real_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* pose);
|
||||
extern inline void bgc_fp32_rigid_pose3_get_real_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_real_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_dual_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_dual_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* pose);
|
||||
extern inline void bgc_fp32_rigid_pose3_get_dual_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_dual_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_copy(BGC_FP32_RigidPose3* destination, const BGC_FP32_RigidPose3* source);
|
||||
extern inline void bgc_fp64_rigid_pose3_copy(BGC_FP64_RigidPose3* destination, const BGC_FP64_RigidPose3* source);
|
||||
extern inline void bgc_fp32_rigid_pose3_copy(BGC_FP32_RigidPose3* destination, const BGC_FP32_RigidPose3* const source);
|
||||
extern inline void bgc_fp64_rigid_pose3_copy(BGC_FP64_RigidPose3* destination, const BGC_FP64_RigidPose3* const source);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_swap(BGC_FP32_RigidPose3* pose1, BGC_FP32_RigidPose3* pose2);
|
||||
extern inline void bgc_fp64_rigid_pose3_swap(BGC_FP64_RigidPose3* pose1, BGC_FP64_RigidPose3* pose2);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_convert_to_fp64(BGC_FP64_RigidPose3* destination, const BGC_FP32_RigidPose3* source);
|
||||
extern inline void bgc_fp64_rigid_pose3_convert_to_fp32(BGC_FP32_RigidPose3* destination, const BGC_FP64_RigidPose3* source);
|
||||
extern inline void bgc_fp32_rigid_pose3_convert_to_fp64(BGC_FP64_RigidPose3* destination, const BGC_FP32_RigidPose3* const source);
|
||||
extern inline void bgc_fp64_rigid_pose3_convert_to_fp32(BGC_FP32_RigidPose3* destination, const BGC_FP64_RigidPose3* const source);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_shorten(BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_shorten(BGC_FP64_RigidPose3* pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_shortened(BGC_FP32_RigidPose3* shortened, const BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_shortened(BGC_FP64_RigidPose3* shortened, const BGC_FP64_RigidPose3* pose);
|
||||
extern inline void bgc_fp32_rigid_pose3_get_shortened(BGC_FP32_RigidPose3* shortened, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_shortened(BGC_FP64_RigidPose3* shortened, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_alternate(BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_alternate(BGC_FP64_RigidPose3* pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_alternative(BGC_FP32_RigidPose3* alternative, const BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_alternative(BGC_FP64_RigidPose3* alternative, const BGC_FP64_RigidPose3* pose);
|
||||
extern inline void bgc_fp32_rigid_pose3_get_alternative(BGC_FP32_RigidPose3* alternative, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_alternative(BGC_FP64_RigidPose3* alternative, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_revert(BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_revert(BGC_FP64_RigidPose3* pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_reverse(BGC_FP32_RigidPose3* reverse, const BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_reverse(BGC_FP64_RigidPose3* reverse, const BGC_FP64_RigidPose3* pose);
|
||||
extern inline void bgc_fp32_rigid_pose3_get_reverse(BGC_FP32_RigidPose3* reverse, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_reverse(BGC_FP64_RigidPose3* reverse, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_combine(BGC_FP32_RigidPose3* combination, const BGC_FP32_RigidPose3* first, const BGC_FP32_RigidPose3* second);
|
||||
extern inline void bgc_fp64_rigid_pose3_combine(BGC_FP64_RigidPose3* combination, const BGC_FP64_RigidPose3* first, const BGC_FP64_RigidPose3* second);
|
||||
extern inline void bgc_fp32_rigid_pose3_combine(BGC_FP32_RigidPose3* combination, const BGC_FP32_RigidPose3* const first, const BGC_FP32_RigidPose3* const second);
|
||||
extern inline void bgc_fp64_rigid_pose3_combine(BGC_FP64_RigidPose3* combination, const BGC_FP64_RigidPose3* const first, const BGC_FP64_RigidPose3* const second);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_exclude(BGC_FP32_RigidPose3* difference, const BGC_FP32_RigidPose3* base, const BGC_FP32_RigidPose3* excludant);
|
||||
extern inline void bgc_fp64_rigid_pose3_exclude(BGC_FP64_RigidPose3* difference, const BGC_FP64_RigidPose3* base, const BGC_FP64_RigidPose3* excludant);
|
||||
extern inline void bgc_fp32_rigid_pose3_exclude(BGC_FP32_RigidPose3* difference, const BGC_FP32_RigidPose3* const base, const BGC_FP32_RigidPose3* const excludant);
|
||||
extern inline void bgc_fp64_rigid_pose3_exclude(BGC_FP64_RigidPose3* difference, const BGC_FP64_RigidPose3* const base, const BGC_FP64_RigidPose3* const excludant);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_outward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_outward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* pose);
|
||||
extern inline void bgc_fp32_rigid_pose3_get_outward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_outward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_inward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_inward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* pose);
|
||||
extern inline void bgc_fp32_rigid_pose3_get_inward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_inward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_outer_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_outer_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* pose);
|
||||
extern inline void bgc_fp32_rigid_pose3_get_outer_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_outer_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_inner_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_inner_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* pose);
|
||||
extern inline void bgc_fp32_rigid_pose3_get_inner_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_inner_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_outward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_outward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* pose);
|
||||
extern inline void bgc_fp32_rigid_pose3_get_outward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_outward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_inward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_inward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* pose);
|
||||
extern inline void bgc_fp32_rigid_pose3_get_inward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_inward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_outer_position(BGC_FP32_Position3* position, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_outer_position(BGC_FP64_Position3* position, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_set_outer_position(BGC_FP32_RigidPose3* pose, const BGC_FP32_Position3* const position);
|
||||
extern inline void bgc_fp64_rigid_pose3_set_outer_position(BGC_FP64_RigidPose3* pose, const BGC_FP64_Position3* const position);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_get_inner_position(BGC_FP32_Position3* position, const BGC_FP32_RigidPose3* const pose);
|
||||
extern inline void bgc_fp64_rigid_pose3_get_inner_position(BGC_FP64_Position3* position, const BGC_FP64_RigidPose3* const pose);
|
||||
|
||||
extern inline void bgc_fp32_rigid_pose3_set_inner_position(BGC_FP32_RigidPose3* pose, const BGC_FP32_Position3* const position);
|
||||
extern inline void bgc_fp64_rigid_pose3_set_inner_position(BGC_FP64_RigidPose3* pose, const BGC_FP64_Position3* const position);
|
||||
|
|
|
|||
|
|
@ -80,25 +80,25 @@ inline void _bgc_fp64_rigid_pose3_normalize(BGC_FP64_RigidPose3* pose)
|
|||
|
||||
// ============ Get Dual Quaternion ============= //
|
||||
|
||||
// Reads the pose's dual-quaternion versor into `quaternion`.
inline void bgc_fp32_rigid_pose3_get_dual_quaternion(BGC_FP32_DualQuaternion* quaternion, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_dual_quaternion_copy(quaternion, &pose->_versor);
}
|
||||
|
||||
// Reads the pose's dual-quaternion versor into `quaternion` (fp64 variant).
inline void bgc_fp64_rigid_pose3_get_dual_quaternion(BGC_FP64_DualQuaternion* quaternion, const BGC_FP64_RigidPose3* const pose)
{
    bgc_fp64_dual_quaternion_copy(quaternion, &pose->_versor);
}
|
||||
|
||||
// ============ Set Dual Quaternion ============= //
|
||||
|
||||
// Overwrites the pose's versor with `quaternion`, then renormalizes so the
// pose again holds a valid (unit) dual quaternion.
inline void bgc_fp32_rigid_pose3_set_dual_quaternion(BGC_FP32_RigidPose3* pose, const BGC_FP32_DualQuaternion* const quaternion)
{
    bgc_fp32_dual_quaternion_copy(&pose->_versor, quaternion);
    _bgc_fp32_rigid_pose3_normalize(pose);
}
|
||||
|
||||
inline void bgc_fp64_rigid_pose3_set_dual_quaternion(BGC_FP64_RigidPose3* pose, const BGC_FP64_DualQuaternion* quaternion)
|
||||
inline void bgc_fp64_rigid_pose3_set_dual_quaternion(BGC_FP64_RigidPose3* pose, const BGC_FP64_DualQuaternion* const quaternion)
|
||||
{
|
||||
bgc_fp64_dual_quaternion_copy(&pose->_versor, quaternion);
|
||||
_bgc_fp64_rigid_pose3_normalize(pose);
|
||||
|
|
@ -106,36 +106,36 @@ inline void bgc_fp64_rigid_pose3_set_dual_quaternion(BGC_FP64_RigidPose3* pose,
|
|||
|
||||
// =============== Get Real Part ================ //
|
||||
|
||||
// Copies the real (rotation) part of the pose's versor into `quaternion`.
inline void bgc_fp32_rigid_pose3_get_real_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_quaternion_copy(quaternion, &pose->_versor.real_part);
}
|
||||
|
||||
// Copies the real (rotation) part of the pose's versor into `quaternion` (fp64 variant).
inline void bgc_fp64_rigid_pose3_get_real_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* const pose)
{
    bgc_fp64_quaternion_copy(quaternion, &pose->_versor.real_part);
}
|
||||
|
||||
// =============== Get Dual Part ================ //
|
||||
|
||||
// Copies the dual (translation-encoding) part of the pose's versor into `quaternion`.
inline void bgc_fp32_rigid_pose3_get_dual_part(BGC_FP32_Quaternion* quaternion, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_quaternion_copy(quaternion, &pose->_versor.dual_part);
}
|
||||
|
||||
// Copies the dual (translation-encoding) part of the pose's versor into `quaternion` (fp64 variant).
inline void bgc_fp64_rigid_pose3_get_dual_part(BGC_FP64_Quaternion* quaternion, const BGC_FP64_RigidPose3* const pose)
{
    bgc_fp64_quaternion_copy(quaternion, &pose->_versor.dual_part);
}
|
||||
|
||||
// ==================== Copy ==================== //
|
||||
|
||||
// Duplicates `source` into `destination` by copying the underlying versor.
inline void bgc_fp32_rigid_pose3_copy(BGC_FP32_RigidPose3* destination, const BGC_FP32_RigidPose3* const source)
{
    bgc_fp32_dual_quaternion_copy(&destination->_versor, &source->_versor);
}
|
||||
|
||||
// Duplicates `source` into `destination` by copying the underlying versor (fp64 variant).
inline void bgc_fp64_rigid_pose3_copy(BGC_FP64_RigidPose3* destination, const BGC_FP64_RigidPose3* const source)
{
    bgc_fp64_dual_quaternion_copy(&destination->_versor, &source->_versor);
}
|
||||
|
|
@ -154,13 +154,13 @@ inline void bgc_fp64_rigid_pose3_swap(BGC_FP64_RigidPose3* pose1, BGC_FP64_Rigid
|
|||
|
||||
// ================== Convert =================== //
|
||||
|
||||
// Widens an fp32 pose into `destination` (fp64).  The widened versor is
// renormalized afterwards: it was only unit-length to single precision.
inline void bgc_fp32_rigid_pose3_convert_to_fp64(BGC_FP64_RigidPose3* destination, const BGC_FP32_RigidPose3* const source)
{
    bgc_fp32_dual_quaternion_convert_to_fp64(&destination->_versor, &source->_versor);
    _bgc_fp64_rigid_pose3_normalize(destination);
}
|
||||
|
||||
inline void bgc_fp64_rigid_pose3_convert_to_fp32(BGC_FP32_RigidPose3* destination, const BGC_FP64_RigidPose3* source)
|
||||
inline void bgc_fp64_rigid_pose3_convert_to_fp32(BGC_FP32_RigidPose3* destination, const BGC_FP64_RigidPose3* const source)
|
||||
{
|
||||
bgc_fp64_dual_quaternion_convert_to_fp32(&destination->_versor, &source->_versor);
|
||||
_bgc_fp32_rigid_pose3_normalize(destination);
|
||||
|
|
@ -186,7 +186,7 @@ inline void bgc_fp64_rigid_pose3_shorten(BGC_FP64_RigidPose3* pose)
|
|||
|
||||
// =============== Get Shortened ================ //
|
||||
|
||||
inline void bgc_fp32_rigid_pose3_get_shortened(BGC_FP32_RigidPose3* shortened, const BGC_FP32_RigidPose3* pose)
|
||||
inline void bgc_fp32_rigid_pose3_get_shortened(BGC_FP32_RigidPose3* shortened, const BGC_FP32_RigidPose3* const pose)
|
||||
{
|
||||
if (pose->_versor.real_part.s0 < 0.0f) {
|
||||
bgc_fp32_quaternion_get_reverse(&shortened->_versor.real_part, &pose->_versor.real_part);
|
||||
|
|
@ -198,7 +198,7 @@ inline void bgc_fp32_rigid_pose3_get_shortened(BGC_FP32_RigidPose3* shortened, c
|
|||
}
|
||||
}
|
||||
|
||||
inline void bgc_fp64_rigid_pose3_get_shortened(BGC_FP64_RigidPose3* shortened, const BGC_FP64_RigidPose3* pose)
|
||||
inline void bgc_fp64_rigid_pose3_get_shortened(BGC_FP64_RigidPose3* shortened, const BGC_FP64_RigidPose3* const pose)
|
||||
{
|
||||
if (pose->_versor.real_part.s0 < 0.0) {
|
||||
bgc_fp64_quaternion_get_reverse(&shortened->_versor.real_part, &pose->_versor.real_part);
|
||||
|
|
@ -226,13 +226,13 @@ inline void bgc_fp64_rigid_pose3_alternate(BGC_FP64_RigidPose3* pose)
|
|||
|
||||
// ============== Get Alternative =============== //
|
||||
|
||||
// Writes the alternative representation of `pose` into `alternative` by
// reversing both versor parts (presumably the negated versor encoding the
// same rigid motion — cf. the shorten/alternate pair; confirm against
// bgc_fp32_quaternion_get_reverse).
inline void bgc_fp32_rigid_pose3_get_alternative(BGC_FP32_RigidPose3* alternative, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_quaternion_get_reverse(&alternative->_versor.real_part, &pose->_versor.real_part);
    bgc_fp32_quaternion_get_reverse(&alternative->_versor.dual_part, &pose->_versor.dual_part);
}
|
||||
|
||||
inline void bgc_fp64_rigid_pose3_get_alternative(BGC_FP64_RigidPose3* alternative, const BGC_FP64_RigidPose3* pose)
|
||||
inline void bgc_fp64_rigid_pose3_get_alternative(BGC_FP64_RigidPose3* alternative, const BGC_FP64_RigidPose3* const pose)
|
||||
{
|
||||
bgc_fp64_quaternion_get_reverse(&alternative->_versor.real_part, &pose->_versor.real_part);
|
||||
bgc_fp64_quaternion_get_reverse(&alternative->_versor.dual_part, &pose->_versor.dual_part);
|
||||
|
|
@ -254,13 +254,13 @@ inline void bgc_fp64_rigid_pose3_revert(BGC_FP64_RigidPose3* pose)
|
|||
|
||||
// ================ Get Reverse ================= //
|
||||
|
||||
// Writes the reverse pose into `reverse` by conjugating both parts of the versor.
inline void bgc_fp32_rigid_pose3_get_reverse(BGC_FP32_RigidPose3* reverse, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_quaternion_get_conjugate(&reverse->_versor.real_part, &pose->_versor.real_part);
    bgc_fp32_quaternion_get_conjugate(&reverse->_versor.dual_part, &pose->_versor.dual_part);
}
|
||||
|
||||
inline void bgc_fp64_rigid_pose3_get_reverse(BGC_FP64_RigidPose3* reverse, const BGC_FP64_RigidPose3* pose)
|
||||
inline void bgc_fp64_rigid_pose3_get_reverse(BGC_FP64_RigidPose3* reverse, const BGC_FP64_RigidPose3* const pose)
|
||||
{
|
||||
bgc_fp64_quaternion_get_conjugate(&reverse->_versor.real_part, &pose->_versor.real_part);
|
||||
bgc_fp64_quaternion_get_conjugate(&reverse->_versor.dual_part, &pose->_versor.dual_part);
|
||||
|
|
@ -268,13 +268,13 @@ inline void bgc_fp64_rigid_pose3_get_reverse(BGC_FP64_RigidPose3* reverse, const
|
|||
|
||||
// ================== Combine =================== //
|
||||
|
||||
// Composes two poses into `combination` and renormalizes the result.
// Note the operand order of the dual-quaternion product: second * first.
inline void bgc_fp32_rigid_pose3_combine(BGC_FP32_RigidPose3* combination, const BGC_FP32_RigidPose3* const first, const BGC_FP32_RigidPose3* const second)
{
    bgc_fp32_dual_quaternion_multiply_by_dual_quaternion(&combination->_versor, &second->_versor, &first->_versor);
    _bgc_fp32_rigid_pose3_normalize(combination);
}
|
||||
|
||||
inline void bgc_fp64_rigid_pose3_combine(BGC_FP64_RigidPose3* combination, const BGC_FP64_RigidPose3* first, const BGC_FP64_RigidPose3* second)
|
||||
inline void bgc_fp64_rigid_pose3_combine(BGC_FP64_RigidPose3* combination, const BGC_FP64_RigidPose3* const first, const BGC_FP64_RigidPose3* const second)
|
||||
{
|
||||
bgc_fp64_dual_quaternion_multiply_by_dual_quaternion(&combination->_versor, &second->_versor, &first->_versor);
|
||||
_bgc_fp64_rigid_pose3_normalize(combination);
|
||||
|
|
@ -282,7 +282,7 @@ inline void bgc_fp64_rigid_pose3_combine(BGC_FP64_RigidPose3* combination, const
|
|||
|
||||
// ================== Exclude =================== //
|
||||
|
||||
inline void bgc_fp32_rigid_pose3_exclude(BGC_FP32_RigidPose3* difference, const BGC_FP32_RigidPose3* base, const BGC_FP32_RigidPose3* excludant)
|
||||
inline void bgc_fp32_rigid_pose3_exclude(BGC_FP32_RigidPose3* difference, const BGC_FP32_RigidPose3* const base, const BGC_FP32_RigidPose3* const excludant)
|
||||
{
|
||||
BGC_FP32_Quaternion dual_part1, dual_part2;
|
||||
|
||||
|
|
@ -295,7 +295,7 @@ inline void bgc_fp32_rigid_pose3_exclude(BGC_FP32_RigidPose3* difference, const
|
|||
_bgc_fp32_rigid_pose3_normalize(difference);
|
||||
}
|
||||
|
||||
inline void bgc_fp64_rigid_pose3_exclude(BGC_FP64_RigidPose3* difference, const BGC_FP64_RigidPose3* base, const BGC_FP64_RigidPose3* excludant)
|
||||
inline void bgc_fp64_rigid_pose3_exclude(BGC_FP64_RigidPose3* difference, const BGC_FP64_RigidPose3* const base, const BGC_FP64_RigidPose3* const excludant)
|
||||
{
|
||||
BGC_FP64_Quaternion dual_part1, dual_part2;
|
||||
|
||||
|
|
@ -310,31 +310,31 @@ inline void bgc_fp64_rigid_pose3_exclude(BGC_FP64_RigidPose3* difference, const
|
|||
|
||||
// ============= Get Outward Matrix ============= //
|
||||
|
||||
// Builds the outward rotation matrix from the versor's real part.
inline void bgc_fp32_rigid_pose3_get_outward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* const pose)
{
    _bgc_fp32_versor_get_rotation_matrix(matrix, &pose->_versor.real_part);
}
|
||||
|
||||
// Builds the outward rotation matrix from the versor's real part (fp64 variant).
inline void bgc_fp64_rigid_pose3_get_outward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* const pose)
{
    _bgc_fp64_versor_get_rotation_matrix(matrix, &pose->_versor.real_part);
}
|
||||
|
||||
// ============= Get Inward Matrix ============== //
|
||||
|
||||
// Builds the inward (reverse-rotation) matrix from the versor's real part.
inline void bgc_fp32_rigid_pose3_get_inward_matrix(BGC_FP32_Matrix3x3* matrix, const BGC_FP32_RigidPose3* const pose)
{
    _bgc_fp32_versor_get_reverse_matrix(matrix, &pose->_versor.real_part);
}
|
||||
|
||||
// Builds the inward (reverse-rotation) matrix from the versor's real part (fp64 variant).
inline void bgc_fp64_rigid_pose3_get_inward_matrix(BGC_FP64_Matrix3x3* matrix, const BGC_FP64_RigidPose3* const pose)
{
    _bgc_fp64_versor_get_reverse_matrix(matrix, &pose->_versor.real_part);
}
|
||||
|
||||
// ============== Get Outer Shift =============== //
|
||||
|
||||
inline void bgc_fp32_rigid_pose3_get_outer_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* pose)
|
||||
inline void bgc_fp32_rigid_pose3_get_outer_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* const pose)
|
||||
{
|
||||
const BGC_FP32_Quaternion* const real = &pose->_versor.real_part;
|
||||
const BGC_FP32_Quaternion* const dual = &pose->_versor.dual_part;
|
||||
|
|
@ -344,7 +344,7 @@ inline void bgc_fp32_rigid_pose3_get_outer_shift(BGC_FP32_Vector3* shift, const
|
|||
shift->x3 = 2.0f * ((dual->x3 * real->s0 + dual->x2 * real->x1) - (dual->s0 * real->x3 + dual->x1 * real->x2));
|
||||
}
|
||||
|
||||
inline void bgc_fp64_rigid_pose3_get_outer_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* pose)
|
||||
inline void bgc_fp64_rigid_pose3_get_outer_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* const pose)
|
||||
{
|
||||
const BGC_FP64_Quaternion* const real = &pose->_versor.real_part;
|
||||
const BGC_FP64_Quaternion* const dual = &pose->_versor.dual_part;
|
||||
|
|
@ -356,7 +356,7 @@ inline void bgc_fp64_rigid_pose3_get_outer_shift(BGC_FP64_Vector3* shift, const
|
|||
|
||||
// ============== Get Inner Shift ============== //
|
||||
|
||||
inline void bgc_fp32_rigid_pose3_get_inner_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* pose)
|
||||
inline void bgc_fp32_rigid_pose3_get_inner_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* const pose)
|
||||
{
|
||||
const BGC_FP32_Quaternion* const real = &pose->_versor.real_part;
|
||||
const BGC_FP32_Quaternion* const dual = &pose->_versor.dual_part;
|
||||
|
|
@ -366,7 +366,7 @@ inline void bgc_fp32_rigid_pose3_get_inner_shift(BGC_FP32_Vector3* shift, const
|
|||
shift->x3 = 2.0f * ((dual->s0 * real->x3 - dual->x3 * real->s0) + (dual->x2 * real->x1 - dual->x1 * real->x2));
|
||||
}
|
||||
|
||||
inline void bgc_fp64_rigid_pose3_get_inner_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* pose)
|
||||
inline void bgc_fp64_rigid_pose3_get_inner_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* const pose)
|
||||
{
|
||||
const BGC_FP64_Quaternion* const real = &pose->_versor.real_part;
|
||||
const BGC_FP64_Quaternion* const dual = &pose->_versor.dual_part;
|
||||
|
|
@ -378,13 +378,13 @@ inline void bgc_fp64_rigid_pose3_get_inner_shift(BGC_FP64_Vector3* shift, const
|
|||
|
||||
// ============ Get Outward Affine3 ============= //
|
||||
|
||||
// Builds the outward affine map: rotation matrix from the real part plus
// the outer translation of the pose.
inline void bgc_fp32_rigid_pose3_get_outward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* const pose)
{
    _bgc_fp32_versor_get_rotation_matrix(&affine_map->distortion, &pose->_versor.real_part);
    bgc_fp32_rigid_pose3_get_outer_shift(&affine_map->shift, pose);
}
|
||||
|
||||
inline void bgc_fp64_rigid_pose3_get_outward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* pose)
|
||||
inline void bgc_fp64_rigid_pose3_get_outward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* const pose)
|
||||
{
|
||||
_bgc_fp64_versor_get_rotation_matrix(&affine_map->distortion, &pose->_versor.real_part);
|
||||
bgc_fp64_rigid_pose3_get_outer_shift(&affine_map->shift, pose);
|
||||
|
|
@ -392,16 +392,100 @@ inline void bgc_fp64_rigid_pose3_get_outward_affine(BGC_FP64_Affine3* affine_map
|
|||
|
||||
// ============= Get Inward Affine3 ============= //
|
||||
|
||||
// Builds the inward affine map: reverse-rotation matrix from the real part
// plus the inner translation of the pose.
inline void bgc_fp32_rigid_pose3_get_inward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* const pose)
{
    _bgc_fp32_versor_get_reverse_matrix(&affine_map->distortion, &pose->_versor.real_part);
    bgc_fp32_rigid_pose3_get_inner_shift(&affine_map->shift, pose);
}
|
||||
|
||||
// Builds the inward affine map (fp64 variant): reverse-rotation matrix plus
// the inner translation of the pose.
inline void bgc_fp64_rigid_pose3_get_inward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* const pose)
{
    _bgc_fp64_versor_get_reverse_matrix(&affine_map->distortion, &pose->_versor.real_part);
    bgc_fp64_rigid_pose3_get_inner_shift(&affine_map->shift, pose);
}
|
||||
|
||||
// ============ Get Outer Position3 ============= //
|
||||
|
||||
// Extracts the outer position (turn + shift) of the pose: the turn is the
// versor's real part, the shift is the pose's outer translation.
inline void bgc_fp32_rigid_pose3_get_outer_position(BGC_FP32_Position3* position, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_quaternion_copy(&position->turn._versor, &pose->_versor.real_part);
    bgc_fp32_rigid_pose3_get_outer_shift(&position->shift, pose);
}
|
||||
|
||||
// Extracts the outer position (turn + shift) of the pose (fp64 variant).
inline void bgc_fp64_rigid_pose3_get_outer_position(BGC_FP64_Position3* position, const BGC_FP64_RigidPose3* const pose)
{
    bgc_fp64_quaternion_copy(&position->turn._versor, &pose->_versor.real_part);
    bgc_fp64_rigid_pose3_get_outer_shift(&position->shift, pose);
}
|
||||
|
||||
// ============ Set Outer Position3 ============= //
|
||||
|
||||
// Rebuilds the pose from an outer position: the real part is the turn's
// versor, the dual part is computed component-wise from the shift and that
// versor (expanded quaternion product; confirm the sign convention against
// bgc_fp32_rigid_pose3_get_outer_shift, which is its inverse mapping).
inline void bgc_fp32_rigid_pose3_set_outer_position(BGC_FP32_RigidPose3* pose, const BGC_FP32_Position3* const position)
{
    const BGC_FP32_Quaternion* const versor = &position->turn._versor;
    const BGC_FP32_Vector3* const shift = &position->shift;
    BGC_FP32_Quaternion* const dual = &pose->_versor.dual_part;

    bgc_fp32_quaternion_copy(&pose->_versor.real_part, versor);

    dual->s0 = -0.5f * (shift->x1 * versor->x1 + shift->x2 * versor->x2 + shift->x3 * versor->x3);
    dual->x1 = -0.5f * (shift->x3 * versor->x2 - shift->x2 * versor->x3 - shift->x1 * versor->s0);
    dual->x2 = -0.5f * (shift->x1 * versor->x3 - shift->x3 * versor->x1 - shift->x2 * versor->s0);
    dual->x3 = -0.5f * (shift->x2 * versor->x1 - shift->x1 * versor->x2 - shift->x3 * versor->s0);
}
|
||||
|
||||
// Rebuilds the pose from an outer position (fp64 variant): real part is the
// turn's versor, dual part is the component-wise expansion from shift and
// versor (inverse of bgc_fp64_rigid_pose3_get_outer_shift).
inline void bgc_fp64_rigid_pose3_set_outer_position(BGC_FP64_RigidPose3* pose, const BGC_FP64_Position3* const position)
{
    const BGC_FP64_Quaternion* const versor = &position->turn._versor;
    const BGC_FP64_Vector3* const shift = &position->shift;
    BGC_FP64_Quaternion* const dual = &pose->_versor.dual_part;

    bgc_fp64_quaternion_copy(&pose->_versor.real_part, versor);

    dual->s0 = -0.5 * (shift->x1 * versor->x1 + shift->x2 * versor->x2 + shift->x3 * versor->x3);
    dual->x1 = -0.5 * (shift->x3 * versor->x2 - shift->x2 * versor->x3 - shift->x1 * versor->s0);
    dual->x2 = -0.5 * (shift->x1 * versor->x3 - shift->x3 * versor->x1 - shift->x2 * versor->s0);
    dual->x3 = -0.5 * (shift->x2 * versor->x1 - shift->x1 * versor->x2 - shift->x3 * versor->s0);
}
|
||||
|
||||
// ============ Get Inner Position3 ============= //
|
||||
|
||||
// Extracts the inner position of the pose: the turn is the conjugate of the
// versor's real part, the shift is the pose's inner translation.
inline void bgc_fp32_rigid_pose3_get_inner_position(BGC_FP32_Position3* position, const BGC_FP32_RigidPose3* const pose)
{
    bgc_fp32_quaternion_get_conjugate(&position->turn._versor, &pose->_versor.real_part);
    bgc_fp32_rigid_pose3_get_inner_shift(&position->shift, pose);
}
|
||||
|
||||
// Extracts the inner position of the pose (fp64 variant): conjugated real
// part as the turn, inner translation as the shift.
inline void bgc_fp64_rigid_pose3_get_inner_position(BGC_FP64_Position3* position, const BGC_FP64_RigidPose3* const pose)
{
    bgc_fp64_quaternion_get_conjugate(&position->turn._versor, &pose->_versor.real_part);
    bgc_fp64_rigid_pose3_get_inner_shift(&position->shift, pose);
}
|
||||
|
||||
// ============ Set Inner Position3 ============= //
|
||||
|
||||
// Rebuilds the pose from an inner position: the real part is the conjugate
// of the turn's versor, the dual part is the component-wise expansion from
// that versor and the shift (inverse of bgc_fp32_rigid_pose3_get_inner_shift).
inline void bgc_fp32_rigid_pose3_set_inner_position(BGC_FP32_RigidPose3* pose, const BGC_FP32_Position3* const position)
{
    const BGC_FP32_Quaternion* const versor = &position->turn._versor;
    const BGC_FP32_Vector3* const shift = &position->shift;
    BGC_FP32_Quaternion* const dual = &pose->_versor.dual_part;

    bgc_fp32_quaternion_get_conjugate(&pose->_versor.real_part, versor);

    dual->s0 = -0.5f * (versor->x1 * shift->x1 + versor->x2 * shift->x2 + versor->x3 * shift->x3);
    dual->x1 = -0.5f * (versor->s0 * shift->x1 + versor->x3 * shift->x2 - versor->x2 * shift->x3);
    dual->x2 = -0.5f * (versor->s0 * shift->x2 + versor->x1 * shift->x3 - versor->x3 * shift->x1);
    dual->x3 = -0.5f * (versor->s0 * shift->x3 + versor->x2 * shift->x1 - versor->x1 * shift->x2);
}
|
||||
|
||||
// Rebuilds the pose from an inner position (fp64 variant): the real part is
// the conjugate of the turn's versor, the dual part is the component-wise
// expansion from that versor and the shift (inverse of
// bgc_fp64_rigid_pose3_get_inner_shift).
//
// Fix: the coefficient was written with the float literal -0.5f in this
// double-precision variant.  -0.5f promotes exactly to -0.5, so results are
// unchanged, but the literal now matches the fp64 convention used by
// bgc_fp64_rigid_pose3_set_outer_position and the other fp64 functions.
inline void bgc_fp64_rigid_pose3_set_inner_position(BGC_FP64_RigidPose3* pose, const BGC_FP64_Position3* const position)
{
    const BGC_FP64_Quaternion* const versor = &position->turn._versor;
    const BGC_FP64_Vector3* const shift = &position->shift;

    bgc_fp64_quaternion_get_conjugate(&pose->_versor.real_part, versor);

    pose->_versor.dual_part.s0 = -0.5 * (versor->x1 * shift->x1 + versor->x2 * shift->x2 + versor->x3 * shift->x3);
    pose->_versor.dual_part.x1 = -0.5 * (versor->s0 * shift->x1 + versor->x3 * shift->x2 - versor->x2 * shift->x3);
    pose->_versor.dual_part.x2 = -0.5 * (versor->s0 * shift->x2 + versor->x1 * shift->x3 - versor->x3 * shift->x1);
    pose->_versor.dual_part.x3 = -0.5 * (versor->s0 * shift->x3 + versor->x2 * shift->x1 - versor->x1 * shift->x2);
}
|
||||
|
||||
#endif
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue