Операции оптимизации (shorten), получения альтернативного (alternate), получение матриц поворота, сдвигов и аффинных преобразований из позиции твёрдого тела (rigid pose), представленной дуальным кватернионом

This commit is contained in:
Andrey Pokidov 2026-03-21 22:03:43 +07:00
parent 4ead7ca106
commit 84be068503
6 changed files with 163 additions and 34 deletions

View file

@@ -95,13 +95,13 @@ inline void bgc_fp64_rigid_pose3_get_dual_quaternion(BGC_FP64_DualQuaternion* qu
// Overwrites the pose's internal versor with the given dual quaternion and
// re-normalizes it so the pose stays a valid rigid transform.
// Fix: removed a stale diff-residue call that referenced an undefined
// identifier `destination` instead of `pose`.
inline void bgc_fp32_rigid_pose3_set_dual_quaternion(BGC_FP32_RigidPose3* pose, const BGC_FP32_DualQuaternion* quaternion)
{
    bgc_fp32_dual_quaternion_copy(&pose->_versor, quaternion);
    _bgc_fp32_rigid_pose3_normalize(pose);
}
// Double-precision counterpart: copies the dual quaternion into the pose and
// re-normalizes it.
// Fix: removed a stale diff-residue call that referenced an undefined
// identifier `destination` instead of `pose`.
inline void bgc_fp64_rigid_pose3_set_dual_quaternion(BGC_FP64_RigidPose3* pose, const BGC_FP64_DualQuaternion* quaternion)
{
    bgc_fp64_dual_quaternion_copy(&pose->_versor, quaternion);
    _bgc_fp64_rigid_pose3_normalize(pose);
}
// =============== Get Real Part ================ //
@@ -166,6 +166,78 @@ inline void bgc_fp64_rigid_pose3_convert_to_fp32(BGC_FP32_RigidPose3* destinatio
_bgc_fp32_rigid_pose3_normalize(destination);
}
// ================== Shorten =================== //

// Flips the sign of the whole versor in place when the scalar part of its
// real component is negative, so the pose is held in its "short" form.
// q and -q represent the same rigid motion, so the pose itself is unchanged.
inline void bgc_fp32_rigid_pose3_shorten(BGC_FP32_RigidPose3* pose)
{
    BGC_FP32_DualQuaternion* versor = &pose->_versor;

    if (!(versor->real_part.s0 < 0.0f)) {
        return;
    }

    bgc_fp32_quaternion_revert(&versor->real_part);
    bgc_fp32_quaternion_revert(&versor->dual_part);
}
// Double-precision variant: negates both halves of the versor in place when
// the real scalar part is negative, leaving the represented motion intact.
inline void bgc_fp64_rigid_pose3_shorten(BGC_FP64_RigidPose3* pose)
{
    BGC_FP64_DualQuaternion* versor = &pose->_versor;

    if (!(versor->real_part.s0 < 0.0)) {
        return;
    }

    bgc_fp64_quaternion_revert(&versor->real_part);
    bgc_fp64_quaternion_revert(&versor->dual_part);
}
// =============== Get Shortened ================ //

// Writes the "short" representation of <pose> into <shortened>: when the
// real scalar part is negative both quaternion halves are negated, otherwise
// the versor is copied verbatim. The source pose is not modified.
inline void bgc_fp32_rigid_pose3_get_shortened(BGC_FP32_RigidPose3* shortened, const BGC_FP32_RigidPose3* pose)
{
    const BGC_FP32_DualQuaternion* source = &pose->_versor;
    BGC_FP32_DualQuaternion* target = &shortened->_versor;

    if (source->real_part.s0 < 0.0f) {
        bgc_fp32_quaternion_get_reverse(&target->real_part, &source->real_part);
        bgc_fp32_quaternion_get_reverse(&target->dual_part, &source->dual_part);
        return;
    }

    bgc_fp32_quaternion_copy(&target->real_part, &source->real_part);
    bgc_fp32_quaternion_copy(&target->dual_part, &source->dual_part);
}
// Double-precision variant of get_shortened: emits the sign-normalized copy
// of the pose's versor without touching the source.
inline void bgc_fp64_rigid_pose3_get_shortened(BGC_FP64_RigidPose3* shortened, const BGC_FP64_RigidPose3* pose)
{
    const BGC_FP64_DualQuaternion* source = &pose->_versor;
    BGC_FP64_DualQuaternion* target = &shortened->_versor;

    if (source->real_part.s0 < 0.0) {
        bgc_fp64_quaternion_get_reverse(&target->real_part, &source->real_part);
        bgc_fp64_quaternion_get_reverse(&target->dual_part, &source->dual_part);
        return;
    }

    bgc_fp64_quaternion_copy(&target->real_part, &source->real_part);
    bgc_fp64_quaternion_copy(&target->dual_part, &source->dual_part);
}
// ================= Alternate ================== //

// Unconditionally negates both halves of the versor in place, switching the
// pose to its alternative dual-quaternion representation (-q encodes the
// same rigid motion as q).
inline void bgc_fp32_rigid_pose3_alternate(BGC_FP32_RigidPose3* pose)
{
    BGC_FP32_DualQuaternion* versor = &pose->_versor;

    bgc_fp32_quaternion_revert(&versor->real_part);
    bgc_fp32_quaternion_revert(&versor->dual_part);
}
// Double-precision variant: flips the sign of both versor halves in place.
inline void bgc_fp64_rigid_pose3_alternate(BGC_FP64_RigidPose3* pose)
{
    BGC_FP64_DualQuaternion* versor = &pose->_versor;

    bgc_fp64_quaternion_revert(&versor->real_part);
    bgc_fp64_quaternion_revert(&versor->dual_part);
}
// ============== Get Alternative =============== //

// Writes the sign-flipped copy of <pose>'s versor into <alternative>.
// The source pose is left untouched; -q describes the same motion as q.
inline void bgc_fp32_rigid_pose3_get_alternative(BGC_FP32_RigidPose3* alternative, const BGC_FP32_RigidPose3* pose)
{
    const BGC_FP32_DualQuaternion* source = &pose->_versor;
    BGC_FP32_DualQuaternion* target = &alternative->_versor;

    bgc_fp32_quaternion_get_reverse(&target->real_part, &source->real_part);
    bgc_fp32_quaternion_get_reverse(&target->dual_part, &source->dual_part);
}
// Double-precision variant: emits the negated copy of the pose's versor.
inline void bgc_fp64_rigid_pose3_get_alternative(BGC_FP64_RigidPose3* alternative, const BGC_FP64_RigidPose3* pose)
{
    const BGC_FP64_DualQuaternion* source = &pose->_versor;
    BGC_FP64_DualQuaternion* target = &alternative->_versor;

    bgc_fp64_quaternion_get_reverse(&target->real_part, &source->real_part);
    bgc_fp64_quaternion_get_reverse(&target->dual_part, &source->dual_part);
}
// =================== Revert =================== //
inline void bgc_fp32_rigid_pose3_revert(BGC_FP32_RigidPose3* pose)
@@ -260,42 +332,76 @@ inline void bgc_fp64_rigid_pose3_get_inward_matrix(BGC_FP64_Matrix3x3* matrix, c
_bgc_fp64_versor_get_reverse_matrix(matrix, &pose->_versor.real_part);
}
// ============ Get Outward Affine3 ============= //
// ============== Get Outer Shift =============== //
inline void bgc_fp32_rigid_pose3_get_outward_affine3(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* pose)
inline void bgc_fp32_rigid_pose3_get_outer_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* pose)
{
_bgc_fp32_versor_get_rotation_matrix(&affine_map->distortion, &pose->_versor.real_part);
const BGC_FP32_Quaternion* const real = &pose->_versor.real_part;
const BGC_FP32_Quaternion* const dual = &pose->_versor.dual_part;
const BGC_FP32_Quaternion * real = &pose->_versor.real_part;
const BGC_FP32_Quaternion * dual = &pose->_versor.dual_part;
affine_map->shift.x1 = (dual->x1 * real->s0 + dual->x3 * real->x2) - (dual->s0 * real->x1 + dual->x2 * real->x3);
affine_map->shift.x2 = (dual->x2 * real->s0 + dual->x1 * real->x3) - (dual->s0 * real->x2 + dual->x3 * real->x1);
affine_map->shift.x3 = (dual->x3 * real->s0 + dual->x2 * real->x1) - (dual->s0 * real->x3 + dual->x1 * real->x2);
shift->x1 = 2.0f * ((dual->x1 * real->s0 + dual->x3 * real->x2) - (dual->s0 * real->x1 + dual->x2 * real->x3));
shift->x2 = 2.0f * ((dual->x2 * real->s0 + dual->x1 * real->x3) - (dual->s0 * real->x2 + dual->x3 * real->x1));
shift->x3 = 2.0f * ((dual->x3 * real->s0 + dual->x2 * real->x1) - (dual->s0 * real->x3 + dual->x1 * real->x2));
}
inline void bgc_fp64_rigid_pose3_get_outward_affine3(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* pose)
inline void bgc_fp64_rigid_pose3_get_outer_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* pose)
{
const BGC_FP64_Quaternion* const real = &pose->_versor.real_part;
const BGC_FP64_Quaternion* const dual = &pose->_versor.dual_part;
shift->x1 = 2.0 * ((dual->x1 * real->s0 + dual->x3 * real->x2) - (dual->s0 * real->x1 + dual->x2 * real->x3));
shift->x2 = 2.0 * ((dual->x2 * real->s0 + dual->x1 * real->x3) - (dual->s0 * real->x2 + dual->x3 * real->x1));
shift->x3 = 2.0 * ((dual->x3 * real->s0 + dual->x2 * real->x1) - (dual->s0 * real->x3 + dual->x1 * real->x2));
}
// ============== Get Inner Shift ============== //

// Computes the "inner" translation of the pose — the vector part of
// 2 * conj(dual) * real; presumably the shift expressed in the body (local)
// frame, matching the "inward" transforms below — confirm against the
// dual-quaternion module. Term order inside every parenthesised pair is kept
// exactly as written to preserve bit-for-bit floating-point results.
inline void bgc_fp32_rigid_pose3_get_inner_shift(BGC_FP32_Vector3* shift, const BGC_FP32_RigidPose3* pose)
{
    const BGC_FP32_Quaternion* const r = &pose->_versor.real_part;
    const BGC_FP32_Quaternion* const d = &pose->_versor.dual_part;

    shift->x1 = 2.0f * ((d->s0 * r->x1 - d->x1 * r->s0) + (d->x3 * r->x2 - d->x2 * r->x3));
    shift->x2 = 2.0f * ((d->s0 * r->x2 - d->x2 * r->s0) + (d->x1 * r->x3 - d->x3 * r->x1));
    shift->x3 = 2.0f * ((d->s0 * r->x3 - d->x3 * r->s0) + (d->x2 * r->x1 - d->x1 * r->x2));
}
// Double-precision variant of the inner shift; expression structure is kept
// identical to the fp32 version so both produce equivalent rounding behavior.
inline void bgc_fp64_rigid_pose3_get_inner_shift(BGC_FP64_Vector3* shift, const BGC_FP64_RigidPose3* pose)
{
    const BGC_FP64_Quaternion* const r = &pose->_versor.real_part;
    const BGC_FP64_Quaternion* const d = &pose->_versor.dual_part;

    shift->x1 = 2.0 * ((d->s0 * r->x1 - d->x1 * r->s0) + (d->x3 * r->x2 - d->x2 * r->x3));
    shift->x2 = 2.0 * ((d->s0 * r->x2 - d->x2 * r->s0) + (d->x1 * r->x3 - d->x3 * r->x1));
    shift->x3 = 2.0 * ((d->s0 * r->x3 - d->x3 * r->s0) + (d->x2 * r->x1 - d->x1 * r->x2));
}
// ============ Get Outward Affine3 ============= //

// Assembles the pose's forward affine map: the rotation matrix of the real
// versor fills the distortion part and the outer shift fills the translation
// part. The two writes touch disjoint fields, so their order is free.
inline void bgc_fp32_rigid_pose3_get_outward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* pose)
{
    bgc_fp32_rigid_pose3_get_outer_shift(&affine_map->shift, pose);
    _bgc_fp32_versor_get_rotation_matrix(&affine_map->distortion, &pose->_versor.real_part);
}
// Double-precision forward affine map: rotation matrix from the real versor
// plus the outer translation, delegated to bgc_fp64_rigid_pose3_get_outer_shift.
// Fix: removed stale diff-residue lines of the deleted inline shift
// computation (duplicate quaternion-pointer declarations and
// affine_map->shift assignments that were missing the factor of 2).
inline void bgc_fp64_rigid_pose3_get_outward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* pose)
{
    _bgc_fp64_versor_get_rotation_matrix(&affine_map->distortion, &pose->_versor.real_part);
    bgc_fp64_rigid_pose3_get_outer_shift(&affine_map->shift, pose);
}
// ============= Get Inward Affine3 ============= //

// Assembles the inverse affine map of the pose: the reverse rotation matrix
// of the real versor and the inner (reverse-direction) shift.
// Fix: removed the stale old signature line
// (bgc_fp32_rigid_pose3_get_inward_affine3) and the orphaned residue after
// the closing brace (stray quaternion-pointer declarations and a TODO) that
// the diff had left at file scope, breaking compilation.
inline void bgc_fp32_rigid_pose3_get_inward_affine(BGC_FP32_Affine3* affine_map, const BGC_FP32_RigidPose3* pose)
{
    _bgc_fp32_versor_get_reverse_matrix(&affine_map->distortion, &pose->_versor.real_part);
    bgc_fp32_rigid_pose3_get_inner_shift(&affine_map->shift, pose);
}
// Double-precision inverse affine map: inner shift plus the reverse rotation
// matrix of the real versor. The two writes fill disjoint fields, so their
// order is free.
inline void bgc_fp64_rigid_pose3_get_inward_affine(BGC_FP64_Affine3* affine_map, const BGC_FP64_RigidPose3* pose)
{
    bgc_fp64_rigid_pose3_get_inner_shift(&affine_map->shift, pose);
    _bgc_fp64_versor_get_reverse_matrix(&affine_map->distortion, &pose->_versor.real_part);
}
#endif