Изменение функций-конструкторов для SLERP (Reworked the constructor functions for SLERP)

This commit is contained in:
Andrey Pokidov 2025-06-05 00:30:40 +07:00
parent 75cd82de6a
commit 880673a17a
3 changed files with 95 additions and 95 deletions

View file

@@ -473,17 +473,17 @@ int main()
{ {
//BgcVersorFP32 start = { 1.0f, 0.0f, 0.0f, 0.0f }; //BgcVersorFP32 start = { 1.0f, 0.0f, 0.0f, 0.0f };
//BgcVersorFP32 end = { 0.0f, 1.0f, 0.0f, 0.0f }; //BgcVersorFP32 end = { 0.0f, 1.0f, 0.0f, 0.0f };
/*
BgcVersorFP32 start = { 1.0f, 0.0f, 0.0f, 0.0f }; BgcVersorFP32 start = { 1.0f, 0.0f, 0.0f, 0.0f };
BgcVersorFP32 end = { 0.9999f, 0.01414f, 0.0f, 0.0f }; BgcVersorFP32 end = { 0.9999f, 0.01414f, 0.0f, 0.0f };
BgcSlerpFP32 slerp; BgcSlerpFP32 slerp;
BgcVersorFP32 result; BgcVersorFP32 result;
bgc_slerp_make_fp32(&start, &end, &slerp); bgc_slerp_make_full_fp32(&start, &end, &slerp);
bgc_slerp_get_turn_for_phase_fp32(&slerp, 0.5f, &result); bgc_slerp_get_turn_for_phase_fp32(&slerp, 0.5f, &result);
printf("Result: %0.12f, %0.12f, %0.12f, %0.12f\n", result.s0, result.x1, result.x2, result.x3);
*/
test_basis_difference_fp64(); print_versor_fp32(&result);
//test_basis_difference_fp64();
return 0; return 0;
} }

View file

@@ -3,25 +3,42 @@
extern inline void bgc_slerp_reset_fp32(BgcSlerpFP32* slerp); extern inline void bgc_slerp_reset_fp32(BgcSlerpFP32* slerp);
extern inline void bgc_slerp_reset_fp64(BgcSlerpFP64* slerp); extern inline void bgc_slerp_reset_fp64(BgcSlerpFP64* slerp);
extern inline void bgc_slerp_make_full_fp32(const BgcVersorFP32* start, const BgcVersorFP32* end, BgcSlerpFP32* slerp);
extern inline void bgc_slerp_make_full_fp64(const BgcVersorFP64* start, const BgcVersorFP64* end, BgcSlerpFP64* slerp);
extern inline void bgc_slerp_make_shortened_fp32(const BgcVersorFP32* start, const BgcVersorFP32* end, BgcSlerpFP32* slerp);
extern inline void bgc_slerp_make_shortened_fp64(const BgcVersorFP64* start, const BgcVersorFP64* end, BgcSlerpFP64* slerp);
extern inline void bgc_slerp_get_turn_for_phase_fp32(const BgcSlerpFP32* slerp, const float phase, BgcVersorFP32* result); extern inline void bgc_slerp_get_turn_for_phase_fp32(const BgcSlerpFP32* slerp, const float phase, BgcVersorFP32* result);
extern inline void bgc_slerp_get_turn_for_phase_fp64(const BgcSlerpFP64* slerp, const double phase, BgcVersorFP64* result); extern inline void bgc_slerp_get_turn_for_phase_fp64(const BgcSlerpFP64* slerp, const double phase, BgcVersorFP64* result);
void bgc_slerp_make_full_fp32(const BgcVersorFP32* start, const BgcVersorFP32* end, BgcSlerpFP32* slerp) void bgc_slerp_make_fp32(const BgcVersorFP32* start, const BgcVersorFP32* augment, BgcSlerpFP32* slerp)
{ {
BgcVersorFP32 delta; const float square_vector = augment->x1 * augment->x1 + augment->x2 * augment->x2 + augment->x3 * augment->x3;
bgc_versor_exclude_fp32(end, start, &delta); if (square_vector != square_vector) {
const float square_vector = delta.x1 * delta.x1 + delta.x2 * delta.x2 + delta.x3 * delta.x3;
if (square_vector <= BGC_SQUARE_EPSYLON_FP32 || square_vector != square_vector) {
bgc_slerp_reset_fp32(slerp); bgc_slerp_reset_fp32(slerp);
return; return;
} }
if (square_vector <= BGC_SQUARE_EPSYLON_FP32) {
slerp->s0_cos_weight = start->s0;
slerp->x1_cos_weight = start->x1;
slerp->x2_cos_weight = start->x2;
slerp->x3_cos_weight = start->x3;
slerp->s0_sin_weight = 0.0f;
slerp->x1_sin_weight = 0.0f;
slerp->x2_sin_weight = 0.0f;
slerp->x3_sin_weight = 0.0f;
slerp->radians = 0.0f;
return;
}
const float vector_modulus = sqrtf(square_vector); const float vector_modulus = sqrtf(square_vector);
slerp->radians = atan2f(vector_modulus, delta.s0); slerp->radians = atan2f(vector_modulus, augment->s0);
const float mutliplier = 1.0f / vector_modulus; const float mutliplier = 1.0f / vector_modulus;
@@ -30,28 +47,39 @@ void bgc_slerp_make_full_fp32(const BgcVersorFP32* start, const BgcVersorFP32* e
slerp->x2_cos_weight = start->x2; slerp->x2_cos_weight = start->x2;
slerp->x3_cos_weight = start->x3; slerp->x3_cos_weight = start->x3;
slerp->s0_sin_weight = -mutliplier * (delta.x1 * start->x1 + delta.x2 * start->x2 + delta.x3 * start->x3); slerp->s0_sin_weight = -mutliplier * (augment->x1 * start->x1 + augment->x2 * start->x2 + augment->x3 * start->x3);
slerp->x1_sin_weight = mutliplier * (delta.x1 * start->s0 + delta.x2 * start->x3 - delta.x3 * start->x2); slerp->x1_sin_weight = mutliplier * (augment->x1 * start->s0 + augment->x2 * start->x3 - augment->x3 * start->x2);
slerp->x2_sin_weight = mutliplier * (delta.x2 * start->s0 - delta.x1 * start->x3 + delta.x3 * start->x1); slerp->x2_sin_weight = mutliplier * (augment->x2 * start->s0 - augment->x1 * start->x3 + augment->x3 * start->x1);
slerp->x3_sin_weight = mutliplier * (delta.x3 * start->s0 - delta.x2 * start->x1 + delta.x1 * start->x2); slerp->x3_sin_weight = mutliplier * (augment->x3 * start->s0 - augment->x2 * start->x1 + augment->x1 * start->x2);
} }
void bgc_slerp_make_full_fp64(const BgcVersorFP64* start, const BgcVersorFP64* end, BgcSlerpFP64* slerp) void bgc_slerp_make_fp64(const BgcVersorFP64* start, const BgcVersorFP64* augment, BgcSlerpFP64* slerp)
{ {
BgcVersorFP64 delta; const double square_vector = augment->x1 * augment->x1 + augment->x2 * augment->x2 + augment->x3 * augment->x3;
bgc_versor_exclude_fp64(end, start, &delta); if (square_vector != square_vector) {
const double square_vector = delta.x1 * delta.x1 + delta.x2 * delta.x2 + delta.x3 * delta.x3;
if (square_vector <= BGC_SQUARE_EPSYLON_FP64 || square_vector != square_vector) {
bgc_slerp_reset_fp64(slerp); bgc_slerp_reset_fp64(slerp);
return; return;
} }
if (square_vector <= BGC_SQUARE_EPSYLON_FP64) {
slerp->s0_cos_weight = start->s0;
slerp->x1_cos_weight = start->x1;
slerp->x2_cos_weight = start->x2;
slerp->x3_cos_weight = start->x3;
slerp->s0_sin_weight = 0.0;
slerp->x1_sin_weight = 0.0;
slerp->x2_sin_weight = 0.0;
slerp->x3_sin_weight = 0.0;
slerp->radians = 0.0;
return;
}
const double vector_modulus = sqrt(square_vector); const double vector_modulus = sqrt(square_vector);
slerp->radians = atan2(vector_modulus, delta.s0); slerp->radians = atan2(vector_modulus, augment->s0);
const double mutliplier = 1.0 / vector_modulus; const double mutliplier = 1.0 / vector_modulus;
@@ -60,70 +88,8 @@ void bgc_slerp_make_full_fp64(const BgcVersorFP64* start, const BgcVersorFP64* e
slerp->x2_cos_weight = start->x2; slerp->x2_cos_weight = start->x2;
slerp->x3_cos_weight = start->x3; slerp->x3_cos_weight = start->x3;
slerp->s0_sin_weight = -mutliplier * (delta.x1 * start->x1 + delta.x2 * start->x2 + delta.x3 * start->x3); slerp->s0_sin_weight = -mutliplier * (augment->x1 * start->x1 + augment->x2 * start->x2 + augment->x3 * start->x3);
slerp->x1_sin_weight = mutliplier * (delta.x1 * start->s0 + delta.x2 * start->x3 - delta.x3 * start->x2); slerp->x1_sin_weight = mutliplier * (augment->x1 * start->s0 + augment->x2 * start->x3 - augment->x3 * start->x2);
slerp->x2_sin_weight = mutliplier * (delta.x2 * start->s0 - delta.x1 * start->x3 + delta.x3 * start->x1); slerp->x2_sin_weight = mutliplier * (augment->x2 * start->s0 - augment->x1 * start->x3 + augment->x3 * start->x1);
slerp->x3_sin_weight = mutliplier * (delta.x3 * start->s0 - delta.x2 * start->x1 + delta.x1 * start->x2); slerp->x3_sin_weight = mutliplier * (augment->x3 * start->s0 - augment->x2 * start->x1 + augment->x1 * start->x2);
}
/*
 * Build a single-precision SLERP interpolator from `start` to `end`,
 * always travelling along the shorter of the two possible arcs.
 * Falls back to the reset (identity) state when the relative rotation
 * is below the epsilon threshold or its components are NaN.
 */
void bgc_slerp_make_shortened_fp32(const BgcVersorFP32* start, const BgcVersorFP32* end, BgcSlerpFP32* slerp)
{
    /* Relative rotation carrying `start` onto `end`. */
    BgcVersorFP32 diff;
    bgc_versor_exclude_fp32(end, start, &diff);

    /* Replace it with the equivalent versor on the shorter arc. */
    bgc_versor_shorten_fp32(&diff, &diff);

    const float vec_sq = diff.x1 * diff.x1 + diff.x2 * diff.x2 + diff.x3 * diff.x3;

    /* Near-identity rotation or NaN (x != x) input: degenerate, reset. */
    if (vec_sq <= BGC_SQUARE_EPSYLON_FP32 || vec_sq != vec_sq) {
        bgc_slerp_reset_fp32(slerp);
        return;
    }

    const float vec_len = sqrtf(vec_sq);
    const float scale = 1.0f / vec_len;

    /* Half-angle of the relative rotation, in radians. */
    slerp->radians = atan2f(vec_len, diff.s0);

    /* Cosine weights are simply the starting orientation. */
    slerp->s0_cos_weight = start->s0;
    slerp->x1_cos_weight = start->x1;
    slerp->x2_cos_weight = start->x2;
    slerp->x3_cos_weight = start->x3;

    /* Sine weights: normalized vector part of `diff` composed with `start`. */
    slerp->s0_sin_weight = -scale * (diff.x1 * start->x1 + diff.x2 * start->x2 + diff.x3 * start->x3);
    slerp->x1_sin_weight = scale * (diff.x1 * start->s0 + diff.x2 * start->x3 - diff.x3 * start->x2);
    slerp->x2_sin_weight = scale * (diff.x2 * start->s0 - diff.x1 * start->x3 + diff.x3 * start->x1);
    slerp->x3_sin_weight = scale * (diff.x3 * start->s0 - diff.x2 * start->x1 + diff.x1 * start->x2);
}
/*
 * Build a double-precision SLERP interpolator from `start` to `end`,
 * always travelling along the shorter of the two possible arcs.
 * Falls back to the reset (identity) state when the relative rotation
 * is below the epsilon threshold or its components are NaN.
 */
void bgc_slerp_make_shortened_fp64(const BgcVersorFP64* start, const BgcVersorFP64* end, BgcSlerpFP64* slerp)
{
    /* Relative rotation carrying `start` onto `end`. */
    BgcVersorFP64 diff;
    bgc_versor_exclude_fp64(end, start, &diff);

    /* Replace it with the equivalent versor on the shorter arc. */
    bgc_versor_shorten_fp64(&diff, &diff);

    const double vec_sq = diff.x1 * diff.x1 + diff.x2 * diff.x2 + diff.x3 * diff.x3;

    /* Near-identity rotation or NaN (x != x) input: degenerate, reset. */
    if (vec_sq <= BGC_SQUARE_EPSYLON_FP64 || vec_sq != vec_sq) {
        bgc_slerp_reset_fp64(slerp);
        return;
    }

    const double vec_len = sqrt(vec_sq);
    const double scale = 1.0 / vec_len;

    /* Half-angle of the relative rotation, in radians. */
    slerp->radians = atan2(vec_len, diff.s0);

    /* Cosine weights are simply the starting orientation. */
    slerp->s0_cos_weight = start->s0;
    slerp->x1_cos_weight = start->x1;
    slerp->x2_cos_weight = start->x2;
    slerp->x3_cos_weight = start->x3;

    /* Sine weights: normalized vector part of `diff` composed with `start`. */
    slerp->s0_sin_weight = -scale * (diff.x1 * start->x1 + diff.x2 * start->x2 + diff.x3 * start->x3);
    slerp->x1_sin_weight = scale * (diff.x1 * start->s0 + diff.x2 * start->x3 - diff.x3 * start->x2);
    slerp->x2_sin_weight = scale * (diff.x2 * start->s0 - diff.x1 * start->x3 + diff.x3 * start->x1);
    slerp->x3_sin_weight = scale * (diff.x3 * start->s0 - diff.x2 * start->x1 + diff.x1 * start->x2);
}

View file

@@ -53,13 +53,47 @@ inline void bgc_slerp_reset_fp64(BgcSlerpFP64* slerp)
slerp->radians = 0.0; slerp->radians = 0.0;
} }
void bgc_slerp_make_full_fp32(const BgcVersorFP32* start, const BgcVersorFP32* end, BgcSlerpFP32* slerp); void bgc_slerp_make_fp32(const BgcVersorFP32* start, const BgcVersorFP32* augment, BgcSlerpFP32* slerp);
void bgc_slerp_make_full_fp64(const BgcVersorFP64* start, const BgcVersorFP64* end, BgcSlerpFP64* slerp); void bgc_slerp_make_fp64(const BgcVersorFP64* start, const BgcVersorFP64* augment, BgcSlerpFP64* slerp);
void bgc_slerp_make_shortened_fp32(const BgcVersorFP32* start, const BgcVersorFP32* end, BgcSlerpFP32* slerp); inline void bgc_slerp_make_full_fp32(const BgcVersorFP32* start, const BgcVersorFP32* end, BgcSlerpFP32* slerp)
{
BgcVersorFP32 augment;
void bgc_slerp_make_shortened_fp64(const BgcVersorFP64* start, const BgcVersorFP64* end, BgcSlerpFP64* slerp); bgc_versor_exclude_fp32(end, start, &augment);
bgc_slerp_make_fp32(start, &augment, slerp);
}
/*
 * Double-precision SLERP over the full arc between `start` and `end`:
 * computes the relative rotation and hands it to the core constructor.
 */
inline void bgc_slerp_make_full_fp64(const BgcVersorFP64* start, const BgcVersorFP64* end, BgcSlerpFP64* slerp)
{
    BgcVersorFP64 relative;

    bgc_versor_exclude_fp64(end, start, &relative);

    bgc_slerp_make_fp64(start, &relative, slerp);
}
/*
 * Single-precision SLERP along the shorter arc between `start` and `end`:
 * computes the relative rotation, shortens it in place, then hands it
 * to the core constructor.
 */
inline void bgc_slerp_make_shortened_fp32(const BgcVersorFP32* start, const BgcVersorFP32* end, BgcSlerpFP32* slerp)
{
    BgcVersorFP32 relative;

    bgc_versor_exclude_fp32(end, start, &relative);
    bgc_versor_shorten_fp32(&relative, &relative);

    bgc_slerp_make_fp32(start, &relative, slerp);
}
/*
 * Double-precision SLERP along the shorter arc between `start` and `end`:
 * computes the relative rotation, shortens it in place, then hands it
 * to the core constructor.
 */
inline void bgc_slerp_make_shortened_fp64(const BgcVersorFP64* start, const BgcVersorFP64* end, BgcSlerpFP64* slerp)
{
    BgcVersorFP64 relative;

    bgc_versor_exclude_fp64(end, start, &relative);
    bgc_versor_shorten_fp64(&relative, &relative);

    bgc_slerp_make_fp64(start, &relative, slerp);
}
inline void bgc_slerp_get_turn_for_phase_fp32(const BgcSlerpFP32* slerp, const float phase, BgcVersorFP32* result) inline void bgc_slerp_get_turn_for_phase_fp32(const BgcSlerpFP32* slerp, const float phase, BgcVersorFP32* result)
{ {