diff --git a/basic-geometry-dev/main.c b/basic-geometry-dev/main.c
index b5fde56..13d611b 100644
--- a/basic-geometry-dev/main.c
+++ b/basic-geometry-dev/main.c
@@ -173,7 +173,9 @@ int main()
 #endif // _WIN64
     for (int j = 0; j < 1000; j++) {
         for (unsigned int i = 0; i < amount; i++) {
-            bg_fp32_versor_combine(&versors1[i], &versors2[i], &results[i]);
+            bg_fp32_versor_shorten(&versors1[i]);
+            bg_fp32_versor_shorten(&versors2[i]);
+            //bg_fp32_versor_combine(&versors1[i], &versors2[i], &results[i]);
         }
     }
 
diff --git a/basic-geometry/quaternion.h b/basic-geometry/quaternion.h
index a26b9ff..9fb324d 100644
--- a/basic-geometry/quaternion.h
+++ b/basic-geometry/quaternion.h
@@ -231,7 +231,7 @@ static inline int bg_fp64_quaternion_normalize(BgFP64Quaternion* quaternion)
 
 // ============ Make Rotation Matrix ============ //
 
-void bg_fp32_quaternion_get_rotation_matrix(const BgFP32Quaternion* quaternion, BgFP32Matrix3x3* matrix)
+static inline void bg_fp32_quaternion_get_rotation_matrix(const BgFP32Quaternion* quaternion, BgFP32Matrix3x3* matrix)
 {
     const float s0s0 = quaternion->s0 * quaternion->s0;
     const float x1x1 = quaternion->x1 * quaternion->x1;
@@ -269,7 +269,7 @@ void bg_fp32_quaternion_get_rotation_matrix(const BgFP32Quaternion* quaternion,
     matrix->r1c3 = corrector2 * (x1x3 + s0x2);
 }
 
-void bg_fp64_quaternion_get_rotation_matrix(const BgFP64Quaternion* quaternion, BgFP64Matrix3x3* matrix)
+static inline void bg_fp64_quaternion_get_rotation_matrix(const BgFP64Quaternion* quaternion, BgFP64Matrix3x3* matrix)
 {
     const double s0s0 = quaternion->s0 * quaternion->s0;
     const double x1x1 = quaternion->x1 * quaternion->x1;
diff --git a/basic-geometry/versor.h b/basic-geometry/versor.h
index bdd0f34..fdedb73 100644
--- a/basic-geometry/versor.h
+++ b/basic-geometry/versor.h
@@ -142,6 +142,52 @@ static inline void bg_fp64_versor_copy(const BgFP64Versor* from, BgFP64Versor* t
     twin->x3 = from->x3;
 }
 
+// ==================== Swap ==================== //
+
+static inline void bg_fp32_versor_swap(BgFP32Versor* versor1, BgFP32Versor* versor2)
+{
+    const float s0 = versor1->s0;
+    const float x1 = versor1->x1;
+    const float x2 = versor1->x2;
+    const float x3 = versor1->x3;
+
+    __BgFP32DarkTwinVersor* twin1 = (__BgFP32DarkTwinVersor*)versor1;
+
+    twin1->s0 = versor2->s0;
+    twin1->x1 = versor2->x1;
+    twin1->x2 = versor2->x2;
+    twin1->x3 = versor2->x3;
+
+    __BgFP32DarkTwinVersor* twin2 = (__BgFP32DarkTwinVersor*)versor2;
+
+    twin2->s0 = s0;
+    twin2->x1 = x1;
+    twin2->x2 = x2;
+    twin2->x3 = x3;
+}
+
+static inline void bg_fp64_versor_swap(BgFP64Versor* versor1, BgFP64Versor* versor2)
+{
+    const double s0 = versor1->s0;
+    const double x1 = versor1->x1;
+    const double x2 = versor1->x2;
+    const double x3 = versor1->x3;
+
+    __BgFP64DarkTwinVersor* twin1 = (__BgFP64DarkTwinVersor*)versor1;
+
+    twin1->s0 = versor2->s0;
+    twin1->x1 = versor2->x1;
+    twin1->x2 = versor2->x2;
+    twin1->x3 = versor2->x3;
+
+    __BgFP64DarkTwinVersor* twin2 = (__BgFP64DarkTwinVersor*)versor2;
+
+    twin2->s0 = s0;
+    twin2->x1 = x1;
+    twin2->x2 = x2;
+    twin2->x3 = x3;
+}
+
 // =============== Set Crude Turn =============== //
 
 void bg_fp32_versor_set_crude_turn(const float x1, const float x2, const float x3, const float angle, const angle_unit_t unit, BgFP32Versor* result);
@@ -232,6 +278,72 @@ static inline void bg_fp64_versor_set_from_fp32(const BgFP32Versor* versor, BgFP
     );
 }
 
+// ================== Shorten =================== //
+
+static inline void bg_fp32_versor_shorten(BgFP32Versor* versor)
+{
+    if (versor->s0 >= 0.0f) {
+        return;
+    }
+
+    __BgFP32DarkTwinVersor* twin = (__BgFP32DarkTwinVersor*)versor;
+    twin->s0 = -versor->s0;
+    twin->x1 = -versor->x1;
+    twin->x2 = -versor->x2;
+    twin->x3 = -versor->x3;
+}
+
+static inline void bg_fp64_versor_shorten(BgFP64Versor* versor)
+{
+    if (versor->s0 >= 0.0) {
+        return;
+    }
+
+    __BgFP64DarkTwinVersor* twin = (__BgFP64DarkTwinVersor*)versor;
+    twin->s0 = -versor->s0;
+    twin->x1 = -versor->x1;
+    twin->x2 = -versor->x2;
+    twin->x3 = -versor->x3;
+}
+
+// =============== Set Shortened ================ //
+
+static inline void bg_fp32_versor_set_shortened(const BgFP32Versor* versor, BgFP32Versor* shortened)
+{
+    __BgFP32DarkTwinVersor* twin = (__BgFP32DarkTwinVersor*)shortened;
+
+    if (versor->s0 >= 0.0f) {
+        twin->s0 = versor->s0;
+        twin->x1 = versor->x1;
+        twin->x2 = versor->x2;
+        twin->x3 = versor->x3;
+        return;
+    }
+
+    twin->s0 = -versor->s0;
+    twin->x1 = -versor->x1;
+    twin->x2 = -versor->x2;
+    twin->x3 = -versor->x3;
+}
+
+static inline void bg_fp64_versor_set_shortened(const BgFP64Versor* versor, BgFP64Versor* shortened)
+{
+    __BgFP64DarkTwinVersor* twin = (__BgFP64DarkTwinVersor*)shortened;
+
+    if (versor->s0 >= 0.0) {
+        twin->s0 = versor->s0;
+        twin->x1 = versor->x1;
+        twin->x2 = versor->x2;
+        twin->x3 = versor->x3;
+        return;
+    }
+
+    twin->s0 = -versor->s0;
+    twin->x1 = -versor->x1;
+    twin->x2 = -versor->x2;
+    twin->x3 = -versor->x3;
+}
+
 // ================= Inversion ================== //
 
 static inline void bg_fp32_versor_invert(BgFP32Versor* versor)
@@ -316,14 +428,6 @@ static inline void bg_fp32_versor_combine(const BgFP32Versor* second, const BgFP
         return;
     }
 
-    if (square_modulus <= BG_FP32_SQUARE_EPSYLON) {
-        twin->s0 = 1.0f;
-        twin->x1 = 0.0f;
-        twin->x2 = 0.0f;
-        twin->x3 = 0.0f;
-        return;
-    }
-
     const float multiplier = sqrtf(1.0f / square_modulus);
 
     twin->s0 *= multiplier;
@@ -352,11 +456,61 @@ static inline void bg_fp64_versor_combine(const BgFP64Versor* second, const BgFP
         return;
     }
 
-    if (square_modulus <= BG_FP64_SQUARE_EPSYLON) {
-        twin->s0 = 1.0;
-        twin->x1 = 0.0;
-        twin->x2 = 0.0;
-        twin->x3 = 0.0;
+    const double multiplier = sqrt(1.0 / square_modulus);
+
+    twin->s0 *= multiplier;
+    twin->x1 *= multiplier;
+    twin->x2 *= multiplier;
+    twin->x3 *= multiplier;
+}
+
+// ================= Exclusion ================== //
+
+static inline void bg_fp32_versor_exclude(const BgFP32Versor* basic, const BgFP32Versor* exclusion, BgFP32Versor* result)
+{
+    const float s0 = (basic->s0 * exclusion->s0 + basic->x1 * exclusion->x1) + (basic->x2 * exclusion->x2 + basic->x3 * exclusion->x3);
+    const float x1 = (basic->x1 * exclusion->s0 - basic->s0 * exclusion->x1) + (basic->x3 * exclusion->x2 - basic->x2 * exclusion->x3);
+    const float x2 = (basic->x2 * exclusion->s0 - basic->s0 * exclusion->x2) + (basic->x1 * exclusion->x3 - basic->x3 * exclusion->x1);
+    const float x3 = (basic->x3 * exclusion->s0 - basic->s0 * exclusion->x3) + (basic->x2 * exclusion->x1 - basic->x1 * exclusion->x2);
+
+    const float square_modulus = (s0 * s0 + x1 * x1) + (x2 * x2 + x3 * x3);
+
+    __BgFP32DarkTwinVersor* twin = (__BgFP32DarkTwinVersor*)result;
+
+    twin->s0 = s0;
+    twin->x1 = x1;
+    twin->x2 = x2;
+    twin->x3 = x3;
+
+    if (1.0f - BG_FP32_TWO_EPSYLON <= square_modulus && square_modulus <= 1.0f + BG_FP32_TWO_EPSYLON) {
+        return;
+    }
+
+    const float multiplier = sqrtf(1.0f / square_modulus);
+
+    twin->s0 *= multiplier;
+    twin->x1 *= multiplier;
+    twin->x2 *= multiplier;
+    twin->x3 *= multiplier;
+}
+
+static inline void bg_fp64_versor_exclude(const BgFP64Versor* basic, const BgFP64Versor* exclusion, BgFP64Versor* result)
+{
+    const double s0 = (basic->s0 * exclusion->s0 + basic->x1 * exclusion->x1) + (basic->x2 * exclusion->x2 + basic->x3 * exclusion->x3);
+    const double x1 = (basic->x1 * exclusion->s0 - basic->s0 * exclusion->x1) + (basic->x3 * exclusion->x2 - basic->x2 * exclusion->x3);
+    const double x2 = (basic->x2 * exclusion->s0 - basic->s0 * exclusion->x2) + (basic->x1 * exclusion->x3 - basic->x3 * exclusion->x1);
+    const double x3 = (basic->x3 * exclusion->s0 - basic->s0 * exclusion->x3) + (basic->x2 * exclusion->x1 - basic->x1 * exclusion->x2);
+
+    const double square_modulus = (s0 * s0 + x1 * x1) + (x2 * x2 + x3 * x3);
+
+    __BgFP64DarkTwinVersor* twin = (__BgFP64DarkTwinVersor*)result;
+
+    twin->s0 = s0;
+    twin->x1 = x1;
+    twin->x2 = x2;
+    twin->x3 = x3;
+
+    if (1.0 - BG_FP64_TWO_EPSYLON <= square_modulus && square_modulus <= 1.0 + BG_FP64_TWO_EPSYLON) {
         return;
     }
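
Review note, not part of the patch: a minimal round-trip sketch of how the new helpers could be exercised together. It assumes the include path below matches the repository layout, that bg_fp32_versor_combine() composes `second` after `first` and bg_fp32_versor_exclude() strips that `first` factor back out; the helper name and the tolerance are invented for this sketch.

    /* Sketch only. Assumptions: include path, combine() composition order,
     * and tolerance value are unverified guesses. */
    #include <math.h>

    #include "basic-geometry/versor.h" /* assumed include path */

    /* Hypothetical check: excluding `first` from combine(second, first)
     * should recover `second`, up to the q / -q sign ambiguity that
     * bg_fp32_versor_shorten() resolves by forcing s0 >= 0. */
    static int check_combine_then_exclude(const BgFP32Versor* second, const BgFP32Versor* first)
    {
        BgFP32Versor combined;
        BgFP32Versor recovered;
        BgFP32Versor expected;

        bg_fp32_versor_combine(second, first, &combined);     /* combined = second after first (assumed) */
        bg_fp32_versor_exclude(&combined, first, &recovered); /* strip `first` back out */

        bg_fp32_versor_set_shortened(second, &expected);      /* canonical copy of `second` */
        bg_fp32_versor_shorten(&recovered);                   /* canonicalize in place */

        const float tolerance = 1.0e-5f; /* arbitrary for this sketch */
        return fabsf(expected.s0 - recovered.s0) <= tolerance
            && fabsf(expected.x1 - recovered.x1) <= tolerance
            && fabsf(expected.x2 - recovered.x2) <= tolerance
            && fabsf(expected.x3 - recovered.x3) <= tolerance;
    }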