From 97fb7948f05f1b580dbf96f3591d5acfc1c1e74c Mon Sep 17 00:00:00 2001
From: TheRedShip
Date: Sat, 21 Dec 2024 20:45:56 +0100
Subject: [PATCH] + | Camera system

---
 .objs/srcs/Camera.o   | Bin 0 -> 4360 bytes
 .objs/srcs/RT.o       | Bin 0 -> 4969 bytes
 .objs/srcs/Shader.o   | Bin 0 -> 10540 bytes
 .objs/srcs/Window.o   | Bin 0 -> 6041 bytes
 .objs/srcs/gl.o       | Bin 0 -> 129070 bytes
 .vscode/settings.json |  16 +-
 Makefile              |   4 +-
 RT.exe                | Bin 0 -> 540371 bytes
 includes/Camera.hpp   |  18 +-
 includes/RT.hpp       |   3 +
 includes/Window.hpp   |  10 +-
 includes/glm/...      | [~430 new files: the vendored GLM library (CMakeLists.txt, *.hpp, *.inl, glm.cppm, detail/, ext/, gtc/, gtx/, simd/); per-file stats omitted]
 srcs/Camera.cpp       |  41 +-
 srcs/Window.cpp       |  48 +-
 442 files changed, 67864 insertions(+), 41 deletions(-)
 create mode 100644 .objs/srcs/Camera.o
 create mode 100644 .objs/srcs/RT.o
 create mode 100644 .objs/srcs/Shader.o
 create mode 100644 .objs/srcs/Window.o
 create mode 100644 .objs/srcs/gl.o
 create mode 100644 RT.exe
 [create mode 100644 entries for the ~430 new files under includes/glm/ omitted]
diff --git a/.objs/srcs/Camera.o b/.objs/srcs/Camera.o
new file mode 100644
index 0000000000000000000000000000000000000000..4a6966dfc716a0cc56b6b5fe9b260abc4569e741
GIT binary patch
literal 4360
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/.objs/srcs/RT.o b/.objs/srcs/RT.o
new file mode 100644
index 0000000000000000000000000000000000000000..c7c96bd4ece99e10269a433418b232345a9da4e5
GIT binary patch
literal 4969
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/.objs/srcs/Shader.o b/.objs/srcs/Shader.o
new file mode 100644
index 0000000000000000000000000000000000000000..92b2336b1213f8f4e9053e45d369023bba42095f
GIT binary patch
literal 10540
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/.objs/srcs/Window.o b/.objs/srcs/Window.o
new file mode 100644
index 0000000000000000000000000000000000000000..fb73af590778d0bc8b3b90c0990513c727276952
GIT binary patch
literal 6041
[base85-encoded binary data omitted]
z79FY4a>GcO8 zi=Jq)M|pJHPRqzV*{)mDu(7$dv%Id=U+nCo%~w#AZcwXX<|VR*h6`}|8rx>u=SRqp za41HUS@(8{wb7zyVusP4VTfY@s~;@1FNs#o&mlZg@rcNX`GF8s;JlX|6MI>CPVv0j zSnU6E+0SEMu8ZZ49$S1&ZBEO)N*^EhveRAVf8F2zFhb0}Aa6gqeWs1$ z|6Y1-ELLpe_`enY|NaZ}G3&E){_l(aw*)$T*=F|bx5Q#yzBuClp0Y9)>-OaW|M%N(jm3J+{onii%=>(K z(EmMUV=UHh3Ls#`c1>b=HKbt*Bt}lJggzU+iZT{qsLBVLxr-f1Jqw z;7ZJYh2NKoR9fOo7;1Hap_j|9l-0=UWlgfpvfE^~?U>(o*(YURkbOlqBzsu)sO)i> z{S-9T`B~ZXvKM5>`X7J6cyV9Cx|}2{kl9Z?W4tA@OJ$eKDrGgYbu#-wXDs7p*=@2O z*>>5dWM7coFB_7L${v+HA^Wv#O7^_WewG>Ac5DvU?*w0h*^e?~ozIY+D_bhFA7jRN zt7Mh3>tyR>8)Y}kx@0}FyJeq}*-tHF{qC0y%SL5Cl06~&jciJGNcInz^S?cXWt`wk z*tUGx8M4>Qmdf5NTP3?ncAad!%zjK6%eX~mzm^62knC>Rr)Brb?DxB{&cm|r$$ljJ zx$HNxgEIReWGweZne#tA16$xrScd)j8Eld4^|Ck0-YiSXu9Cf7wqCYLc8l!&vJc7b zk$qbBMcJV2o3ii8ek}XBY*Kbm_7~ZUvX?p5!2(}G9W0bBlAR}eqs)E^8TFTxRmt8i zYmnKGA!8XGviHk;x8c$J?vees?2EFm%DyRkMD}CZFJzOl-^>0Y`={(>{wHU!etEux zZCfaNo$Nf>g|fHEDr8l%cgPxK&9V;J7TH$W$7KI4v)>cM`h8XQEt&lYF}C9;vR}xa zmi=D#f3kndUhaQW5zEN)CCu-&ve(JpAiGdzzsHDa?FWW2?ONG8WH-o~Wu3AuvJcBX zCi{%ce$5ig*e&~(?EA8x$R=b@%l;tyKiR)zFZVYWSjH=T3ETEs*_pC8$lfF?lf6~8 zR%SmDjQQOlYms%zZkK&n_Ho%~WV>X$W#5*4UuHiAjO9+q4#@r>`>X6gKZBLa{zG=P>|HYZ8D1>oKV|Qc-68vk?7w86mHm(G>$302?5B6JjB(j7 zW&bPtlk9IYdpyIm^L+{1_A1#ava@9u$d<{n(1S*pIb*^F$S z|5Ze6oBen#*6-D_(`4t!7RxS@*$>%b+BLFkWwkQv z))wPEAp5RtuWX;}DcNsjf0kKq513z09@p;`z649iPLo;x64;K#vWsOaWLL@BIv?*#*xuL3PL~zQN@SPFR?4oFRmk-9+&-E_N?rA*$c8`{jaQ|4y<=QtjkF<>xl_=u55|yQrYD)`_)#= zuST{`)+D=GcAKn6wq5oq*%xH@%Z6m5vPWf4$bK!eUya84Jumx*>{$Q1#n|2xd$q@vW>D^WY*Ifmir;u-Lg;1?v)M7hGpNA z{Ya%LmHw&m=KEhH#?oHpOW4X&WM|7RkS&vy%T~*-mR&Ei-+)IImt}*-FY?*AiY_;qf+4ZuN ztX1}2*`2bF%KBtGWnYqgL-vqtO!hO`ld@-I&&mESo98P4b#R>jgL-+j>{Qt~vSO8z zD!o(VrBrIu(D%wdAp59nhis?pKG`>9-<6HY_Q{@<{Z{sz%=*jp|MNModH&a_v6Zjz zCD^NFr^(KdEtXv*TOnH`yH-{!yHVCAd!OtBvR>H^+2>^U$sUk>SGHHSPxh4Tx3WLW zW@I_ODo_Wn@FmnsLUx+W`iI4Mi)9zfR>-c9T`Q}T-6(69y-)T*S+DF9vd_uBEPFsU zBHJr_O!k!Qcd|dr4$E?Up4bQeC^_Dju)PV{>9RsuiR@z8O4${%YFVA^CRw}eR@nz- zcga2>`@HPSvIk`&D%lU-V>|w&@eZqWj86q)sCBx6Eq{&dbXk$CM0Sa6rR+*swX9xt zlWeo>R#~^~F4-q#pO<|__Mq%x*$-up%YG$$RyHkrL3Ye(tb@2Op>5dWM7coFB_7L${v+HA^Wv#O7^_$AF^W$ zSO+Kg5{`|NWM|0El`WNBDqAJ1lwBuVC)+5yS=J@%k=-r(ldBm0x=@3Pogtb^lx33c!)*{QO# zWyP{(vgNYXvTJ15%Tlsd*?VPo%04RVlkJp!N%jrdL$Wd1&ty-^o{>E#`@3x3*{p-( zdZL;^uJ|O$3Y=>;8>^|8yWZ#vI$@a;fl>Jur zoNPum?;O^_D|`vZ#;awg$v&UfB-W=VbTE9*})k zwpX@K_LS_mvOmjaWI2VbgID+xj*W!uG+Ci+vFu{m3fUF1Yh`t^8)faX_sKpe>y>>% z_Bq*?We>)?1_!m*K%oh~bsmB=oZt(09MtCrQtZj!aj zZk2sdc9-lEvd_!DEPGHkBKx82G1;$VzmrYN4$F=?mvwNwFX7mDjqG$;k*q|ew`*v< zN*yYFLF0|ep7y1m9AnSAhkWU$p%i;PmwTcwjR(8Hm#}A+skB_BH7Z@JQk_ceD&4Bm zT`GN2r7x&-ze=Mj{X(UuRXU{7j7qOPk866aN|&lssnT^S-KbK#O1G-itVcDo`uWX-ezwChQ zpzM(Buq>xU_2)}b{mB-}7RicaC9-9*a#@9}N>(GQm!)K_vQF6+S&wYHY=>;8Y?o}e zY*;ob+bi2A+b=ty(%)5@zl3#lqDl*W3AMIJRwOHtEt8eYDr8l%8d<$8C2N&+%C^XQ zWZPvsWIJWMWV>a*&*3sS+b-K7+bP>6+btWGjmq}Q_R03k4yg2Z zmF8c_x;jy%g}xM>p|T=biENpyTvj2glGVuSWhq&!tW&l{)+5_4+acR2+a=pA8jjUdllC{b@Wm{xD zvhA`RvYoPBvfZ*_*{E!fU=Sxxj$rj2M$%(GQm!)K_vQF6+S&wYHY=>;8Y?o}eY*;ob+bi2A+b=sHJ19FO zJ1on&h_yD~mlnqCIr&7{LfIl&k*q|vOja(dkX6ZQWc9L?tX0-2+al|cZI|tk?Ue13 z?UoJ8MrC_t`(*oN2V@6jhh&FkITx$`d?~6w*+SVOS&^(nwoFzotB_U6YGn1Yl&n?O zDcd6Jk!_dlknNQ1lI@la%SL5;W&33NWd~#jWrtLH`6aBYg}!uhIO-OuRHUILvSqSz zS%s`hRwJvIrDUzLPT3Y&k8Hbahis>8mu$CeST-u#E88dAFFPPRC_5xOEX%o+H8kIs zq9aVUP_{@`BrB0ElaA=zPB&YM+#z7*A;Y@uwCtVmWOTP7=)RmiGjHL`kHO4cgtlx>mq$hOON$aczh z$#%yd4j?U3!1?ULPPqtrnKz2}eNOoA3bD8STm!kTU zEtD;i70F6u%Vg!U3R#t`MpiFN$y#NdvMsV6*>>3u*-qIm*>2geY*e;awokTSc0hJe zc1U(umQ$|!^QEZ%WD8}BWJR(P*)mzVtU^{LtC7{qQnFTAr)-O?N48zIL$*`4OSW4! 
zEE|>WmF<)5mmQEDlpT^CmgOv0{rOT@sQ!E@sz2F6*&8d<$8C2N&+%C^XQWZPvsWIJWMWV>a*&*3s zSvEG27|b;`ELdSu&WJ7ha$yJWj%!?ID? zUfDj`e%S%pLD?bMVOh>9)t@g#^(R{>TO=!zmB^OK%4HR@Dp`%JUY3%z$~t9RWIeL& zvK_LWvR$&>vSHb%Y_DveY`^S)?4azB?652+srvJ!sQzRNWs77*vJ%-cS-Gr2Rwb*E z)yq<{R#~TPi>ybsUA9BEQ?^UCTQ)2kmF<=7lkJxskR6mAk{y=iRH*)ZDXKr&LfIl& zk*q|vOja(dkX6ZQWc9L?tX0-2+al|cZI|tk?Ue13?UoJ8MrC_t`(*oN2V@6jhh&Fk zId4_{`BGGWvW2omvLacDY?-WFRw1jB)yV2)DOszmQ?^CcBik<9A=@e2CEKmi9+mcM zyaOs7)X+n+!?K+JP!0J~R70|bvPH5YS&3|!tXx(htCH2o>SZZetE^MDMb;zRF54m7 zDcdF6EgP1N%J$0k$@a?*$PUU5$qvhMR;&JeDXKr&LfIl&k*q|vOja(dkX6ZQWc9L? ztX0-2+al|cZI|tk?Ue13?UoJ8MrC_t`(*oN2V@6jhh&FkIcrpZz7*A;Y@uwCtVmWO zTP7=)RmiGjHL`kHO4cgtlx>mq$hOON$aczh$#%y&Mg^~ko%cF1 z{jvkHgR(=i!?K(!Re!z|)t_vkY>});Rw7#_E0vi-6HvV*civcs~RO4Xk)MfE3JC|e{el9kAo$;xFFvMO1PtX`Iq zwaPkWTVy@5?Xn%RohprJ=#OMGDjjnbw|9X`3w;S&zDRbS?2WRw$dW46sMM(O+Euz$ zLq90HOZExb=Vf1(Jt!NI{ZRIp>{qhi$);t8Wye%;E06c3lVg_ge2wgMS&^(nc8P4I z>`Ga+tX_7LY_sfES-0#i*(YV6mwiR{pzLAU4`q+bekFTWHZ6NWcFbDVLEM+H_g^C` zkQK?6$S#pxF1u1zBdeD+$u`SwlXc6s%RVXlg6u1@A=$&SM`e%yKb74Jd=F|1ZaC` z`)D7~4$+2O;Bu{raex3j@ColP1{R*m-Z1Y<41lQ9Ep(I1}D%?p?#lr4()u}MA{76r3@`( z=xV0Bj-geIZKbWJZKORwdxX|Sd!DwF_69@mGW03aWzO}F_ZtYgBg408-=mGB71Acq zE}~sRyOLH#yPCF~_A}Z|v~{!%w9T}ww8v@B(t2pG)ArK-m-Z3u3)+!oej9v)p_3UJ zj*#0o&SmIA#+J}7rd>{p(CTPSw4cy!q}@ilo3@Ge5N#XnY1&J)U9>*he%b-rXSBmF z^N%tMA-6}IOdCc!n|2=ULfSOi#k4DE5n4U1iS|?4jSSt&&~KP-3qzfZeUkP9?G@S{ z+CJI`v_rHZ^ZX+}79nqjpykj;(DG^HXj5n*+GVr~S}kn}?Rwg3+O4!ZY4_0{q;=As zqP;+Sm9~f0Py2xO32n&bej6Nz(8-Cl)Je3{X(MO_v~jelv=D6`t%A0gwuIJ9TTNR_ zyOVZ5?LpdOw5Mn<(q5&#MeC=1Nc)8LwJZEKI1Zt2Ct4$$b~^1WS^;f5Z7OXJZ62+X zwwSh*)=XPNTT8o(c0a9y_84tD?M2#aw6|#Q&_1MnO8eT6{WdrrA-8X2)6SrsMLU-^ zo>oGeL%W<-NvoqRrTv7qhISk6F4`tq2W=Z|JMAUfYqUPvI}8mllyRls2FEaTB0_F! zoJJc?8%?`_Hkme?Rz_PuyNcFGTS04~-9q~n?Oxgz+M~25X*+1I(B7o&qYcmw(FV`= zTjN-S+%`ComO~p(%cosHn?jpSyNtGgR!eK7T~BMF-Aelv?LOKTS|{yE+6%N-XnSb; zXdloH(T0@!ZE!3??r3omEr&LOmQNc;n?eiGE~8b@YH3So*V9(hZl&ExyN~uDt&{c? z?FHJav^}(b+6S~xXhXt&8yts_Hp3A(RR>w(%z)~l{P^8Cv9-0-v-AZ z$eT%MIke%leA)%HDYV(N%V-N|wX{as^|Th+t+Zd!?xSs?b<&=sy+C_~wuiQl_5tk> zZOB5u4UR>~n@MOnv=Ov?+Bn)2T8MTTt%6odTSB{@wwiV;?M~W#va6Cfp z^>H%o4BFYWb7>dSN@y3;E~iCkb+jhhPiQyNZlm2z+eCYawvF~Q?IqeSS|4pc?EvjF z+Tpc+8)PBm_KlNi!)RyI&ZAw(&}4=#VY+gL7BMzXyN-4Pt(A5MZ6ob>v`1)vpgm9f zBkc{^Uuf^s4l*=ivEK&gBjmQhY=*96Y!yRwj9p4wL0d(;nYNC0H*FJb3+++b6SU`O zFVl9>_R#jx-lKg?`3RkS+VQrZgID%#Dob+o%_n`m2TkJ6r?Jx6<)wu`ohwvYB6?PJ>Kv?HQy zKZLyYqn%1SlQxnzhBlryl{Skumv$wsidIKkN?So&MZ1}{j&?U~6KxCaQQ8x<=V&j} zcG33G_R-#>eN6kDcEr_eKZLyYqn%1SlQxnzhBlryl{Skumv$wsidIKkN?So&MZ1}{ zj&?U~6KxCaQQ8x<=V&j}cG33G_R-#>eN6kDc0`QrhmhBPv{Pwk(niw8(8klI(q_@- z(ypXc(duYRX)9=}XgAZ=(e9>gqHUo)N_&F#9PMS=F4`X2KH7Vm+DO_M+IZSj+AP{!+Lg2_S{-dEZ3S%=?Pl6K+TFCx3~goXHrjUD4%$xIZrWbj ze%b-rA==;uztxUH$ZfR~XhUhkXd`I(w6U~e+H_heZ62+Hwuly|EvK!dt)Z=@t*33I zZKiFdZKG|c?V#cMYK3=Ic+6v4Q(xLJ#8axGi@tv8*Mvn2W=;9H*GI%KkWeR5N+^M zwjV-X`_YEdhS5gQ@@Zpf#kA?PQrbLP1#J;6PFqe}Nn1l(OIuIdNZU-?O4~-;PTN7- zN!v}^OWRL7Ks!Vm+{E@n$ZJ2^P}(rs2wFaEEUlO}omNVlN2{PMqQz;;X)9@KXlrTf zX&Y&qXLrcvZ4GTLZ9Q!xZ8L2v zZ5wSnZ3k^9Z8vQ%Z9nY*?GSD7wQN6xy!N9Fr46HvpykuX(u!%*X{EGzvHuyTWA3|RH(T387(MHhnX=7={ zwCS``+B{kXZ4oU_TTWX^TSHq*TTk0a+f3U^+eX_?+dVn@6jlEuzI~%V{fVYiMg}>uDQl zn`v8V+i2TqJ7_y;yJ>rA`)LPghiHSF*?tIl?ME9*8%7&J%cqT{71O5EN@??G6|_aP zI77=AYGJxt7;0neuW9YHhiIL&Cu!ZZ|EBfQ{)g62`+#m{v(! 
zOlzQBNBbG=CfaSZyJ+{)9-uu;dz|((?IqePv^QyQ)83{1o%R_+$NbE1zifou_B)-S zT*i)~71AcqN@#OvWwbD@nszlq%?!0N-5m^VVC=usI%tp4x@gbSdT6iH`e^Ua25A4J zW>4mm?8!+a-&3GA)NToR&vBpH@VhMhnp{qm|QYXi?fShOTGmW~N)m(A|vPMB74p zl=cMeIoiv#U9>&4eYE#zAJaak9dU!-YR4huj$9|vPNkhm8%Y~O8&8``n?;*TyOLH# ztD`NYt)Q)<-Ar3YyPLL&wuSa6L$5OQHq*V!(BB#R8Ewcazcr3U$Q!R{-=Te%_5)f0 zZ5(Y9Z6@s!+K*|Kw8gXr+I6&_(QcyMM!Sol-!im~>9#YpgRwhlyJ>rA`)LPghiHRa z{1!S2A#cq^8%i5S8$rvbjinVcglQtHxan|2CAIgI@- zEtjFQ8JkBdV5pF>7to3rn#9-=+6;z5jJrVpMQ~&Q8lQlc4S4=X&U)C-rE|Kk&`iakVJ9gEQI<_%*aTjN$6Dk zw+LO?NCl+l6kBl9kg9r8EzvpQpY)x?wIOB*!8mAW||I9&}EY#Ggvzt7E z|8=8p7>PkTL{>stMOq_HRmdFxFrpOJD43S?#4q}W# z)xCarA7nt}VMxEo(~v%qoseFUy^tP}1CVZ!A-L4KM7{~>6!{*cL*!gYyT}yC29ddt zR*_mrw#W)dPquyRH#_Bs{2Eds@*j{Qk?oK|kyjyEB5y;wPO|s)5u`~Z6N5^TNH!!- zWH=;OWDF!zWFq7sW+HP==RkTzDj_{0O^|Mp8zEgH_dq&D{sYn>@+_oXA}*{wDTrN zx5y)q4v}Xe&LYd?dL7dHO(-g5-%T zh2)B?g5-$Y3CR|D2y*ZQ`?#Kk42Zk|=@8xk0SFZ9J|QLkY159Aw44JLApgI zLApdrA)O*&NQX!i(k`+BvO(k~NQ=nbkRp)>A-N)tLwd68Bkh57i@Xi#68RX?DRT7D zsI$n)kam$=$Oe(~AZ;R(A*~{1kQR{|NR!AiNL1t|NVUiYNV&*XNSVlXNRh}NA%!Au zLGncY7m_RTPe_i);m2U)7dZ)%A(9I@h}#!4+MEv=5Sb3?7r7kLC$bpQE3yL8Bhm`# z7WoaNL*zdp?IPWfT&(*|4R=9uMD{_lMGiu;L^6*>Ylxf-$q*S1If#GFzQ$r}Kx8_$ z`b93sR-edK*yT1cPBZy-77!6wbakW7(hAsHgCK?X1` zm|X8b`b9p2^oeAlfAxxd7vhXTrkpX5Zjp;1T_RUPIz?iT4w0Wj+C}bwY!LY!q)ns? z(kikO(ju}C(kyZa(j;;muJx$M>5yuXb0Os-C6F?aD*j$PJJ(kzYbKV0~k9{X3*h7Xk-3nAxMMf% zQv(?gxdxJhXB;NYjgV}SJ0V#jzk_6oJPyeac?oh5#hQ}$Kn6tKgY=7h0qGMt9`~=k zBFBk&z%ISx6$vX2hNS4Sq&|1!&fXOu+(j!s~ z$(B{+$rHIB(t|P4>}xwDB<=hb#90xVG@n2=V6AE7B=qh!k$gzC$aF}# z$O1^2$TCPsZwSL6dokI0eeU)>@(kS>uzNT*u8lo8zT9TP8mg~K(a+5ka8JU zuZ0we+~(vG*#gNDc@EMoqj4XkN#yTNIU>j8pofbLgJg)D2Pwm<+O${*QY2CdDHLgf zWQnYUWQue^GDMz-9F)1>&yXCE15O$lqXwUjUMF%Qq($yQMnIw>lOg3I<&cobGDxqi z>()SeMDB!ii#!1768Qt9O{53XD)MJYi^u^;v&h%Zz_}1P5fT!~g_MY#4=ED42$C&w z1td$P4w5NyJtRZq7D%tGPX8U!F7hO#N#u1%k;va5c_N1m!`VTXGrcYwQYLZ^BqTB! zQX+B%q(~$NDHK@^$rHH;k}L8EBuC^$NVdpcNS4SyA(?Y~F#AZyK?X!h zA^jqYAbldukY15>5a(S!c9((w;pag}x5#slE|Iq&ogyDYIz+O*hqEj)9I`=VBBV{^ zGDxdPJ)}kC21v8WoscGxt&ph54oI~~AEaF5V@R3E@!v=P5jhi5B61<5A1!Cva2{kp z=AAl7i^wk^QIXA%GLbGwNMskJP~-q4SLCQO(W6Apfb`0&cRnO45`q+o)IkbGnju*t zzl3CpY=$@=FPhf)1LPpqh(=z8w8@;i57I6(*NyI0U40_^skV9k;8_gWRX)Ly&~fvog$Y&+C*w0O(H8HQIR_! 
znIc;tt+IN59?~N6cSyC!kt1;4Wi~$xG9YvAI7q9=Tu6&Z3=$RjB_vN|GbC5!X-JO9 zTaZGT%|CF;5jp%U9J|OVkY1VJM?tbgrb2p-w9oqjNViA}r0H-w%{`E)$fJ-9kzPpW z*X&&TAsr&0L7GI4KN}^BoB=5mDS+gOOobdAV(;rR$bd*KBv;0$m5>~fUqP}(wnDN* zUW8xDUgXD;;hGdIubkfK;|9eQc zjN!W=T_PVsIz^7jLywX%d>Eu(WFjO}M(p{JZW-&AL7XSf=3K0Yl!-j*lq2#IBtzt{ z5a;>3DQEB~^eB-tAR9y`LE1#Fg0zaPhO~&>2XWpNnSDJ8iHh_=szpA7l#6_4G>%kc z3?w9S38dwCdtY%#ROB{D=dpI0O^^$ji6(EiD`5!Myb0K9SOCTYU zJ0T?^U6AGz>=t_+5*7In(*F%R%`xYp4Mp-GA(7dT5|IigSC(DQ3P`8))!QL$(lfU} zT1B3Lw2175bjhmVura8!%)%!?`b545X%{&UvO%N-QY~^Bq+H}GNUq4WkQ|YlA$>CA z-w){#>4J2L?1prRd_F4Wbq(tOnNRh~q=i^!y847XURGDk}Y)Gs0mSRY=$Q6(x=^JrKp~#JpJdxi( zaz(Z|xui$^(a9z9Hz$|K;ITOGBBwyIMG7EUA~PYGA`wW2$O_0o8EJ2a42V1k=@;pS z^ojf#(kt=_q(|giKg297G7{1yQUYlgi9niU>}`TXMScOP7P$vfF0vI;Ch{yKB(fV) zBJwVzNMz6jID;bJgkTw9^{8A(uE-A|IU=(m*&+)d7{;t}mO#)LmaK+2Kc@HI1<59NiIT_M~N}Ii(18J3!et0p@**TxF0?6Q(M2~vHWU7tKixyW=#nMegBByt_3MC5izkw^!m zP~>??p2%KEkBkZbfOLywUWlvmD7((5K-%QW9Rq0#= zNSBNfuQ_Q%-huSU)jg;f$1ZXPq(fvpBunHj_Cd--zJP>8vL|Eo6Bz|561fOcC{h8*6S)?WD{>noN8|xWw#YM(ERj8sOp%Wv z86sbwf~!*QG>1b5L?%M|Mdm^JMB5)!!tQX+B{q)22XBv0fXNETZE_OZz+sX%*>p$`Ltw4*H?U zC`gIOk0I64sw*KOX`j0xU2^POopR(To`V#L{0WjL^1hQxj_dG?kxS%MNVdor$Uzy$ zE`s!n%!hP~G(pNmZgSE{U;PcFixO{$)kf-YA`(A28HZ&+>M9mTZHZTe>lS*D;iOp{ zuC1-F5Q!|Vh%U2bVJs5yw{d2XYa90L{wWyWUA}yIMpbRNa(-3q{Mvf_f9j0+Kbm}T z>9pCi=8wV~mh^F>{zY7V+PKmGqRf0Nt}42KLYx*hPWvp&+n>0zLYIO5J~f9(_UJ=<$~vdIM47 z{1pT$SDpwFbze=h&Gu#k~)r3u#sgEuT)igzF zO;sCfYo~-O*oP)XY9kF1r-L&|qHEfhKmzQ2CE((4U6m=OCLUgZKgG&xuSv1+(us`= z7e?6W9nqRhuB?hosSQ{8dHii2;^aEJ$jNm)mk8QdXXuI%GeNA11BuNOD)7lbRW(k7i! zQ&$t`7&4`{(b}l5Vzs6q%xYtUoz4lHh**7HgWWNv#2VubW(=Jgsgq7JHPSG#z8<4p z9g6@Z4NCU`VhSe820OvDx`s$qBxci$nyTssa~#N(5OV;Knc=9p zz};=hf%rgr_i70wa#2zydM>g{N*}Gu7CWVyYt_;E+GSPsb)^k8(Wtph5K-C?tEuBP z0JtR5G`Vhx?8qgl5C~^Pj@G4MMx+X6oVGY@E?jqyGa^eOwV`k(ZM1XYmBwBNRw$nrtK3MG`l&d88vm0KxcAjZcU|GRXFvY z7>;vLb{0_6DkF6bH4V#Hc^D>*SWOcT*h%J+sTs&=C6lYFreZeNOt>)2(Cch4BR0%1 z>TK9CQ)4yOkd4>`1XDI#;Sbbv^ClP<}GW+z!8stc(+ys?2;MRz>H^llT=#p5tE{=tPCiGR)&a1EIB0e!A zmewriS@MQd`^2RlWkdDR2@N=#Tq*+G)615qkJ{Zkgqbvu0}{vrvGZIUj-xY%>f@%r zTEsJCjK^w{awnBCzou5ZAK-|RoJ$$geqglkP?V#|uFiO0<Eh>gcYrUglCMk7fjnakBr zG}@d&Qc|6)B|Z}+)oFBcokl0sX|y@Neo5vsHIh{E{G>Wr*C_KP)ycX-nJB4F*7fNp zGMA^3q&k_4lZi~9CneR%y0K=Wq&iu**GS|nk^NrjZQ8X#w7nN+(~fqQ&Q@+&NuxAu z+l4!uf!ucCh?c?=%j;2-tnw2w+7>R2?fn?E_eQn%Y0xBf=G9tzG)T;;j&LVGio_Zs zO{UBH+bqO`UZO=M}bKGu+t{1lT<0w%;F=#CAIu}J*l zNW8{byZha0LM#?uHn}#k*s_T=b(LOvKUw0zRbuI69>o;np%9V<`l+p^uXg2A1L|N_UkdpeLjTe1E_DQGifmf4~7y4!=`c`6?sQS^B)cc4#mA!OHnE=%3u`| zcj37Iw4N&@ZUzHICd@p#^ETNtoJSLQWfEQzsnl9eiwBzCU3>>h_g1L07cPuPcrLLX zbq^u1=#16W$7&j||N3Kb{i%jY;MasIKG156p6rer_2DIC2;3P{x1KTziu zIWf9M>*JBYonneO4yVptWV(DQ;v`>0ct@ie8Fu}qV5SPgKR(s_ky zax>nGo2e(jsY|zR_3cBl7GGLS>bhIYFioDi?$&}#lIL^D5a85xHxrm9Pt)#ZI@9E4 zMo~{)cWaK5JKAY=3+rdp+t0AQ2y_0C*ey3Xt?Q-r zjWP35ipiw}ug)nd;>DQmi2^*6671&kCOKXJ$YMvEo4`PX?8MGt_B1UX=m*o{+6VBc z_NqXVRLx-BZU;N7b=#dLYIZaj;kAf)I!j<9_`-pd5KLl*W-o~~DEksD#TtmcBxaB# z1b1WxMn8#ZXiv;=MF^H+hA2NtQYr2{o_rXtTQ}EU39ei3jPbET;H3U2Tw8;`ttE*Q zw_)y;X=Zb~FO4$YPndJLDvwr}*RX2J#Aaf)6I+i$gi}_-YB@=qvKrPTYbQ2ytZK?S zo6aMN%>^o)vKrQWX(uk2WxgX&H@!yP%w6i{Mk44b52;|5^_)GBBh4<-)Rc=ADJ6Bc zGS`nJPTi2!!x~AQvVHCQU8~dEpF&6>>5ks|@WD=>?ncG>q$}~MPjP)+C2pkZ>%z4c zV+CMtg%h#vy9dmM{%dxBR&*8;?juqAHJVZR0>a@bSguUMUl$deqP)1pkp=Qigu`xq zlHEH@=PsYssjX?i9)hJ5S4S$YvfR15wmuxG#ab%)^CS1L7IK76e5K?ed!~h^-&<&% zt;8Ojhrle+UHe$|Vh3?!>;n%(S;uZ-+y@9O-d+D#o0Vx#@?<;}NxXDQaf%aPP2kmI zG~%6UIj2_b?dtj`#EQnvne&*&+;pjTFlDcHqx|mZt{mVQW%}WTH?3 zzB3I^^qi4Mul#9o>vZ`^(LLQOQ~IWz-FfqBebbR5DsiV3Puy1d#inUTVe7ckwIKVN 
z*M6hSS8Ybvoje+yln4?`l%JFc5=>-$MCX+aVInz6qj*LFoF=*1y{1oB{^-=@TdQ6v z$7+M1r>>A$2}}B<2jA|(a+=kAnm%3qt&b-qgIPNVICVAC%=y#OP5OvP-Ee77r2QSF zR4LthVATB4Ne>Z5%`Zq{=O)?c`J<97Kcyw_10Pf;<;6$aDJ*#(1u5!`56DyGE%2{s zXFcJzhdW;QZcW+F>Ldl@`KQF-4MF08XTT?60aJyq7vmUgobRBWZxa2|Bo5ya#_IVM zsvR}04xe=M$u}N+M8gdg)y}h*M)N2pz|*jLi{bUOzbk7v;;QZiy7es5gr<{xQegTgAv}%V4}!{_~h0MOUUI#;@bY>Un>O@q&TywPb*&4Bzo6Wd^W}J-<;Pm$?84P zai`{qwe|RXw+0>AeC&ziNHMQFN9X1lJt_69?M3IOiFVeZ&dWu=$?Xv`Py;(^N}|W7 z$}}x~)QnV7-cBafEUB@+FiG5M&1sCpmLyh$_Ew6QnRdv2m@gIOC z$57mRk)+*oQIoz;*>5R>-N}hdD$_rtl#y8GM$7|d+_9u;Dd(kTa2?B<;%auE_~P0? z=WM{&pn;1$811c>zFI0XG?-xncbZi?s@b)c9Hvr7mw2Ngd8zeSPiox9I`4oBd@`@*NG1Sa9c zrQaHm{rSwVOkx3SdT|Kt^IABbRE1U+2_ zwoD*{xjX|AJcOX9+XIKsKnC-$H4tGQ=q5#^+lRTm31&&#wC4F;FiX0M`L{maFD1;y zq*}jJK1a5QH8sYjwkSn1Hn2%%4#F6l)uQCm%+SST)~I1Dxo+0m7?UiyZq}`_NtRqU z>n(t@7{Rhso_8f?EM2d-QwZNUC$5K(b;C{ILAzqz>Jpg5dt01%NE2f%G3k4Ivxv{+ zg0pF20(O_KftwdArZEuZmCq|)A9N+fT7#!Ef4lW%W^-+(h%|i#uSDI9{uq@Q)3Eq; z2UF`QQc?sPOk=F)DM=A&vRG$Kvmhc(7V8XZ7W-+1_vx2ayb+e0I3dOLjm`&Uej@}M z(>fDLQC1a$alBxXBDnWp3@@If2$MyIf4jTuAmzj`Q>rs+TXI6{JTjsEbdYE@uWiXC zTIY-j?bBu71_ka~?5L2{DTB_PN`!SNK~J5*IwZ+px2Qx|hZgj}9zsc#aU_^1sRD8` z-Ccx|cIpUA@@_MnUml1B-iJG*t4E=lXrvbRD1OgRJflkdUNJkb{h5uIi#u`0%0O8h zAkFV5C)Q0?F8tDc;wgGyOg9<0cZ|E~ipRj_{!**H%t-titCCj&jvK2c|4@-VP;&BD zF@Xf8b)7-gYuZRfjq^uN=q)(n?T#?RthM=7O0eluR?@G4|2v@^AG`=}t#9sOg3d+{ zM6iK_o-RYWJ)1TOlw{UhNfBxGVQ#ZDhjphCh)8o3);AD=3~7$SdN~ltknSk#i&nFk z)_N0tUKp43I25*zN7|UvVXbPPqlv}_>%qG z&jeDJV10p=D#4euz?bE=pD?5>{>zR)o;&~n?(ZI)R!C6CQ88Zz8n0ppC<86RbCpQC|B)9sexEVnL zoVxVDowNUYIois;dd=;1_4i2OiGj-^3Cm`0NHt)5JRslN5qyRsl56(f##= zv#Qb;e#!4_TtR8}iyU_m>8r~Y3@zumR zkD9FC$YDgFs&MNDRtmD*8jk%8Ch4@eTOI!xro|IE{Y`#l;lrhM_%=qf$OF#N_UIdk zu|^e%k?}4NlP*tzJ;F#H3@v8#bUK_{+d$Jf9nO7xhBWI-Dop0i?JkWC9OK0tsCllLYd+VB#P9F*`{ViZ;icL0e$SklIn&OZ zvuAgE3@UKMI2?{x`oFx~;aE$r>R+n<`~Uk-E(O=Retj*+$JM^NVQp0AS2y$>J1#q6 zYSxs8vxZMf7&(0M2M5jIBu#6qH20>4_t5l2P(zy9Tp zbvly%C9tr66-NRU?f?4M%0GIBp8|U&&@rn5sbi<(sQ7P~!!fMYsNw$MBwtnakxFsu z&}$OCR`{3da1^%6;)n~|P`wPJ@H^=Bu1fJzZ)?Lls`huhN(ZK@aq2%0Fjb{DmDAI8 zL3QG|j^briikI4I2FJ_2mcmurHKf;qe~C9DJDWlruMVa^XZ{(ky!w}`@*T(N={BLZ zaok7&@BA}6?G44oBzPQn&(hmHWV?_g$s*I^TACdlkd6Rb^aA@l?C*|Ce}eQdBg@74_$c zN$=FZ#B1AD#c))m9_~0};{Eb3@$R@&$II1!sROEd`SoAowM*79W~#sOCf-%;lB$lk zJ(q8!N=J2Q)p5@KOL{5EI%8eRWRu>7e~Wi}Yf8a!jrx;n;$5s9PX+&f|Nh^vfqp*g zq|fRR@3Zd%X#5Pr^6R?x<@-5K}UX}zvepC{NabM ziE)%wU#GfYcra~;H`b4#c%GkCygF8VcscW@;GtEcRXSBisdNGtqjKCt5eoHLMd4+2 zqWSdW3v5o1A|z9ceQJLy#&j;m42AK&kn>^1R$5|$*ZMj$Rt)lRA zbz^An=0pXQcCwvzN&263&QnEdLq#fa=p*Sw#!COQpdgt3=l>PmsiRXwN4VDi6@99& zB>jwvQj&f`4X$ANiP63w6*zsrFR1=cjic=_l|e-+yMg2xUx)N?PBl`hBxULA!c_fK zx?T5Hag>+R#_5sjh|_5^Jxs6LD2qb1w=dXZ8@WPXuwUsJpZmk=6nMrLm~+OFvyyh( zO1p7|*Iai!MGv2(8Y#PnLf+LO`Gq-21g{WqB@mp`S#6@N{d%h?Jl3O{yTlp(!KrJC zf+p$RRZBKA`}-`lOTO8qE$u?sm9RulJ%KrYI&!Wf)l|nwUrR^u0UtrX59kQi`W}y` z(DV^>oK#oT*BTSv!RXbill7|atyBAymxlYgb&q<6^P{NCx^h){K2rNw(bNqZ{8e7A zI#^*n>Y|OP3kIFl=n#A%PZfHhfNZMm@C&gzr5YyZpy%xODgi2u>u0qU^z5e>Y76?C zVQr(`j$Es{)py!{Lw~6jT~yb!MS-&@v!JtxwKIHzDm>_Xhmg9|ROx{URO0Q!E7baz z6=8Qp7(w8((l1gaU37oy7}Kf8Ab)jTA(8)5T__z)?dTh+`o=|HR4K(hLz`!Uj!tTk zg}C!LE_IH=1{5VsQNom*`_sZsHK-c=)jHLn`t>(2dq9N*Q-=gD)|y^b=Tqj>Dz}u9 z$S-t1Q|Mk(Sk^{4O6Ul^9toyStPuTLt#Xf4jPBGanGxQ{7x=~J3r$Q+wR$Ab43rd1 zPY4u7`JVR`9f?sbww!9s90&DJwTa;pSwyL-0s6czRBsDys|pCK!&cFX!e6RARa#WC ztD~Lqg&IezvZ;t2bQUpJ$Kj;njjo9t&iJCXsT_hmj!+*^``4xY12kSKW{30y_oA`o z<>gl=Mtha|N>m|LDd|Z1ouQ7U<|5zV3SB`>!@+c#ii^T0-Bf(){HEfaYbn)ouC;=H z{3{+arj`5pJe{`t`cy|sVMQ~hpkMz7R4pf|T2AnV+Scr*YV>Tm=X~cXv{9@wnV`j8 zT}T8wX&>_Zk)h%p@LA4=2c3@ceKc4uLv1CroST}fB3|&*MC_?gaeN`y`xZt0jyCR4 
zM;h3iGzQ7gt^kiSK zw$J*Swxe>IxYtx&NWGxGFPKKbR@b<#Y676s0~MO8<&giUvc|qp-^8T!l)`X+c@$Mg z6>1tpr)IjGVeJpstxeSstY5a4n}usT=H_tXs065EqMT5NMD3teqdx3CqTYwvA?j(UB2hD;)`=Pl^?|4^P%A|x zLA@f%1@)Y$b7xJt7K_>kwLsKoP;*7S1~pyO0;nmXCP0l9)eCBvsJ2i8MKy%#C90ej z=(-AhqE0~FBWgR;oub}@Y9(qZR5MXCpqhw!5XvK}GgM7cH$%mWstNVyz36ghe>0{3 zUDS6_r$udm`bpHQP=`h3L+uka9;#GSPpGd%wS+1W^%`9s>013lR9C3=qGF&{i+cXF z+3{UbbD-W5H3I5oQ8z<9Cu%cY%jrTa5j76V5_KKad{I}*%#Jfe9f6u4>c3DUL_GoZ zps4;(14VU&>MQC7s9vI~Ky?>YMi;UAfbJDl3e{QECs22bdJ*bQQIA0-iy8%Wi>M5! zL{Y7vZWL7)s)4985mQu`sBfX7L_Gm@p(}dy6sS|8`a=CEsy)lA5i&}Tgs0654qJBSY$~9HgL8#H9zJMAe>J6x#qAVz%sG(3^QC*RM5+LDdzt0IHU#!BEvir9wGG{r8Y5#Kko9)u*7&ikc1; z5j6zrm?$sQK~c@1_KK*cl3^t4U7V0xm>!Cgo^$OHGqJ}`dDas4=il|yp zFN!)(7hJj!&xqO&RUqnfsGz77P?o62p&l1C5h_>I{ZKPS)raznxT{@3qMm{pENVJbKT&VcJ$#*fPf;_W?iDo@sawUH)CEyfq0Wlx2X$K1T~J|B*F&8U6$|yFsK{PZ`a`02L47M~HPmiV&qHk%H3w?F zs1Z=_i@F!;O;IhOUKCXuszB6*J*KGnq7FgL5>*J5BkEYCqJSqTYnMUDSN27NQ=4x=B<^s02~9p&E+1u-g>EBkB;8 zTT~%bby06Z#fo|Ys=PD0?-Zy@qWVJpPgHxTUqxL96%iE!bzD?WQC~sr7xfX; zUQy3MeI;r()HYGWp*D%?3iY|D7Eqsxiii40)bC%JqP{EYAk^EUzJPj7)EiJQiL#)c z6*U=ZiKsqMK~e3X7KmyBl`AS5YL=*J4Ka1wG#C%R5MXapb|t)gK8*hFw`}o8bEnO{k6lCtB$B+Q1PNlpsI@+ z0u?LD3-zZL17_$pllPXWQRhlbTKh!pgW4_XGpKE%UV|zWwE$|J zs0mOXis}XRj;OX!D?~MfS|+M|t10SIQ751liP{d8C+a<@Iii+A<%pUAHCfbyP-8@O zhI&ZU%~1VC)r9IL>g*O>ddrzmO!u($UHoFXKacy{Q|TJRzQ27Jy=6q})N42my>7Ze z(mRSZeiKn!p&E#afvP8JESAJ|ME!*YYP_gpP_;#sK-oLK3uW&Z1!c#27VFHKlGb3T zI8hCts)`C>1!>0`0Tm;$KEUaXXUe^7DGiuWka13)eGvl zs5fzS^`5A?XxfiN)j*@I5p@PFv|iM=P(`BF zLv0iF3e;{==Two3>kJbsN+%*|A<_bqVcw zRAL>jtcsy3l&i7|LH#IcWmQ)Fq3q7ov9h`W%I-{6DyuS_Tt*kzQIwa+Kv9fv$ z%I>S9Dys~r1Cmy&%Bn7u-P#v%zO~g4Q1(#RR9U?Z^_}FnsIqzl>RXBRKxNed%I=s6 zm6el~9zN?kS1Y5Go|qh?tm%~RTs)m zYvC6rt$@Uu1T|Y!Z>VXa?u42m>d%cPt#P7`LX8l$6>5m6RZ#szO@-RClOGqLQJm73G0yAnNjZlYD(qKSFs#ZHB5V>OW9*L@kDj7nKd=7Bv9MC8`rt zO;I;ORTotis+y=@)|t{*74Mq+ouk}83rnnBSd7}SRzv+Lv1UMhFX}<41EM-Z?Gtq~l&L5>`F)JD(_V@7 zAxRVCYZ#Uh-KE_9&j!3NgpzP*o z4Q1DA1E}9*$G>oY<5y8TDy!$9PD`v&P-UVrpzIuX;Udi5QLf7zGK`WC>n};`cU-F3 zlfZhu9T4(V7=KXqz&)|q)R#Ttw?oyI9G64Y64f2bo?4Tk>^a#3WlyaQ>F$IcY4$u? 
zj&r^}6K#gFr=59FcIn?LGHKb#_rV#(KJT=HYAB`u7PE(~)Z==NvI2=KK{t;XsZ`0tGQ5iS|g$C z9etJ6t(8?BC_Ak`KCD!T?<=dK%IYmBJFQ@4H5JM(YQM_ruFC3qC_DMs$|~}KDg8yM z)m>1RM6G6JIbWm=x#FU83O!Z)wXgh$`}$n-FbWU zt-INY$s1i1!53;WiBc*{kmq%S55@TcC2{J3*aUhYIGp_xJtb>x&9LYZ=7T;f5*|;2 zo_zn#7xg=pZaD3o8Ols-NkN$*%04{kC_Qjb8^OMbE$M>-?!1U9$l?z4uz88IIZfHL zL0gSv%OzXoG%s`LS=l;nwgMNU-FeNZ@FmVKC2*qz#<=rd(1CAD;0v-9l&v|kL##V5 zLGLieDDPU8wf=BRiL;07agW&=R~Ub@!;xk=TamC_iAtAJJC0S)Jlpq6^Y^$HUepJ8 zx7pF1cTV#yh69&Pos>B1ii&pUpVG1Zq;rS*SK|EDY?a-ip7XZ3#Cb&G?2)a_vb9FG zR?60kvb9LI=E~Mo*%~2RePyew*`mjyQ?{2l)%lJKc$3+R9R0hz9Q~)niTEYXb`td# zL~TS5f$RGWCC)nJRE-BNQ`@=7mpHG`nT-{-PDiay7**o@jSQ0dIm``vB!^FLPNY-P5oE&Y#D z#RM)#>yG244@p?AcnVF~u8!-KqR3|YFbX}dp1)_tBPGu2NMlNg^D>={`Cy~`<4c@B z3sOIyO_`TCOO1+5*Z4W1`d8w7-)yC9r@YG+mpIo;(7R?U(n#;~f&>I*YYw8+rzqaO zX2wbO1v*Wpt38K1Z$D*1&*OhhSDSju*ru4CjBmNF$$ZM*%q``m!6At)L*1i%MMq=8 zCur1XSetcx)stzuNhzl}q>q``tjp_SXbvm))Sx`nWA#>XxWO@+T0&FlGaF~aZ_xfq z)5jAYbnl}-m5qUZ1%5fmPM}XIezy0X8#1{rr=wcYy8)Qw_z@O*YNGk8^GatHZnQz8BVll{IuDutJy5+>$o-p!fa>2@nwkC`eY38gGW6Ft=0oUj zZsrN*9-FH^b02UM<__SSnVW)Bm}`JLFrP>N?ZO-|_gGxrOt@M5y6$H_1RlWL6#Ni# z4e)U0ooKI7%&Wm;n3sXaF$cgCm?wZIGxq^cW$pmZW^M}3VXgt5!F+zTY0pQQ4}s?} z7lR*TUIm`dycC?rJPZ6d^GLA8+#S4#xixq(b0hE)<{0o&<}&o}XPNhcpJ(0xUdH@7 z_+{oG_*Ld?@CxR^;5V7OfZt|r0e**B#@oBhSIolR^*-})X3JTJ23n>5-&Vbi`H%kJ zyq)R)ZD@n$_f_bh9hqam>CCIqU%N6d19xZ62K$%?gZnVI!2CDB?2rEb5OW{!!^|<@ z9OmPg52rIv06)fj-dw}Fo?zYpUcvl2_(SF(_!H*U7(eTnmx4DjUqOHVn)x{R5OWvs zLFN|VBh0JOp2wMIq5Xbh?vD8~%-kA$mbnr53UilLW<34DJecRdkn4XY9DIiP%3CJ> zY3AeL2=g+qDNg`=jKe#Ce`Ib7HsO2UH0l4$;j6*FFfRrF#+(f{T5Fux9N!yE*+X3hqi^aq2JIlK$F6>|<}`7 zEts#oZ1TT_`8e1d-%jw&9KITS6Z0}~B69$IBl84i%Q=WPDvgKcG`pJf!K?b&6_@H3 zdOVo+>x22##p%gI~|{n?DaY;ZJ(<36CPocmW7ggZFA1N#5pCLHsl>3vKEzxM9?854 zJeGMWcoOr@*G+k|nOB3em|Ltc;fCviAK~yTubFVe$H7xg`d9I4@DvVTb`=M}IUK&~ zC6oRP=HN2JvzfEOk1-Dh2bjBn7cjQ~KfzoV9AdumqDg-V^KtNE<_+LQ%&&tj<{|IzlX!nw|KG%11M`Q; zuLkCiWDehJZgIQXGA{+2@D^aRzpTGfIDTEs7wwpjzh~Ot%iJB@-GpO4>CIdW9>}~3 zJdAlMcs%ng@I>bB;3>?Fz>hG?^>h~VUY!384+cMK!m)lVVBT;Q2f;6McpuDPFEXFU zeDn%)0Q@HN1n_F+E?4n+tRL2L_#yB{X1U(qz%1(r!?J$(jKgQ)d~aCR1D|vFNX(Zz znCrf3+V2qal~)Y^z`P9e$r0uN_!s6FaD;gk=7*n|mx50)cLyJ1ZVmpCxeVjq-gRp39Fmx9ZgXMv+`((~6ya4d6oa8>5k;2O-0z_plTz;&3*o-yUE&%BqJt`}&d z()=1lvxk{qb9sKf(%Q_gacy+@iZNf*VvYgVG~t*J>N2mw_-oAE2YfyAag4X7%sat1 zFs}yR%-jOwsTp%?jGshixj$y&%X+vqhs%2SE@rtNOgHIaymVu3f%V0G%#E-_P!66PAVQvIo&m05(oVg71T@mwM@D}C`;2q4bgTG-8f)6uigHJIJ247(A z0*-F0$6E_-z`@R!W3nJwpyv{7k( zo=0n5Fz&kix z))(8EuVB9XhIub|5A!VW_slynpB`q8!S&Tqvp=q+!zWpDw%+KeUWq$sP`8?*kDE$R?&O=})b1}FY^C~dHG5K>};Y-0Lyv9>z|0*1Qeu?2|=0o6e&c7IZg*gbm z$eax}^)(oLmczS%P5aaUoAx}9@nYKh5cs&sAN-@qA8h)QjMx1fF5~rElRx5{@EYJl z9KI9d(e%e4*o1ch|G@E^f=&D`i%tC<;qVq<)1T{t&GB3bnfNC-{&BE5{+-~TIeaxZ z!n_QO@c{mX!zX}EefI(X#^D{n=>K3dK5BqXe?Py-l-G=pL*NUXUNPA8hgD$HUzUPR zf1U+4<7Ff`miu3Ka2#`Ma82e$;CSX3a2@8dpee8VLP(k)_kuml8^HCMUk5i}4uTsp zXM-Ct4+b}3?gCC=ZUJt}To>Go`N|Wze3tVmtKxjm-raaGlQO|I%wVd^6gL4Zm2wew%uHVh(>4aAAU-iN)KL$pSN} zdlrbA0QI=2WGFkNQ5~V|SeNp7M0@-a>oAmE`eG=% z(N;o@ksU)&BSd9EJt(R_)Id=kq56ut0ZRQ=0Dp?O3RHJdWqGFb_lhco>MZILsJlhI z2-RNHW2`LaWZDQiN6}DLLvb8Eu14ein-=y^{Fb+AC>ERbjc2#00;uhx@-YQ$6*U5? 
zSk&9*TEtT*Y9iEHQAK9O@A*j7T(ju+yeq0Z)CZy#nx(zxLs63|tKOB>7Z}5KjxRyk zInIN6Lz172F}zGvQ>dp!HQ-@wIq!K~Cc}wa)ns^(>WF6BEUNu%j;`KX^30Z9L$rrh zb487Z$`y4d)O=C(pz=f=U0}`}+i1hg$-jT8iqTyc!=2ZJJ3_3>?g-U*n>xZ%X5s0% zS5lk~T)ug_i~1TZ)l1X@sDYyDn)_y+VWKXg!bgev0V>*!F|!G(uf+Pp+{5w= z5Oo4mtDWO^sG$<86=skAqJngq)o0Ngsn*hhO&ZZLr{^D%QPYKE>PE7w;d4|&XGv%T zjyXfrljw?eIfp=vlvvH7hKRZjm1P$l_X$0AW#yt8?6gj!ciOeQr?Pqo%C4+>sH~n+ zh-=MC+%rhjVw?i(9kHJA*v(VGRT}cxu_kff40-GW>W1#>%~cieSxz%WIz6_VcVDcY zt{!P*_tF4wQ!m|&ik~ZL9@KPE7f_`p5!w;=4Lsu|Rv}`I67?q32vIMhEgls03hK{R zPow@Gl34efD=CkiRx7Bn66*?Td5WkbP<~Ncpk|7C2P#X{bTm)4s4va(*OMdaL#Sz@ zo`sqr3hQu>o#QaXnkBK)p=OJ^1>Yv5Ptn%C3p!3(fRsA7cwle0J&M zq3jOuJNlbl`h!r9N>RUnvP=I4lwJA*IBCz59XCSF5!D)}V>|fj2b69K3G|GhO!6D&6QP6s0otR+1Vy} zTYU#*AJ7J z>~a@x(t5=jCmtNl!Zy6hp4-udW$N< z@PAO$HmD(@dO!^qwFi^LI8o0*%@s8p>IqT9p@O0on41BfMWRMRJtb-zhW~O=|Al%} zRCTD=ME!;t;SEuKzB~?jY}Fp88(USujPSbbScZZ9A5o=HABg&WzUjMmtm=;$WvBHU z#?(u);~P-Vin5@d6m>V0C8{aZd{O%`re=%!ys{b(WjETrP`R>WVr3NtW#@Pp11L{+ zl<{e+=Migx#FFv3P*iV>y!LwJIXBYDGF@GUEaVSgucsd%!fl7myiFr-HcqJ}qSj$( zY!WpM<6x^O+|ToTDN6m0nO?8iDullKmBh+|+9s+$)J{=y{WHF~98uD2?y4d|$L zS_M$MCHWkveWEs@J0B4BDNch2MZJL@Ztv*Dnp2Q zpbklnc~IYr8V7Y))P0pzYp5S3R)fmwuX(0vk4UUzl~oDUQHk{~)G<*@piYRIR#^>( zIw`SIE32EJev(+#E34mVrK>yU&l2k!sIaJYP^UyKudD)4WfE&_Wz_@f7m0OyWpxeI zX^HhGX1!lU9fdk0YAe(qqEClc3s&x*aM*lpCtQsK!{63=(w~ml1ZX`!HbpNUUPC zS5HwZSy|2}X(Q;ll@3N-U#Ib%`O#7K5l`f8I^tNY!X}9F;^a0-RI|#e5l&l=NURTV zcz#jOKur_%C{&K9S8#HhEh-Oco~R|b2n&e18&$PP)N!2emWWyj^`xkDsK0;J@2gNv z+yXU6V*P;=(tJ^yp)64^Ks_&NF4WtiMne5pln?4dQMW>UBC0yt{xea(VFoM`^$pZ^ zQR|>~idqg;Dk=cAPt;1xM@K}>fjT7WBg`}hL_G)fov6*2e7+I&^$at}e-PCQo!L(Q zP>zXpOkx#c!ib1^8R~?n@lZdDdUA?M%ieJ!)Gre2!^tMrZ=&vFWjRkzQa9N5QJ+^U z+f_V=oVeB=QKxuY^&<=nTe+diB-S?=1wV^=4eGe4xlkr6O1?YPQHfQGv2s|{O|0ns zI@$<-QB+<|x20=X7xe=r^gC(=Ddje(&x_*y#!#mp>4T!`JxA-3zD-(#`iFkAv0)>6 zv(fo^dAYSU=yaKyXk1r6g#GuSXuZ&J{Y4v8$hS-(U6+_~9Ims>_aXcg^JH@buAi7& zfsZmDHnXJb2j;EdZ<&{a_b@L2?`FOm{3UZU@OI`Z;BCymnyJRMnRyL(BlB|bI_3r7 z)y$K@?=e3BUdenn_;u!H;Fpx+__*rn7=cVy6Xb-V(>ZU$>1{P2f#;}n}H88 zR{BJ3G=UJ`R)oaF9$!)yZ}6(c_=v7 z#5X5W*JI3Mz_Xd}08eMW4xG*Ww>j&&9$|hDJehd`cmnfe@EGO?z{8pE1`lCwb`{sU ziZ9IMQ>n|u-wJ+^_kRyOl=)d^>k^$i9!1y)4tv5Z7hJ=bdxOoscY#e_H-ZOo{OaHV z%;%w=Yyv*j|V@@yvM9%TqBv+gH8Ef1e^TlgL|6vz!}WF z!JV1!0(WG-5!{x!I`}r`b7mpsYQek%d^2+vIDxqv_&Vleep6pf%>Lj;%pZXpGQS9} z%RC?KVjd5!!rU9|V7?3d_W?b(-3Y$STpj#7^C`35bN!Eb5BMDOdhi+M72q=FC%`9} zM}U7|?gsvr`F8ME%niX?n4`f(%m>Y-sq1s*P2hFR?|?sHehT~{^P}MRm`8x$VeSTg zoB4L|8_W&CE11vmb#ch`D)T|`i_G1on)ZH%`F8LU=7!)!%+cV5%%{xC+7)2l1AdHo zJ@`@P72s*iPkpHIT}2Sd5>AvyM{8a2M=Lh0UpHs1b6^*7I+}@ zAh3zw8Tc7z*Mo0mUIA{&`~>(G<}7e?=0V__nLC4< zG2a4CWUd2l%6xg8Dc=pu$G{29JHXd5e*|vA{35s!^L%ha=JDVL%)P-p~;@E^=ifzL2M3Jx=m03T)U20p-iJ9rOsL-20q z%MY9KZD-yAE@u7+{2B8g@F&cj!S6HQ0)CHq#2AzQD&}tB|1dWMzs-Dkw2A*Z^Fi>d z%8{bevtV_@F3>u-~r6%Mws&UV?GG(!@LRHgLwql$J`CvnfZ2bC+3FW zJDH=ww=1O9{g@`EP5bIixUzcTLtN0>hX zpJaX!e1v&E_mUpMi=Z!^b& zO?pGHS~2n8!0N)J|1{2yFY*2z!RB}p!RB~k!6tkN&R&R*+1c=hFWAanfIC^-qtJ`crAaSL<)n*4NSoUD>Wh-85lN!b-6r zb28F;pSqCxPTxB}P@m>F5Wa0qc{x2XnWxjHcXX1uFP*2)gy_EXhyHMgGmMq~|CrZ; z&oY02wl`dG74JuRO!%wdUwHqE6U|79nDo(>KQS)^pJv{Ml^XH~oANymHsy~)+g{=Q z!)V)!%%xy;;8dT5SSgtNW`IrkrgC4foX^onRQ32NJ)}*~hP9!3p=ZPV!>EnEq@;8m z?&marp{SaFVEz((jQJ(-&&;#Irj9b1EoW=mP>g!H+Zd0zc0@47IYHxgGk1;pefE zd6~oGsVnGKe~I}bO_{`&^GDiHW98wM>eCC4P_flWDd3};OOf?nrW89c5}x2xJ_0Xh zmh0sZb2o%9V!j1jzYofPjfd>^rQg}1%cFrB|d76%LjQh7j z`bzjQ?%PWJ51mVM$GfH}o90d`AsVtt{n@pi&Lz6?57JT4_rUw-oO9Ri)Q=)kJj%kI zM7fb_dlxIp&z(f2pktt~RHyHn&7=J(PfFf-4JUtFVNQZdBwk^Dc13dZSA?h1_gUW;=hffQO{L#M|8GC%br1Sl{n-|tPG5zu4(@&G==ADRud3f% 
zPhSKbNv9O-`t$aaZ_J*N7JN6cR3GKeb?xZY+LvZ+>0x~tzBz~5g1#_2RW(vFePMP2 zg;I)J&%5KB3{r`&Rl?!Oy+?g@f2!J~M;U3;$D6#DO4AW;SX~8X$AyQtaOnQ~U$X0C z>Cd|8glI>2$82DX!VL)sE8_J$n&AXAsSuJd>iWs#~-6pk`QY@+snh9^P>^tHjc5<#E4fJ)Sw?SP|+Gu zH2OmC$c9DH75lUPK4M>Rp#H%Ek7~3>>G9C@s_GQHnH}W_Z=;H%fup{{xPX7p$Gyh6 znkFsk+Z)!RlevsnRj~jTyZw%IlNqwJs*$`KV!l}18^Yv@I^JOaHIrq?_ zbM8Tf=iRlt#8dJ~gNH_XP&Mjt8y=|=u+FNTdeKhdD_K#FvX$xx-0BF>zaxieI;t=} zX?!R7g4M#Y0o~Zo6sm-a?f&NrMpYc)1=ORw@x^Lp;D2;{^L|NEU&$GC=I)Jk&=BjqlO&%|eb$Hj_sOl)>Ie3{n`x8_^DsXsyo?`X6-wdlT>{m7z;|r#S zeZh|#=v7Ak1%L0{x$PYO8{Hcp=%T85uHLsOJegh#eDgM|0_Z`kQyi<)sIs0qGG*w> zSV$QJQ!#?^;ULk+Mg1o=(~y z(68!;2}mCBjXV>+iG1k3DBsBK;odq*iQ2VphB}xtW=bjw|2CfwdAEMOxHa}p{zeR% zbyH|zY(LE~roj6^g%MBV`PN;R)V@1?!RndTsUFtm@KZXGt@>GpXKB)_Sj=a2*x9gp zHN`yZ8+kE2mpIf@7ju=4vc(khY=(7Fis>_ZC(-NHv`+ku^pva5nwXdzZm2^!-)2|k zd!5-+y;jV(3bDzzwT|+y`IgR8hj=#p0ln&cRfg@>r36*;(YNqt258j?Caq zgT7Y1Y`dQ7tRkyRO1>63Q0@A4M>#Ls$ z$={O`M-aY;2K>5YildS?X|~hJSzY3-B9&&adpv~&2gj?5Qps5dC=L2a0vfe()D!>w zv%Fl5^;FD2UUe=>b)=k*Y^0w%D4^F?G@;A*^R@rlU&|Nl9OtvValy{<7LBF~XV3!m z`z#!#8%6QO1$)O^y=cH;{PKK42UNT}R`rWuMolUY6|z1Jz*ooxyT_Hss}j-lP&~%W zKNL2S@OPp1t_Kpko;RJr=1bji&KPdtFotT0OrK9t^n*Wk1xi=`L zwO2JlOH@mIg=#r@g{vibaK)pe{mrjGw4)@vltiYrRdsG_pU>K%&z7ou&TCXzLw#b! z+&Ws7xQ1$pKCv{RbR}H()Iyr&>r(UqM7~hHX{x1*j>UM_ouCj7R;dl7)TnV&wtLrB zsYi3Q8rAC5lti!U*Ci6-=vC+YBqfxxy*xf;k0hQ>g6g83YFn~w2?ST6Fm|~pwy5oCM@f)a(I4ao`Rq$Y)U`8Cx>C`XjZ)Rdr zc|1+Bn&OGnPgm@x5+tkC#uNm-aTR9Yf`Z~o?KAHewcu1MtYo#qQfn((x#<xd-Pt_kPqToZ1&JBoq_W+$sf7@4i%%U?z5 zRT9OZv(-gfFbynpCv6SS-%s8>aCTx6B{#Ccd9_Zvg%WosozeRed+KZc{#-gG?R6)W z(&?;(Dq~g>NHM91J83uZZc66|754{~(@t7>?xz?7_iNHenC|X< zNEe#_P~ZU-xPy`Qq}mvGiT;mx-SrCH^-JCLeskCEQ`23)%w2mzW3}*2RtsY49JDs3 z&XJUvNSz~zy9I4fFQL<_>PWPPsW1Z+_=3??lQejIRus)*?s~=Uy53}(%Q9%ru)Gbt z!PGd*n-KBS)T-t|S{=|}r0&=zQT4`n>U-32=x?T8`_r10wuk8cIZ73w`NYJjbm;M( z-_B9Lq>U!W)P_5Lgc=p&>(QS>OL- ztw|-I(rqj8(ie@;nD^=*t19t!R4Za?`vN5FRIOVQl+WB^)2N&P<5U~1xy_x?|ppIJ1sE5kHQUxk0>V&m3s8W;m(Err!Yu_KQ65mUt zI=`-$fydRB>QS^G6=MelmMW!6TstFCZSGP5$0*=yMc+_Ccd5YP^;F^1LI1A&W)(-`h2Am;^_p~^xns4Bhba($#w2zusD9LfuJ_EK9 z|DwD$4Tw&wl&Hh^Kjn4zr%^uO4;_@G`YN@5?Tlm!QzfTl+x@q=Z}SA` zd*^aQit_5VIa$CF|kW!e_^*#WY>-(<1$u-=gW< z{4baNlebb@>aySb&XT%*NDF#ny`kLlSN<#y(66RFt}Zr%QR>#mV%q8y<%n^(^XU4* z(J5MO<`1A6@ddp}Y7Kf#uppjJ$oYHx)6zQR)}u{#egcJ4nsI5gxS@NEb6e0&&Kb?T zbW!ilt4T5R=V9+wOHh?vejD17|n z^CW2vcQc)`tE;$6DQ?+;0^0vS^eFwh808*QZj0_WX~8E{Io0jfuhKfqR|UhRamlE#zfm5#jCme*G4A|Xdh1e*`^lpeN+~p^L-c2>`ukDUUzGdt8XP6+ zu?ML%J5_1)(VVSKcL&;_T|*wPlyjd7X{hcSsPenWd#yq~gwIq^IjuxZJ($+$Opz(? 
z1eJHZ%KJBy_h($FMBQukjo!`bSGz(5Ph76VllFX=2&>2NPO z=O$OUehhiOI*lW^O~uTjn07eDk9zE&sRsJ?B~@c~RV=u|nZ2!&&`t77JSB0P&McSW zs{Eqdi&N-uB2i@3@n=ZAe4!#o|06%i>2J~%bNZFI>}gtG#jB2cgou)QR*LRdNqtHu zwF)Ogb$;{q|Hl<$d@cx$Sn;{neK@FO26{e@``J4(`-aS;W@C&uE zQT@pbC6<;YVSVq;dxs)twBO`TJLOKh;&rE;@dXNfneC6b^XOiIBRHTXy({xeTJVvY zbfJ}=e_?K|wBP{VKIC;jU6>Xe7oVPgdTv}`MtqbvP)em3SdZSxQKFs}Gy?MX%&ij~ zkmR+#_BKC|9#z!-V7j|!5skNG>cJ{zGRGXyvHie~{#b9IFe+F()S77)-S>L`J8$4GEyl*DyWc;Z z7In%SxT3{?dL#~|MNvN(*g#|hTQ(Ni*p>+*6Kt6%GSNutAvzNuojcy&o))!(4zrCK z$5jI}+C&BRIK1u8xC43A;2G8?RoB+HRNK~%Y1USEeu#9YRf;PB?-1{U-l1N7vy|@_ zw!e~9%blM|(b}JK2hund>s#x`NIGrPdVOXaZh7w)sFG%0qXJ($sJGB&J++yhe`0Q(v|wsdnsqF# z`KG8H>D0qAcjT9=7|AMzx=?Q4tD`?AP!tuIo!FS(`@M(KPNS>5#%V1aRGEdO8P~ETI=O z&r)CVTKm$it=3m%JE%QU_C%^tlNZpSjcn1;xc{WLKa*8Gt^MWf=V&YEDcP;;8D6i@ zoDrEt*sLY6BS48_q9Kq z^UVdvk2&A+awK5x`4{Nlv8-_apRaG#{U+CvjdW2Pj8dnF4Bxz>H?F0d!>*_4)J+pm zui#R1+8(OT;`M_C>WtWasru`Wqig7b0kq`{2J{QvD^nS&>t{OMM$xHNbL9FOa(-5+ zwDtvaReXKLt)uJn+4e9^{^}gfJDsB2diME4yNdTK_5DoOx1ZDHIW0l_*O-!1yrf9a zrIMd1NBAX5P1ivZt!UCzk)1E7edB3GQa3nHA7r8bT3`8V!>D9qQ2xj$+D~8Kggi4* zzDQ41G`nmnT-T?r&dw-OOH^hdy;Xwa9!sg))nupfkNWzpnv`Fi=TyGYQZIWba7=;R zU$J&k6Ylzq4yCqQ?z};R5Bd5yU!PUt&da1BoqIQ}&sxw%U~K~`MznkJvnsay0%yu0 zIa)aa4QynNIiOVgmhxm)61Z=Td2J-7AZ7 zc2UWDs*=a46;=}}H2t9EHL`v~yTs}MYBhc5#s0lp{teB7UPy3DPBTjI@1K$2mvl3y zY^z;=7#~~oE#^qL4V6`wM30ZN;aJ6LeEe|L_~3kLz4-W4m6vrEWuab7XmLlZwa@eyOT;i zpj7)}e{J2>hgG;&N%zNHly&*i87e1w)k)r0Oe}L%;L9h2A!~;J-x`DL7GORHE+LS?4lI z%qO9K=eZ=H2A#T6$cXye7YZa0QMmfy>4eNsu(85WKy_4VgVV?8roHdHdN0$RmrGqZ zs6*1JtH!0ZFLW>ZjGQ}fE5%mV3X45@H&?(uWHna?RR476&7ipRHmKZn{pMdJ4s0+( zLS4V8H#Iu0yKX*hS?9`^nlaIFWt6{qNgz@0=Wwi~-_tFj?UFzeOq>dDX^2ydaW8&? z1f{dC*%DRPk{Dgl45yHlbp4P{uO)$GwPQjIjW{(wg`} zP`b%^pnT~tik@jj!rxyiFUKjQVn0Ivu26q@Q+3%qGf!f0-bP>bZSqrT6;N7{@%rRO z`FmbFfp}FZUfB=&c!Nt-gQwGJx2B!6MYkwbcW~z|*G2ctRasP36UvvdwCjor<85BY zM7{uho}&gOm)9t2#80DB&C_<`?!4#e{7m)n79|>fmoDp7-LX^k`oR2(_1dX7p}$|J z;q>CpSJyTrfi_g)DEeNR$Vi=#XQj$gm#&q{GK#X~`j4jikId&t#m8~<)l{VKC8cPU z(zS$F*GF3W9qv#ws=mlE9hyrAsOs^6r1V7$rSz0e-}NCCB7B%?Tpi6C+KW3@GI``s;dWwO5Z$j?|;kziF^_j9jDA;P(d- z^!rt=``4Ira0{GxcGLSOG*$R-rxX1Q%7=QEK7Z>eHBS|v)=7GO5RRpyo3)94KZTC# zepQX?d>5w9?kGE|-++uPdzdCpyS*bZWBc_ z=Pmk60oTw2DpcS<>7g3+kABcZYFrB6(Vh0A)fl~deealmw3W&)aQU(6H5S&^Qw%k{W8UTRst(7r zsq|V-BW!`{B`Jl9;vLfud#ycIsn;qUIbgBwn)CP{+<1LENW#^uvBXNOa z1mQZ>O4FesvM{P*%p>NQLq}J^xorT^-j5Zb6w>w&j*F(mDAm%z(tq)@wU`eXjoQP58jQ8o$re(rS&~6 zW%B7EGIi;3^l??{!knkq#jAn^J!djh_t`^#Nz;8$S}#nXHe0KLQ}%@0t0K@xCj%w* zd~_vF6MRzmzbXw%>n6H~>!W#w-gdY|DgD|@Wf4zZ@&s*#uR&!?R6#)F-7I4T$_i@6u!ar^ky*&%1WC>gU{goZ3j;ZRn`8vZ)tkk41w_ zRsEJ4NJUno#?++3VPn$h@7M_4=bMX_07Mfd4=9mm}lBJCnx; zwaI) z{`V!^tMZ@c8r|rG;7?bNi~Mnn?)FUWOcN^2jEmCL z96Oo*@*L~0pJDB%^KO9dzeM&8rFXH+`yqSQIn`3?C0f>Dokf#*4t*~JU(?cE)AF*D z@_dxFD|~?VrW|fjX@_P+(M(+KaU!E)|4no$#}7+QRT1?QUEz@0RC&`0M4#7aeW&wH zRLLKt=bf&)f1Vk-Nqx6O3f&gJReecBi8|@h1)o2$rMfX|9Sc9Il2qr9Vs{=b?dfh& zZilA+liszoPM{MbJ*{*gd)g2%9!PgkSr3F8s{MkV30<6&+>EG9Rs605IxV_( zJx9Cuz#;tiL3;mdD);9rtG2XUz*-pQB`h@3eTgmE$m7Q^DfbLA~_p4eR?p8>gWNAV(KVg zT}kjWX8M*!#aq}k52HPv{)29GS2*B&H=RlK0jv8LRBx?2tD~hlt3w+)?GpbqsjoRh z?D~qt>XR^^FLP&Ay^Ts*RX5io70!FK-p%cBTh1SmA?jQv_eZF*{R{adU*vUCKPdNn zNUtbbWF^H$nU7ph*C=Xtdw(@Pl4(&zPv-c$s3CG4wbXPQjx{r0_jd zK)q@yTZHu=%AQC;bmQx3z8(oS`8bus&{^aDTq{1z>kU!Wj*H$|_Y!}Ta1 zTBmKv(w%uLJ(;YYJ6BCYpUqIx^30_8R9y8uIfd=yus1ktG>0wcuunOxfWz)rVV3h9 zmH{oDZ!oFnsCA<|>ryQLxS%Ur1%y0_gyDOsBdOt*Mh#!?xt0`FGrcF_T1?OXhcC_1 zS3L>z*z0&=b@S&ARxV%YzPNC64LSY7!_Lu{GO1^s)S=_N6mm8vP5a(XFZvD{tzxT~ zZF(krViA>j@jbL`uI-#oFOlI?2D?8{5f4n{OovlG;g@RBpNHuoqf 
zLg+PTuH|V!_uz3nWD_WK0KF#DYdq~m`{xX>Jk?dK0?SiNy;A4o@nd<8P5meDx{ANM zig)~j>G-#NtKJXZrr$q}yi51WDh!tb)&B?k0_`3CPQJj*IEViZdWm=VZ=(l)sZJiG z&kE36^>jJl9K@Sy(cPan8_;H7+N5JsU11vKqt6|YdnpsNUy0L0QPe-W6s==#y6w%H zW>cqQxh_r7)y2oJ%2%Z%N?lB;(Ojh@I+iwjQ2(mxEm5gbUD>TYhM&(1dPaAma;sTj zr@x=CL^Uv#(7AiOzW8ZD?_dOb#g(Me?OD2pszK>i^#uy)Iw(fJ)kn{5OZ`(P{^SAb z7xiYTxsrxJ&Ze@1bi8y8b%L(5DZvuwZ2GJI$D;@Jq7zL*X{ID{Lk~`)DXR`e>F+10DPf&uTd5%l&;;JsV3$y@^2bHmi#ToPbL2! z<7besr+r>xu5zJy>ai9dT}#q+lgj?$UX^cNBHblcq3cX2-!XIkXhL7*(9cciN{!AE z6Z!#%7Maks9QwWqE#%M-P3SfbU2Z~4IrI|~s&11|sTP~iBOJQLgoZivQ4@NGL${mI ziyZo}2~}g8j_4~B8pqXrp9ziU(63Es0}gFxLK8UjTN9eZp*NUN{sh>(13I*CB7I2a zn0lzxN383z9Y?>khpRS~6FY4}eH{9;3GKz9YLTJZVE~7IXF`W@=np1zEQfA1p?sHh z-cb`egG1jkp}D;8NfWw|L!UIE1sodDp>&HqFY$RY;UD`_p#v8aa;oIcO=#(FK_@N! zKF5^b^kQ+|=9Lu7eiNbM`{DGQQD$gWE+wccBsBR3AALZ>qQ|-EF1xb6VAr?|E1jNx z?MjbSr^nN41A1)`p1^rNM(+tse=>J=l^a^-ETsGGoucSd0X|yQ(ftZa<3Kou;*pNRmMULclV%S}Gm@cCXFQ)~g!)H)PRTmHpb%OYQqQHz zxHn4G06i+dj}AURhC@l+M$gAED$^ z_UOA~)}=rGrdw+Zca8B{4@7%&@0sh)-=T6}Q)-1T99+0n`^cxdK)IA2XJM!B-C89k z{6TLzT%EPf(`(Vq&@^i8kS{9yG=*84(yWU`M`Ej$MjoM}>UPn`r4H!}U2t_=`jGU& z(Q!qZ={y~}n31Lwk=K9v7<8avE9{$B{3?}<*}Wz{)a{GhOU~)ez@~(S)G&onzV`cb zex`<)`igphJyKqvrZZh(kt1}4{*UvKc|VpuW8?Hvbl=Dio>yXD1p znDdRh@Vhd>=L=ML)ZsD@uTSR{Yv2C7+q|C=%+-5d4mZDFzhU+Roz2{NeN`HvzzQ99 zfZiZGOk3_Xy~=Yh$GG#ZqtM*T^v)7J{zW#vL%#%f=mxb*pfEPGePPbG?ll25zjUO? z_mS^NZ(^zE>N-Z{#8Z$;tbKaCd$F3oh>x169Zb~jcGP1O)PbUgHgQxn^Qcr+&MH+k z)tOX(Le%Fts+xvW)XpZV8gvx3CS`bvqt2O1+o}vIn$DC`9BM~XqR*YVv+O#xZl_8qJQeF+v!{K(WcQONsk3nV)3XS4iLIBU z4?Reqr^uy8;PSWXE={!gDOsPTs=f;G)i8~yC-5k1u30*w6-xfZQq^M0cG*MKX5=dhs!+uVTB^h4`c(ID z=UpB`Z87}@de|m&SMLo{E z^v)8!sT4BL*MvIH^I5yBy>t>xvvyC*2wc8?T3T?PDh(|G>4CT-yM4h0^!A;;qO58i zq=~%z|6%TL;HxUG{eL`x1OuWwD6vsdQyXniRASYVDm6z=@I<3VMa60@SK3soRa;L4 zt;MA01lZji4GN0ZYrWOmtF67Y*9RW7ng9~;5l|7tDq6Ms7#|Q72|k?P`!lomIXe;B zdta~b|MmLk1!vFWnl&?P)~s2xW@a^O*8>Pn1i^cn;6VLh_PFXJ%8)?5_1eb;$xCD( z6P0FFztr|!rX5R7Y&&dJ(XT%zlbpljaQdlhPnew^VZ=7Cot->`&2ZpQNee$*chHmy zFuM}8XY2Cb$4$LkoWtF1^u{&2w+_cWmyP$LYX&9ZSM4>x<2a#-=4+y~e(n!H!tI1F z`NsaU`Vc?)pT)OtFJ>DfnP}%U9^bL>&7rr!X1%s6_O){V_(W+Zs-DE??nHNA>BXz+ zuK)YEM%iSvQ9o?^OZ4gODfp>fQSZXQKd^lhTHH+Kh{=eNoo4;v#VYZ~XP1=Yr9O=Q zsZTbzOTA|74c{n^XNLbe;WbyDGIiC&*&~O=I7-;$O|E1kDz;|35}Y`D(yq*mCa8P+ z;8^$eL4<}egPO=YVfLh9I^kGDDP{c;3hyso{8O-AVmN?zPsZ z0}3_u?Ps4-MeBQ%6!-3~Zle`-PHSqXX?^#Z2KR}$`>bq>B!YVj?k{6(FcWovdE8sD zdZ-32e%JUrHzPinXVOx}Kf9bm8884$evQ9v>~8WS22I27&2HWuXv7+u*Ul>(e3W## zNqc{1xzZxb>4iv29$`!Vk%?WfdJn}?vc#vj*SVRyolVK-8vpF_3i*MpsVVNYjpfL& zDS3jR&2IjhEm_8nUgs5+u(*ws-SHbzF98oL2XfwQZR6BuPMP{KN6tzUncD}cSFDZ{Lx{kaFcS4PO zC7fsKSpQ1QY!Vr-i{RLzDigWgzd(Tq!*`&yL$LNuurw!9w0_^J_?DT%vuDEEm*@aY zwoxu|x$ND7wkX>NQ#WU9 zvmeL>brxkcaW0`Ji?SQ#q7#d8Kil*eLUMMd`f1|#63`HTRaRRfkehM2oqYb?%GnA7&O&KBnoRH&S< zFa6-;5;)l**mrV3jYDVWuFRdC=matv(E;PnVCR+22f8OM)&ZGc<(`vme95$F=s2!^ zf`$(_JH;PZ8p)o~pJb30vY;A=a@xyi>ZpGX*hl?s1N^FAG+rFv_}Sk!x*Lk$v>Z!} z?CZz~HwFAGvhy)MT7J{Al9yB2+m(xcav(Gd?n$XIN3FBsY@+9GwGRfWNQET=a72E zNz@^=ZLl+I2r%Mx?P{0g9dS2h8YnYYvr$u~T@z96oO+-(yIXqi3;VxzmBFjK&WOQI z`a;N&kk%T6Ur7&%%uSyFAWv^?;|8}5UZIwh+`hW?1J`5>D}?>4t!G`k+TTg<5x|?& zwpi0D^5a8%+8&u}J+~XrjnG4_=!%q}7WMvNMM~uq z^)6VEQprWVGgqX9Pmx&*tw;&cqU>az_~Yn@X{Tk{C3dE^ z?S2)yP6_|XvP^74YTGZS)#Ppxf^7ABDG|H7#x#K`IHXJe^H+m%QOcS5I8*U6DiZ25 z{a1+ZeYK!)v-@LS=?ingrxK0&{+ZJ^wQo1Ld=s-Hl~^dIFn;F&$LO9ZN4*j)V>)q< z(4e{Azri$gUqWfCz+3)sLQvDgnx-y2!FeoQEHXT9aPnxi;M;*nv|svSjt$K*_ie04$3#mQoAc>w6Kjmlu4ULBxm#3Wy66aTsNPUjD$BK0S?d8PWASDc1xo(V*^2JY7vtlV&7ii_gk)~1-`~AMS_u-1Nv?!{iqz?Z>LK_?%3E` z-uqpep7|)@?JK)|F2T5Ra_bgv7h3(=M9+lDDpR|*8*;a_nXF3IN)3YfR_mH`rppRi 
z5bIVXFALyl*eam)6I6ai2Q~;?%_LH@vA|yf`VXTHGg06Zj*C$QWPiZe3g)vQVSc_( zWNuN(TBQlUqWo<*RS9qW2N_AhPkD~}VuQOJy1Uxj6U)1ky$K~88;mb0Cj-+n!q^iq z)du(V7}^~z%2tFGlI{-d7pjchITz6?rzY|jAR$PJ7c`<3TrB-(?Nn$+?@hp=^%iUodgt<-txz@$Yc29*I4jr z-JWpG@|}hg1I&+af0Xd+>itK)e4D=_n%}Ia-MQv7F#jv-cbNZ0FH*i)rn_^^XJG!@ zcbtEIG{0F`yK~KFVE*%Wod2w7el4V_zxfQzKWfMME28<$g4&&HJ_GZ=^3sm$|LaKo zje^0QYd!<>-@fDg^P~CAg58~KJ_GZgKOlc-UwibKKuAp8;O#E1G!uGBedbL2H}B>- zj5OweZ}-fUOM^3!DcP9l-ozqm&X7*RI86+x9gHGn5wXU0#a=}sy2;&q4}l1LkLPZ_ zU(sD6spx)j#U>UxF?YQYs{BXSseSTkLb~_wj)q5O*gStd`bO7Oz)p4E?yP-P6A6H- z$+Ub;PT5gSZnrfNA4(nPVF7hT+(ZfT$Gsk91I!!$*!+Lh+>-Qf!tzQ+YxP1LS ze__Yi5R1|7ZP09T>2MdeL8FO+9MDdi-DdtC>BAe0|33q&_2q`sByYp0+yu z$Ir!isyqIl^p2sr!|kjdcFh48;T)A829a^JFwzhG$I*g?5xkSn+kOb?>xb6=WFFtC zGb7_&ix!QNpXKpA@n7M4On-d8$$G9!-C#%eSKIr~JH>m`!x6k6UAtrYx!Z;>#X9KT z4(P{U z=!MME3W=pnFjS?yd=s|kNP~}wig2UC{Z0D~rUbNk68DlPc`3t2yRa|zQc{fem;l2~ z-3Js9v|DHHrhs3r%iXjiei@cOZ;ffEH~QJ*VR?>52x)s}n!*P+vR#E#+{kq1JX}ZJ zs_QAS@zLDi8B}WqjQp@)JS4?byt<}7oohX?_pHuF=dRDMJUTHxr%W>sXJj?HpRg(S z%OX;+MsnX_2QBr%SpSk^1{Kw}<&J60Buh!tbJY{t^<{hX8X}r9V}^v8TA%V#^E8QY z&l-v`b&-Gbn??^dy$t4)dXV2BQaVZhY8$x#rB&Q}QTpGr(b{T^dk=hU z)RbFIv{aVaDo<$C1OOD}2vHOM~f;veDac$Gy9bGD^%hO|%r6{uh5Yh*hEp z^ZqzVI!&8b>P>%>jU0zk(R(DxFZ&0olC+)vG8;J=8Pg3*U^oE6a0jrtea=tby7AsxHcpYO4@64iDf(t0VY6u(I-@{TU(6um1u#m@CwKQ(3UtM1~< zg?Wwcy2Qn~Pvj|nsn@~Yr?Gv7yp45VCNFKwT;GNpT>Sr}ScH++u`EbViY-zSOhR>> z@jPO(x=}Kmw>_zn{dZBY(FT)2AY00^Cm4SkGJg59pok;;AruJ zFGyUxS_S##$XpmxXaF1K|HW$!>X|v^yHW%Q`Oh2)Hoy8jmpj|)U%jIrX#0Q5 zuO(zX;)7NZTEl?*&~Ewpe&_sp>|Y}MdiXP}JYFv7b3*>~@zaDaE%3vmKf3P!E&ec4 zVIN@rf0=KtxIcn_6L_+o`HDPtlnq~6um>1lYijUeB9Jg0OjZ*;C&015qUQvLx%omg z8e)W0$>pT?frqBPbevfqLg(HsR-Sg!G92xh6_Kg8TABL6UCbml5lPhEEn*b^TGrpP zNRSDgN7jV*n5n)@oYV4WX8i}zNI}x5H)Ur14PWn}?0=&Y{Vx(=PSEOLV8CA2sg>~N zkt1f0&|Y|g>mnNKx{_1aQD?Kf^;y!Or}>~uw;eTkXO}sTT{I}}JUqEL&92)aPkc1N zIk)7txbxUr?u$(?(~5=xc)LBZKJg z*y#6y=wk!mgM+f)Cmi4(2GPq&t+JUw4Qjt5Jg-_4ual`^9Oc6;_Yfn7_-zJ%?w0%U zh3giZC{2j^rK0G+MQ@w!`sS5YjEi+&9WToj{#Z+=?V+>HRWDuHk_PADRTB;zIV|;t z?3hld&SIaf z#H#ep68}0#TxB+gNc_$}N&NjF@u;kDA+byzb_RKDkhn6dnvnR7f0B4`ka(oICy2zd z$k-X=b+xvF<=Kbe;F9y4hi?g6giNcP$8PwD{IX`FVRX-1NvHGJtd9x9zfx4pwPCy6 z2pr!%QdcOJ@76BQ)5fEw+p4*N{<$S=ya4V8JAk`+0Nn2gRBZrvv^>cGZdLX=!Oh## z13zq)0RG>10RPPa@LwJa$ThVV?^W`Q4B(HKDbQfpy!V9AJk9H(O&FncD9f|EX;y-#@z z9|%Z(2kAGN^h^h&|9&*R`gaHESDN$)airfpnqGAL)%58(CVe&OkI#NDnqK|;mGrIC zdnPbN0sAISB}M7MFoUnAuk4vnWzxS!dRy<~vkw}#!jI4P7`N(oeD;^dEnFVYK5N{n z$MM;T#;tOX&+d!c*h6NgAqlu^%o?j4<>UhJ&ClO~{E&TcxgE#B{>ek+3HQ6&?j(ce zeRiJ0`(6Ar=`H{L>V6mZs_No+Z@Z>Tfvtn%Y!M%C5wkcM?OFD*aN@Zr;L$ivIg2 z8&Fq2lux#*knBm4tm?hX(orZkJ-SSn57&UNfoV^^6uQga4{Yy4q(XOGr3$GyRmwKx z;a%7KM0wdQS)u)tBaZGGQ&KeTY(FMsFh4!lBIV3(qY;!+ICrtjzJd*t-F`!2GIcwt zcA|+I)0G&Fk?aF%rmz9bKD?oS%3i0flg%c|sNI?JzWHBB_h!33+q+a9X>$gZ3}V(0 z)0uCWX#3GHUD->Rs#-d^T)o>O;Vjv9yxBADx84ruV}2_Q!F0@iD1?zc`y@kBaj7Hi ze*X_CY)AAFZ!fob+=_=9MOrGFV=1JYE+ce@Hhg7ta%m^0#*xx1#;`t$P#hL{F-V zkN$8frT%a#q5g2Lx2_`pVVlp#@BhKNH_Ls8b&HTmzKM_ia4Mz#a4MnxaBj1%BELoX z0xO2>xp)fgCxvZfKQ3}x`($goX6y!em?3$TwPAD8vjrXd-z&9VZM#U?87S+b&^mB( zxa1Z8VOMK!(s{FHv&Kz$H?;A|G#9(K82{9@y5QRX8p2%%r_wkiKT8P`Q%!Pf3XA>u zQf@q-6_nGufUe0G!B*1v#XlDw;TB=?n@wEU6K~4=P{&R#t!}dXgMGqgdw-|hIc$AD zZgE1zHRH4<_}*}e>tlM)rBaY5%i}nAdk1&U zZ{PsNSV7ftny7ldRmjpLBoSE~+5aO=mD0#~D3QCxoThL$=vuobY|aT8M{-+fr{fbz z#DgT{hT|u5QdtwYZzIgV?i&Vo(*-W3ieZ~O9aYN5g?70>|E>$pp9=Kleg3zZTu@pB z{=xn?@rRMvewF!q5#au!5;q3tc84I)2Uz(fu)mwGpm@NPAurGq`gXW1ff(gzL_fz7>>%0A_U}*Yh2p+LoiOtG#I@Y&FKc(?&Y!L)46!QO`e4OXXsZ>Nnw@UtI%=u+ELW zU$^^}?lN7I_k#P3vG`|^SBaU9lYWunPe&+OVW;nf}SC0MML_1 
zCp`ggL){xO=f)=#-LT!(#A#UH;55A9H1I%)tyG6>ukBWBA{_JPHc93)dqv`2v{_T; zDBEVW8*QtZ2J4O6aoI2$W7HR8-Cuy>I}M}XsoVbZuZ8G2DZK|F9MTyzexYeWppm`% z6t_agN533t&VSWr1KLZCMO%(^I+9d{X3Jj7*XY5q|G12StE;DSo#LtRI52S1S}!N2 z5{$iSY>{ZNDYIKeAH%1)O2g-R(VPGA5}L-Qx9huYkAqJ?-#HPk)t%v)O5_w$ar=R^Xr z^tz-Qt9#G6acO#O!gl)0F{c4S=*#{TOf8;u&p7iKAo292)zwb=5@DAo8Rdd(0&2;g zN+g-=d|kUS`y_3U_y7M|K1lUdCC^Mu)eypR`0TUJI!l8r(d}0uWyDmoWS@xbFJab zn5ihc?T0Y`-6p?{D2T(XC&^NZR z*$I$Sw2?$Z2)HDGAw6KJ-}yup?wkC0VE`Ze&I2=xqOp(9 z{)D$tET{m#v+PL)caUeSE9sNGyhlYAlitX-hlj}`{iRADN}_pDpp_uj`aoFKi$Nt% zk0iaAq;Zc+hu0=wS+u2bpqCz+hX=jiMvkz( z{Wl*r{r@I_vlj>T;WeqbN+h}pk$seO5dlB6-+L&`gS}(B@&sjs`TVqn?+5hNq4c86 z0}@`Rk*wU=_Om)9u3)?d>w~!RukC%!=-U(QGEM-3?X(yK$pr4Mqp7j1Y8Sf`IgIqo zB)pbTdptY0;Q-UULVKcR`StQ7!O7bh z71kSh`MCXE;iZ*xy&i(lgCyJ%V;+LggCz5++la=ldtVBU%c6`2IX*<#bkch>8jWqA z^rl*0n$_eV@i<59*0z5M*Yw|khck7X3qs0G>vXe~Wc%R`NZ$UGB==@PAnm zFZ>i*XChmh(arZ4A zEhw*8y&u}eCZi@SAO94dhT8XtxPUiL%8y>CNZv=mie3F5cYU+?<~4~YKhnAyE%^oL z7I~WG$Cnhd41U%=33e`Ty-n`%f67U}k62>2oZyyR-l)T*b?>!4;_YQxyGrL}$Hv_s zR*#d-U@^#fW(~y-)<_JeyQl;064n(~#B5F`Ldw*@?53UaK%h>j*OLr-IK*@!Z!pk_ zH9;q~FIQKG5{{)~lbo9H4$JpmWko-46#d4+v0jV)^{o1zH*5X>4Zy7bKjMXUycrk) zG5@bo%Kjb*{pW*ugqSPz!+p2i!zN=%!IEJdHPTM!bKw5LzUs0rgRp;|GQMu7Z2Bv2 zZ!`XkA2EVtkI#U)IFb3wCIBQnOPoW<6cXq5zZkb6&iCauH|O*t&WV~Wk?1D|lKjL< z`E$2en&gh-yhUPYpWrVCQtj4AL{Km8lhX?i-uMT8sBs2<>twW$@Xn}&yZxKjz8=WX z!@h~k+%BBfBG#0oZO9tZvL89i(!&-w)wm0Oz`8X*9|T=tBeLHG3){{%fcF>m=0#Jv8SJXez^Z!dmdG#mW|0yWtat2Gz3QC<9Mor?KW>AXQy*6~h|(NF%p zi=ckY(PR86z=Fk&iN;&}b2%0j?n<2+**})r^jU3JOa{+GinyR(B^LN2_-)vmI+@1} zoG5aZzPSgX82s~ZKEay%F4X#cc8$>^aNlEg`nQF?O&`@(s$F=F;krXpeV<%A5lGK5 z=u+LoQhm-eF6F6M4e#VI%dgGNePR7GrE9>ay|4F~+m0p8=VVDZaa~ww+b2^RgG06V zjXd&4%ahsmz6!p-91ZXzf!5x)%dP}-&11!tCe?|D{Af9GGXQCz(Ep|PnwZ?tB)pe| zKc|HDzM^AH!n4AJVSPwn3}A}Sx5sGGMEz>NsQ<$O{-J8WNdD6Y_Q=05h-L@_n-?g_Ed_>7Uxgv+Ad;n*sd^`t&y-}D~_()A%_mHPW1 zO(DoY_7?8^)ltY-@PaW*N=Wbjgek!QYIK)#`#|m`aOik~7)&$9V9_!SO9~}ya{rEj z(7T|+)m|T)&(VYE+NR9-a(#;b(OcQl*)uZ%-X!O?@@2}FsQV0#W3To4_8zjfNNxmc zW+lJIcw6H^%AT1`Vb;3OTAyxmU-$304}O}tek|cVGu2`OoV&T1rZSfxWDj70X4jYY zzOolb7<-PrduBEVsm%?+1%84tg^K=@N#}I@5_ml`CxvOH83_1YX1BjU2maIFGQGmu zm$RB`SSLMNwaQd1QuH&5&Qz?xZ92&Gn$w|oBrKf8L&WU1{!w#8*fCCrE*3Vo3yYur zLqimt_O`hHvS;Sx5TC-iYtnU7NbjG6am*`3ud~TR@D_qiDRiEnp`dJ^S#;a+M!Ll| z-5`p|r~47<)WGAFepMtr051vfam!6_9To|?$^=>USkKHUA(|toPJma>%=5$0&j8yg zy6m_b7f^o6K(p=f_8(M##f#s<6W{tiV?wmy7}d3SY_^+?zPx<;AUSrVuMh2E_PYxR z!3q=;ds*clWsZdV*8m3Jk(jrLj*5~sJwvoU#6VGg6=j5V+g_nu9XI9mzFv?AP7sRB zw`kR#2VRSXKFUJ>A=UdY@jm+TbVpZQFv)5JGb!(8jx0zas`^o$F$WI$v3El_T&tAM z#%5b!k}IKX;1*ZZzocJwM|f$ z@H!^b2r+YeJ4oABFi#{}x((amxUgIr2|EVStL9_djRz3f`+8(~#(jX30*tD7&C>xR zaPiKzQRd^W;fh-1gXf z<2bqPv73$Ku(ro;F^-|sp1n9a$|}{(;a{tVH^JGTJ}$MI^zG(^Fj4;Z2;chmsCGyv9U97%~Eo!E>xEg*JC`B!vdE_XZtRrgI z>M*ufx-TOFOHtFvZmNbzYmG_DdqB*;a+s75r6qQd3zphbuv(nUOW$MM^rS`wAw8)n z>LX zx&B&`D(`D1?{H@pi~1ep&GYMkuV&x8OH(HA7W5T21IOHi>fG=uEufnP6@qYtd$8Oc z+E-x7KYu9~+_;^SX>q(8 zzRYG(v?SyLtZMb8k$2qA&VKb}70!bRE6#-1={cy$tiuP@Yj}7N|8=9~9XqHfd6CxV zgtwj;TO;bf={s=&ElIWCX42v)?v> zZNP=WhTv)L?9F7i{G$*K8_v$ihv)hA(SuY6=|#n-I)o$Ys4Gy@sDsf)jZl#Ll6Yh0 z`nxf%@u$=f!mB@A{*4N+5+0ysBF(A50q1y^=^QVY(wsac;STa!ev7;j*gtS9#5s6F zdGnw1*HcVN+N*Yz|CgJX?$cqu%*`sf_qGW475|FdH`D$uH=&{-ez3ZTFV;xqXK03U zpOZyOll#OelFHdnq`I8MyI%Ya3b>y6hQwxuXv*AOeZOgB&0=UY)ielwTJKg#VLpX~ z3zQ!BMY!(_+*jZhUsCvV-1i%Iy1Gqm>ZKM1-r3gM6?i3r5i7=Nu1>Vx)q(d;>lOJ6 zY!xXrG~{N*k`lSgjoCm-LrZR?#+1lk?t0wKfjfbFa^P&xds}~SJYRuX$T>*@-EzVV4d@u5E0I4P(u$W>^JjgHw=`MFuxu&nK z&kv!3)~RT7HB`7#*iG%fl+Abdst$9wZ(-d)|(s|MuD zjv-HE2!!j`gTg%QN5!U4;K2RLkAI`}Tkl(i_1gFkPX$LQnC({5(FyNjnrn^U$P}i* 
zD~-m4DY;v!H1N5F(=1ef@cpa{sQi2p)F&S~N09{qsgun~mvUl88G1&?+UXZ0<4sea zY;wCAyI(IkrMSEIYo^_KgtUZR!WlXPP~KFxGP!@;)3)JtZ_^3q>>jM5vWEfQq-TS( z2E4-cd3L0Up#3uy#rf2^+xj76`iGU@j`wHZI7MPyet!{rTsU{Q&y{@;P$ob7pM`{9 z=d$DGqU5l`{KC$p?0*KfPc|9m-)Z?57QkwMv%Y`%M}_&h57;JTC@huD-AaZ+w+>*9 zWaP$-X%(w_5z^kG>|eSu7(zY2;~SJ0Ixz)jf>C!|<5aL zNMYk~fGiu5d&e-882XemdltdGFnhywd>iUkUU!XYh7cjWrn6F*ar8rj^g>JmFYV^t z>x+`b%XpTQCf=Cb8rM}Br+7CugDl1X<~ly{eY^jejR*KPxZP$ba^DDDJ7aiBQFb&@ z`SvAnE`fTlSR*z&T#*s_V+k;n`Ud;UG1bvAbsWk?4=2bNP4`we(_xX;R6|ZrmN53n z*l0-NBieN`sD}96y}-6-fBX&?elAf3{C3*kKR>x2y-n%g-|+yx*@NB5rufe70rmp+ zKzo3TD4<{u@a8Q6J?|91wI}t%?~4BT-5ut)`m_Heao!%_ zLI=@57l(OIW|#Jw{evCaX%+0w9z%Oh8&1=gF)5mHPk;9g3%EhB9k?OC1(@C4loHWf zWk0|9EA-IU7~r}qfTx>k)#v;5>93|=6;0nZApPxMO@C{ceoxCC3>{I_Y_f*!k^Le0 z#7{!~;n7o-i;eN8=n(GT5nI{Sa#hH%9;I_9*}Vys%ws-c*gg32dNZSn>NnnP&g_@- z`_&$LroH=kkjeCw>VM~e`aIn~2l|lGRbl!8^;rz2FN^g@@zj(t+?T z{rreL!5tw#ZQri-Gf!ra^}27yxrfXyj9J`1Mi2|*Zw`$vj166oiY0Jr+c0O=tt4uH zXHXSu?(IXJS-L%eHMjCKPA%zdD(;HvJ3{(=VP2r$%-_E@5|ZnE-)q;qi@k4$>sPP6 zQvP)X|0wxa6#SH1)cbJ3f0X>cFZi)nE$Y3w;IEN?X2H*Lq9}VMLXfE!*L%u8HSdpA zX3sVr+uywZZf|Q1*4Nl;n-@bvco;FQrU8QCD1AlSrUCx-1N@5y_`9NhuDi^xW9<}y zzp{V)@a)5pc-}+L{yyT5+#40!1<~q!gnVP@8p!!xY;5+bNP7E(uGy##$ zF|!&wB=f5;pbhNwVB|5>6ZHSIgXruF0ZnJOna(!(Li&66MoXs=JQfduX}=JJn~@iH zPcKXU$c($tKfHBwBPv;{iWOjFypdEFebT^;SUe+JSCX!XKC|fvs#H9I5VZmc8D#*p zF?%X}a|9@G%zngK4UADEKO)aHTc#PUhhGi8lIPB3ol~R4tOVlN>WoVr%RoB6x<0A7 z?R1Q)W^h05XSr*6KjyM7&B{;4-E$L?8H~6P$E3DCu0za-q*_bd4>r1rD(H`(-OkHC zFjo!7GxQkk*|`>0JiVl(=)CL#6J_akJ@qmifDeL zFn8yg&%pezTtA@xUg-v|0M5^km9$-iUb6R*K0?l#AMcH(7x#~*zxJ!?uMX2s;4-1XND043h~^HHyClf{lOT8Kzh;2{*AaiuE`ladw*{eJ!e6+i!G!%G z2-{AW(kvO^e{z8T9343}h*5($>1%;06Aj)I z=f(vd7fE=#H+u7`)o7gC;a-&wii=F}sbvaZABO)iA6~9-30WrnO@!McZWb@h9zg)J zM-ag5QFHqgtA#T7CNi#yTEg5}Pphv$AQ^0Dp zy!1sJSb>h;z}N5x7u!hAUSj^aa)9~gMX|4%e_r;nBobR&V+QJ$d@rnJOW;<`R7aj)xH1$ZSAH7fO zPN31RSHmW}Gj&+1$Uik7tkMZCHyJFTS|altt#H1nu~`c4p^0N}m7X<}4X_!Hn^`WjhD);y!IJHQoCkvTcpGx$!DYyO#`CGd}lC1AX+!|j|VwXSA zzfB)#$V@MH0o1!MP@w95>t=~-n;}bm`yj-(nJGZn~dDy!r7UAOD@t6fA|an_5JGv5<|J z#!}bmW{W?VfBftd+LjN;BQH8XgPh1)$eWK%39jA+n6d3x13dY4DzMH3Q>Pi*h(XD? 
z!JoN_GE={3E^3{7>?10)wd~kGKFT-Qj@bGP|9iN4)7>tymGnri1~va#!&gN@_-1^JGphyG`tCZ6q>af$$j|ELn(C`OQ~%f1_o(4HNjvir$dNitog42nmv`s2)CtQg8d*bHr-pSlDxmD7<7DKlN{rn zeyucQ)m9|=Ko_h~_6J>jZr^OWeOa{IUwYNn*`a#e zWMZxHE$r-h#8FRd&AZbu@G~!UdxTEhJe~5#H@<&v(Fq{nz{%nt7a_q<3^cXemSYEO7gID_T(V7Zo zi&<2vmqY*Cx`N!QL1rmlLikHjZ`xRO@V6bb0>7{j*xo4C&ayNFF_Ki#w zJWYP^%dp7X#s5wD-ds}{rbkb;a)@fBDY;eWlyo-S&}EMar8qQaCOo@zqv*4zU0H3o ze*-}wCYaFHG)f}HCcQ9C7`EfFt&E_&gNuU>p`YNVUvbj*meI4DYV20SZ+Yi$C?t7$9ppk z4R*DzMA)??ZbA%+AG1NIpK4@?dlJX~MG{o&Zz1F_{t6+ZykzM&D-?t*J`*Ud_Ya}{ zBAdQOfCmOH5}KIQqODXqx-_x3s3S$@5Bbp_VX@frr8P^Oo#HekL!Z=SoiW+tNiS&k~XlG8qVfYx(p?CHu zIMJI0Al}4kxXbgJs>^`Y%aY&73)*RK)Bi->pybb}=pg=|QNqs!2u|8RyX@359Kj}< zk)5({0>S3@cYV^ZEgl2(gSup?B0KgSUuQxKkLH0`P#)!PV=pmK8W$~+*#~84)2^%6 z?etIni-EVI0B^%$3s3K`^!|5*dd1HWkniPwq=1!jJcSp^qGQN2{HidXJu}`4{%Bgk z`@j0d?+*ZpYLJkm!gD;Zq(hBV#*-~vBV(Tks3J{KK8@Jcp+=xHX;3E^mc#fb*@h#u zhMHkOv^XQ5Y;oSEv%ZwR)Bum7_!axmf&68lKbm<*^|k9K{|PodII3|fs3dfM`}Ib1 z;9N_ndxzZOJeZA3|1?^Oyf%e$p0Z!->;B7ViC^`Lzhz=sP6wM(Yr!;c3e#;!DaUH|T1XqsbRr%RX=`jhiE*O9I_8?Fa3r9YB?L&1Ng{HqH7 zf0KW4!GD_k_ZIx)<-fJyKU99V;NKg+ec?{@m?_;w{uYSe&}XLflaKJ*JrfiEclpf= zcQ*YR`Hv~2|BL(w7yQ4Kf3Jc+BmWTmlx5Ox7jMXv-lg#Ot$V)QZ&>#%xwU6Pnycm3 zipofT>#cjd+`qE!Bjnbe3F!}&`zO}@4Y@C{ z?h3ijui4z@YwgG;sBcZA zw$DgTYu~;%+9PwGCP9XG9c6&Oc%`?FX8)jV4oXiZ{7sGR+lrXqo%F9j0#6B^)+ePJ zeIk~+x}>!Au_gpZo_}WhjcvFw5+kepj<2JTL3SM1gfujkeZ4V~B&&Mo?kKVUO(;Xr zN@p^dtP)AU>-jx=qBkQJnH7J9P@X^}X!f;Phc>(GA(esbK2-`}GZPUCr# zDZw9t+y6SpQT%tY*Ytaqnq;$s!b|<%8`YL~MLuA@4Y)HOkhTGv@&Q-cfHKXkpj+x+ zXalN(0J|>CHW6z2*{;=?e{Y%&cDagOsa&?1)@102zQXX2X#Q+XKHTX4+TTqKXPLA} zcbOzI_f^-Ipe>Rap1Gr1@^sd}WCIVz;OZk8(f)b(a}_TREj6K+R#(LqD_qx6jir@} z;?BaZ#koCLsm7~o6qBo1Kw`u=LAe%@Iy{9QTMRb2#n~?+xeJ*l3{$4QM${@BnDj?| zFc|ws%n{f*ST3xMkXxXq@VpX*c!&L#0xk4L%%abdhj@F2~HCCFHlz;Q|e_o?J zU^xs-Q^!MW9qmFQprcI1n)eFz`zuqOWGU5eaTxCZmnK6~zxPmc^)8OouiyD!?$BV# z)8rktwVLx|U5kZ$Y|$FnW9{17t&N#K4f=)%^3m!}B1AL^w)Pq`nF|ku0AF5LOxB}! z-K~_*_!4o>rj|~O47h~fkRzbEXMGbbNu|c&DfD~3*)X|lpXqM7X4A0lhFI<;&hfz%6Q{VZyDz(aVP~i> z%7Grstz%Z>h%4mtFVba?CkQUq9;$3` z7Orv@u5F0}Lr-VRK(aa}eM%AYLX8tZe?} zUXp-NwM6A`Mwd!_dY~>;tbwz!PBcq4T0vO-U|9h_3)EogJ{clO=B1lyfrV=|R~ekl zXw4(^Zs|Wk2(A=Gh#4|KsAcB}MQB-kH`vc>e|M8$dHanarQ=QmUGz z>!ECRL_dB99+uilUrQ=r10kh(0F|(bt{BuybXw8OG@1`GCWvJB;PZddFQ$aV#N4l# z<{iZ>Qp_>^V#;iuF2#)M7xN{U0d1vX2K9^i!p5vtjQAqA;6Da~_1LwdqOopy3Q8TB`Va1ke4nw_NM7IQ8T0qbqo+AJ`3C6SE<`vDzOSM@ z&&<_fXsT0nDz=C(Fo^Fqm-PO#QWKeKYNAf+lyf?O^Ssd@c?u_VrU^~?g0r|^OskFA zteD^KC}x{tIuzrkiZt10cDqt#pP?XAwqycs%F0x<1ZAIN;TTyVQ}JV4LOH4Y@fJ>{ zg>!-649mljC}Fy)Ie_EX)EbC(mR|y^$UUxyO%oaK9?1ExC*h7gs;BDxbJrA;Ok>U@ zQwq4He*(daN=dNfe1617u8Nr0;&F;v zQm5ZXcHy_hz3D=7lI{;6pmb%Vqj4r$cyGMR_#cs45=4H6x5f3oW3xONgnmLOSte0Z zZ5OFu4#MA4YTjx+lH@(9HlmLy{&q1TURp@S-J9m|?MdxL{JzECEAB~Mfa{#%^OWW{ z-^JbHK5~M|dt}kwJ|9Gl}a@QUv$)^2^icTv&w-^N82@tr}V!u7` z{?dAfDH}DNVZHkV-s`OQkigq!z27n3aQ)^yK5v403VVea;ND*gx$Z0YEN4kTUk>JDW(3s)~LVnWdbJ(*M? 
zR_FNKAy;J1tjTPaYYv<0n-ZEFnH0!RP8iBfxzmyJ+<1qN>xy^7^x?cma`B))@0nb- z-<9`7F8iwMrl)e*PuzK5%4N6bd;mO~!)D@x6fZu&x)`4WUW5<3F2o0u^Y8)2TzvR; z0X|^D%j{h9nJ71RF3NRH$YB#!qXnUH@YKfqJeL0r*+QI8p1>woPw?f1aw9nbchZ&T zW_e4o$@q9F*=Z@alS}1uaSyl^`lt?+=1pPq_F`=9$z`4{#BZ~kP51_H*PN*nvG&aO zO$<$lud>xX$J%_7PbA6|3<1P~Hc*UaH4x zryg_Y?JdvaO9aR8rGi@cBZ>fO^uYGhA#|?Po9mfC{(|yGFv~&F7U`EMfUGu6%4L&W zWsF*(wS8JXo7OSeHz|}KnHZRm%pG#4q#cH-ukK@NUp4$ee%q?}P4y35mv&t^oh=;9 zXA1@GqOCOvcy$f;i52)0~n9CX)Rv2T0m=(w8dc@VPD?ZFBqUmY)upeo|#rp5ipqf3j_pbJ=9{5i53v>n~(`DNFUC zq~?*fr*W@WANMqyOF4?)+lT3MvTUhKQi)5d;*yt?+I+j{>r&sMnQYNOKD%u1M0Rn{ z1U9EDkIin*Wz$`=m|9z{u2jxe>VZnzNe9MF!xmJpk@}=suhpVns^2sHk{OqokVb=kXG;+nuRrczHf|d8aWi)&mz{Jun;Vf$8W^lOI*a7q!hYE@gQIQ!vj>z&w|S zc`ldbzo^WUQidpxNxpVc{i*V*ywoG9dhJYjX$Msg#l-C+)bmWrR_&6RPwLIYvpJ`{ zGLB{YC6)Q*xvT^Ab$N~Q-)_f*&xJ+O6xTnvWQ&dUOr|}o@p+j!KIhXqrjS|Viprle z5nnl($DD|PZngcH<;(V2dMZpkG>*LLlxCGh_E3FTIE@wdOl22$O=0IZPsVsSS04|h zeaI%n--kJLavqzEbr@eE1D}5mo8L7XP@jFuai%yf2+2a@u#kBWIv;PR!q$>{U_r?mCcM#d5!Abq10RErSfGPWIZz5 zKzXZ`yrg4whW+IlL$;HsYI7>%KBWw)U+Pw6Roke%Dx>n5Ws`2pX73o;W+XG5ZnfNz zc&d}^YqAVW-(@4~lkJsMwNqwYZddw(_4z(^SFZGP@%t(8dq&{B~&yoHNIftodwMo zuo>{*)zW|G<=%N-&NNrP8-7!HwT>!JdY&DlzM1l}j#F~;rnqJ^%B{sAl__P2zi)){ zXDYbLtMV$oRFNNvSMy34C#E+brhBcJo{}2|{|&)k27a~0&&a!z>}mIhDY^zXSKr1x{X|>Q@&Y`ov7oBRvk&-qoQ2P2NyU#P<216=&sI!m-55e%aePjCnw+K zWiIaPnM!#}6}}@2UX_>SNS&FsRAui|%B~i?)&7~oxBt;$h(G8B={lMz*+uZ>Lo7X;p9Ve{Xk=qghcH;!*+2CM%fcdtdJOw*` z3(5mF+>yFT$6piY1woD%3R)y+t)L-62Lw$E>JRaJTLf(uG%DyGL1Tgr2$~dB*72k` zPfNU>mpfHZub@6bR|#4v=vF~H1nm*@GlC`rmHM6#=RXqkck=pHu6LTC7YVvl&~iby z2-+rSx1je3`jDW91^tSkKN9qJg1#x}>;|rPk)ZDuv{BI81-(nqPYD_q^btWH7xY;{ zUl#N=L1p_V(`P(5E)jIOpsNK92zryCt%B|m^lm{P6!fs5PY8NK(ANZ=7Uufq30f%V zQb8*PZ4~r2L3;$fPtXSieO%C!f}RrekAhBU;`#~&EfRFKpw)tI7PL{&RzY_O`T;>d zBIrGWeoD~K2zpS^grI|henZe_1U)I}uLVsD`e#9NxAA&U6SP23ub@{7x?IqA3R*5` zwV*c%+AL^?pnC-Uh@de+9~87t&_@Iv6!e&&&j@-#&=&=LMbJMAD&veb^Y+XX)GO#x zL8V-&INvO2o1k|I`jDW91$|u5X9ay(&_4>A+rstE7PL^%YXuDm+9YVVpnC;9DCnS| zPYZfd(6pfJX09h+(DMXcF6bIT>jZ5SbeEtpK?ekVT+n9(eOb`g1ZCTWJq5i;(5nQ! zR?td8LxQ#mdY7Pk1sxD{P|)LoJ}>AgK_|3wyC{#sBOhi}b~+!Ali zk38<)njg6x-kKk|9p0KBxgFk`AGsa=ug#CsS#U!eZ4Vu+#;&V7!ZcevoD&jp4Q+Z5CIy3-98X2$`DpWs#lHvIgYY%+FUeweIX}hCmZP zzXlmw+B(A>(Tc{hvg(@l+8|@M^J}f0Wi70F29*=NIv8x|!WQ;RmST7NmWE(wv;#Ya zRy5Q^mn>;$uI~s&JL+4aov2GqJ^qy~@u6VauZ(;yVxh}@}kWh?Yo0p z>l=d&^_@|w^Y^W5%NwwHDt;Fd+}zk2*hO{t6!^B?x3Z%bkX7n~+jiD>GzROpws%C? 
z!#dV*%g&Y#a@r#rj!j_0^__$t1HP@RD;U9UMD1<$t=KO$h&Mdq+ZD9mTS;>}3>awX z3b$foWo(&?ect#E)2myeJL>V-_zH1eTC#~1Nxourjo99n)e5W{K=rmZ_90g(bVtLb zC3vTf;H$RM{x*2G3(h+mI$E}B8sG1>cA4JQ(hx4Gk94vRIOl8YckHOIuHS)r+?rEE zdr^~=f_{upwfgK}PR;K62;Z-|6E8~OwJE{A#@Lg%RaW~tS6Lf2e$Ry27hT*8>%yI# z_1nUm@GcnFnvQT-?8eQaRJTgFudJh^y#sI9q4Ra^jXPVz<=FA5HC&80FJ)q7T`1@7 zOcdBL!mz(@Lowe(n6Y0IQiJVtqdOzmnX!EnE6Z8i+P<~EwYU}A`m#!KMs;E=AkHe= z8@4mHNt|tHgCx#x%c($L#oo*2=!>i+*V@cDfSsW!Xt2|BNjo-24tKDdjVq|>Ejz<)4Y#rz^Wdx6X7s7X zvaWE$&S*<}8=4vovv#_&p{?1nATBmwgKS(}gDtgC<_Gg?TEpQ8-W*cf+R4qp_7QIx zp_e!qZ6oa~%=Xh63gc%_com#^+hEA>7r1_C5u!9lMHd!zS6mwt94w-PCrp_FL)|nob1% zEO6MbJ{rVm*04Ru%XvD&HTKj4ck|@GzKH}9cVQ2IZ@EcCECKf zgO+=KIT5Bo8H~L?F}Ty}lA`Q=`Ftclo{s^EIyIX+iMRN&NlgvdM;-dtPHJk8gxgrf zq$W(T4Q$<{Ch9k=iYTtvOls;3hqtp0lbWL8)>gK0Qqyh>ak#Q+(zY-%u*jsA&dx}E zLzs1g>uPM-M!xyPBq*$J4Z}h3N8h9!q6P;i?bzDU3We+tanbhO4eaxic7%5{L@*(J zfw&#*yTa^?#96KS{-j$vX+HYlBvj05#vdlp-tYCTc(>9dOq~rhYQJf5R0f+&SZAaS z-|C@>m5-mf#9MkN64yqV@;Qf0Q;DN2>>P`WqPNW;F4~UqZ5HRCZ?@oC*}0tC*~-q7 z9IjtJIoggb>@k)sp1c#g*pq#hOx}%{Yu~YztpwL;)wamusJ5$!v+Cu=FY)WQ6*thB zgaIks$>v+g8Z3l5@qP2ltTSuipvxH5FL#hYJNg>lFNIy=Z{FD%-GpUU`)-;k%erXi z)z!4d;b3ihZ9D8$h#9#aH7v!-rnUaonsAhMQSY>7kV;n4+Jd;QZLh(Kg?zP+kVwf+ z%sx+V;CkW%!BO^f5&m1Jsiot=8lRtM*%PtqXE3nc=ZP8DbgXcwTZ0T&- ziotOukj?GwJHm)Cznj(zJOVedQtPZ@2VXe_-RtTjVycDVp#2uNaQK>z_8r1c9l&g- zu}=58&TX5p(#JB)nt3CwVZM6oWP99IO&ImK>)89p`7(yDF$tP#@pJm zbz1}OMd^lV;gUs-m}uE-;D1X(nOKlDy#4J9S6T%YqP#OrYfCGZE#=k0yH_XtuchnD zs@IoQ+Erlz)a9*|y$xs+VLUwu3sc8W{oVU}_Q&?`-5=jS zus^YXaDQ_D@%=;lPwY?aKea!-|MY&=m*3~`V3??;Gkn(UP&2^nud{*ungR zo`VGky$1^q`VJNy^dBrg7&usWFm$l_VB}!e!R~`S2V)2K9*iFxIG8v%crbbJ_`#ur zCl00#o;sL5c={mg&+qs27xa7k3;TWjMg9K%^8P@7U4N**xj)k1)!*IU(;w^K+aK>A z=uh+y_9y#~_Yd`-=uh>Z>QDEd?q>t}1D=6`0q;QJfN!8^z&}tv5E!T%2n{q3L4DP&>`?w8&!K`t-b00le20n-`45#J z3LL6C6gt#=C~~OlQ1_vpL$O1955*4+97-G-Jd`|i{Ls*$6NgfVP8~`gI(>*8&Ohuq zTyWTXxbU#=aM5A^;qt?Q!*z#4hno*a4tE{yKHPISc6jgM_~C)WiNk}3lZTHV9y)yD zaO&`>!|B7P53?irM?6Ogj(Cq09`PM1I^sW4ek5?D?nvlJ^O4Art|Q$?dXB`7>^%}c zGH@huWbjDxNb1PxBc7v$NBu|Zjz*659E~3xJUVpr)KPSR0Nx!!eG^fhLUc1bvi*qa zIwPi;FOTho_}`bMP$56B*Qvj&9#%KNLil|$swZP=U;rl(yo-kjdRh#R=uHvyq!@Zp0zGI5y(f*H<3X?Sp~nQ!TO#NwG4zrIddLuZM;blDgI?i7 zj|iYQM9>pr=miP%fFVSG8haD4ZhS8lBtPzr7sdT?u>U8ItI%o;`8e{e|AB>kJ-5_4 zGymaDD3CA2e=eUX!OMFAFY4ge@g5v&eTcWGymkB?T3`X{l!rIaP}QvUzZL+O0o_4Y zx<3bxk{tEniEGzy@RlsO+Ph@gq9u!b-i15rI~tm=Yiq|dwR)U&);BkSM0d7@uiF-G z3*%{>ci}eqSGCl;P^P9pMTvJ|)7G6Wt&MmPypTVU0MgdJurrD$BMT!P?NL0-!~@_9 zzbGoR1=Q`DKp&ekcmBjyJo@um|8};~6Z&wYm!3+y^Yff57T})P*^Ec}UR^9V&$^h) zr8z%Q7xCm;B{jD1r00WfSH68&)FD@1%FS(RZO5m~`T5IX%t`KCUOQW!*CUVDgHCc< zXhTM_6h7=}#D~9~3>z=7`7YNqSiKI{YB#Je3)YmBmz8d)EK{*SadmA)ab>W&Y;CZ# zEKs(-v}}FJjm)LwswoSOAmtueYE2+mQnh|#S#@oVT5itBIffI@b;L`{YHEXPiYrkY z)HArIsyeu~vIL)VInBeRnm{GKT!6;ZunEo;S^YJY#cQpW*3=ePR^lxxyej%7Z+^Bu8KkZk2;2>jJ^z^`*hp)y3-}v}`GU0B#pCoW#oI zY|7{oHC0r}8a6dUthjQ6q6$BVxAhd4lx$eHp|ZHPELgRnc5PL~`nAF0Y7{ZekqJ~+ zRaULtP!`-!LzmIoGW@RCE}|?@VO;h&m#L|hWyQ!d!vO;uYRZG_EHmL_2D>OKzY+n1 ztN2y64VPAJSS{h%4lJds0_X|Sfesv1$SVSPz$Mb&zCzJ9g3tad~7 z`WjZ?z-y{DR0rR^0e-5iSXWVtuR7ROf~9LW6<4FK)n#kS;AFDQ>az7^Ybr|6BiIFw zXk|%2M8I4JtS&=^)>V~OthtfRb6_%7#rl%cvJ!TogI27sEn8bw9V}gg9A%aRYS=~2 z^{PN^uz2kTcCiB$Z>*{)CA(X(UR+vQ&AiUln#!t8!3y{t|JE&AN}fP}cN(W&E@GEB zS9q}y4^{;by3~(Kta!N8kr8gGsjY@h;DrUwCCgaV!RyyoZCa166riTmtcG=}YF4w$ z969uy;>wD(>&wVS8__YXvS90~pp3Q}cDW-@W~(VKU&F4@v69N#QdX#At83ZY9E3MW z9~oA!t6*0uBBk%%fYD>adUS(I_IBq&6-LF1HP%Q4r`4=yS2@>&q1IqLEMp5DysR!j zeZF{g4O`@(RR8h{%eiYRV6w&fr5fduPrtNgO?6pWfGu&*vbx$}k#AYi)l@NzJnL&~ zut|mO+qH<%YCdGw1Z&7{=)RTsoX(aYaKn0O|H>)^Dgvxx9in>qNK)`1`hpZ&p^F8I 
zORk3}MO=`jY{`|nWbKWCvdXd=RE)~2VOQ&ysV=_Y>KlWBx~ghcJ z3D#BxH=+++Gm6NX>Z)~?(6z$~@d>2{14RvchaV<)t@(pWN<0DcV=8?60^^F*^sXtZJ!BBqZI_X_oTw4xFRW4UJmp3jgU4yZubb~d@^YOVlSdJkuP|Duz$Q5s_gNl(N{rXV^ z^X7E$X+i0O7yMQwQyHf9@~OU0F&if^pJvI}d%AXY+H;}l)ZHaY@)NTt!e zvTQxuuRyY402uTOWBRibxaNGs?a0EC`xl1+w5FkQ;ZRu z_ZIdZ2Ucd`;HK(|+A_AqxoBBs-ICzOisB%KoA)|b)>qMV5GbywF2=fq-Q--6YeiZR z^HpC^xhU#*gEgGv>kf9Gc52U8pY_fq(I7G(+v>nP#wx4CdaQ;uIM-+ds4FR7yP7rX zAX?;OVF#mj@F9w(9@gYs;XPCgUs#RM>krCEUka#hnV>=xKK<-*olJJC)*UtfXm0BJNVe zd4S9zZc9|CXf*U&xLl9kMzn}e?j;wu+jUI)v!F@Edj_Sk^DtMl53b|2`GC6jJP zMidQjGJRVz7{S-N+*f)Q;f64mUSJwPn!-?RYkI1zK<5au#btDV9SoMzlqE19VOEwN zFjF7EeW8m@pzQZg>cam?Iow0czQI$by@zbDLEIlF18*Hm*d_+c6af#g6^EQX`W>9wES;yxsl4D&kHDeq!Z-l0HxEs z8}aJCw~qg13oKf+nE&xd?~CyWA3tqa+_J1_<>H20x9x6eTU5iUuiXNMzS_5PIen=M zzq!D7V7K7QgA4gb09&jl_`w}`hQE7D7uIwumoMCnWC6>TZsCP9T7(qWc$Y3(Zhh4H zzufe<)WZU2%eAA#5j*Q~l&Oq|*V07|XwKqcB6YD7sf(3JU2I0`ViBns8d%0T^%<0@ z_7?GNTB-zfo<+@U+t${F^vP8xc(sOSr!QsJNb9&AQCeKn+WLRaCR*b$C@32Bv$y)$ z-?0Ujpl-BAvkq$=G~rL;Bl}#er|xaVtGg%N+j@WQWPkVk&*tXO1oU^@oVTiWh8Dnm zlI{PAnTxbDEbA@t|L<8~5&uCbe&>!~Gyav~K_dq5a90@fhJEl~_TuIl$V8+Z#((e1 zzvIIZ{O}aN?_<_?s%)7K`h6&F`9-cB^)2`vOO(+)0hgg1A4BpYImoAQwm?JTI4jhU z!#JbEg}gZ<+`fGW*KyC!Wk{I)5Xpb88!bR9tS`MdS#iH;9sh;@e(Y>Cn#>er^Nt~N zDP+25k6um{WN62t(R9%*@Pnw6)l#d5Q7U=B2iA? zKz+PmCn05yBwPZ00d1m{)krdnx||Bp+j(+a^o`=lGP^|=MI0G=Hk;$3<(GVl;)r@^ za4wJ|$y(=v;N&>J9p_@4zjYhko-f3Bzcsu~W|&Lm;2@I2+PB_porUn<8J`yk8Ow&& z_0>R9(qCl$VUA6ZiJ_U?r_x_ZD;KqE8uHN-e&;a#CDy5q2$TNu1LOV*C|5Zv?M&9? zc3xo02s@K?xt-||#Z*mhQHSFTxF2;c)~?GF@lkh9z0=g|t(n>^r}qTw{zjIcPNP2hH0>c6Qvzdm!dy$iznJ zgQWR7$fV|R?P!2 zJohrmP@A1YdYR&gTZ5jFo5xzarPQt!5FoAku@N%d7bLS0&AVI|z*!??j)U?#uLb8= zUJ7?YW&kH3D95Zb$=!~N9%SX^EC*tib01_L!q3ESuzj@oKKKdbJmCE6Sk4}LZ5()51VXSG8XbvATh6s{1J$Efr`vT6Mb)2kwrjaxRrIvsRR<& z5PD&eBXO!l}WcsLSuK7s#mL&iZ-m(C*UQpm@nj{l-PGxB}{izPoHtU5x}{Q)v* zA;axFmj=9;v@=z$5{O4bZUb@xb|NQ{MX)jjGNd8D2I9vc?s)LeK+EcU{|$)e0kzJr8^}4BoPCz!)-nA(I%R&QCxlK8DOMAd?(J zhPI$uIrVcVgnl++8d}HMLBxKm9RU4*(B*)0*nGTXnkSgWuMLso#1A38EY{8ppC2jmuFyAB{`!BE63K%+Z)eT7HZ? 
z(msH;hivbV^g#ppB9MqC^AjLL|D;CCbWbKy287`j$mx|0Kw_Vfh>QfTogx7*XJO~^ z3XA_|D-xu4LYPc=Bs6Mz=tO#F$o9@EplGOa$Vl+Mi^oWgt5CV>u;js179MP}J%|rh z=`L1%Adccb42vZ`%PltCoj7TH05T~dL(z)2ND|U3?M%owfdu+hdpn*U;Wc<)GL>@nTwCi*t<7X;l_(<|$pz{qmiVfcdas^UiT$gvg57@CmZ;Q?d z4CJ~C77Hb~#YT#ahsg3$#?tf%kZuim0!U0ljsx*MDt$rOP5@zFP>~mbcr@e{AYKjm zZy(@V#5rUy@B*@0G0cc0pI$n1B|XK^4;kTpL?@4METl6I z8R0QI!ry=dx=qGGcP`w6ROe;5s&^=AgTqyCFjw zMtWvNJ+3>89`{S7$Kf54Mwgp43o$ReGg6Q9LI$tsWi;#Yh+0Cqz9W4~O?VfOgodmK z;{C2F69nSdkSLJ2hV%eQYshDSc%D|vc^HUKLy}aEhWr>vLPOF(k{U8$Hp=;)TJps} z5*o4;h-XNZDFqVHkOs=7AyFWqAE>$R1QLHvMfL$nX-EP{QKzzSYkyaon z4e16FPpLBhL^2xkB_RG^sxr?2NodF^AhBPmGH(J2{8~k3oDX08Mnx_I68fErtO63# zkQyLE8WIE&|Gk=PJCH zj2xf4t48_UL99|@Wpd0c_^2!MQ#|oW-^=!x=_QrgA;<8&_LzATGDkESI-xOhD2p+3 z3Uc#%kRANRycwhBKf+?ZKX4n4G-e)vOiak|3V#hqTtmK3me-IJkkqTHcU}d;{!2yj z@wh9XAr}IPYsgZP`IDNf0!TtbHUaVeS(VuaB%~qlr(7EH2_R_=IS3^1x?0ZHNk&6X z0`XyDb?%#f4lmk14%=-qy2qAVlIiu znCCvFnE$f}-|s~+&LLwyg-9oV4(Io*VqU~^?nJpJdCgHGI2q6MSqNVPT!p@My(TBu z8;Pe~jOOGV>o+5sh;>3S@0~mL%A9%?k9oXaJW9Ja=s|xsEas!%rZDe_PdMX{Nk|z> zdx9*lAx{DE!+6{mw&g7uYDEEB3KJ{GS+l(S4ElT0VjYSW&dL?zY1=#VOd0WccLWbw zxpy!?W_;dFnaJ{yEG&PGZTVe0TesJ@>2_!A1z0Q~EvAjJPSw(o>6S7UJmmt|8DB_p zVty`=P`-*R1`^kh8-TD$s!S`%Xviml_$Esk=|NsY^58?T-3?CFz@0S@ioP=&mgnpF zkfw)DWZ#L~-XR$bHp(IUPPZKi2T^Bq`HX`+n9Rb1ci0}pH|X&m|4a`)42z|u#YXVp zlaPr_l~$#S{1`|~LtX)r(2(4@n9(%kLLg}kxf+P)9JS7<-Cx%~X*WfcR&r$bSIw&Q_5(fh5jTk=f{WX$@Hj#ET!iIlBBn0vb{S zB&i``lEIMTP)CxIk2-dkd%h32NIa4mJvnubGuO{@%=GQ4g+X~=Xh#ug1(3?!~0*8@pu$W|cULe-~T zK>QkV50J#$q>SuMUaK7P=|tE^xAj@`saQkCUy!|^i)9}68q)TAAXCKl`;hJTR>)u) z%s9xWi7b5jy6#hB8Pey)_;cmxKK&|W5>m#h$8$iE8uITzhBPF1J|f{N=?lts0g#A> z_<$rdWG#>(4Y`TRS*VutJ}L+A{%~f`PXmeJ>lzO7MIZ?c`2i50PnAgn324Z~OQ2mt zE&}3TqUKr&B%&czK)N-giOSKCj{r$&NH37IhCBfTuQpR!@e?2(4S55IS3}Ob6z$Rw z9}vHWTn{9mA+10{8qy6Uq9Ji0-5T;3kd%ge8%ScATIXK?iLX$RKLSZ=$aD;5LmF~9 zkd%g83nZ-}>w&P9YRN4?JR0&LAYKi507yhb9tINEkYhj+8uC*hNewv-WJp7%Uxqqs z$R$A18uCsc-mBF*ZwBJikPaYz4fzC+fQB3a64H=kRL-?($tNk7hU8q1TpF?jNW@0u z2+2nP8X?!v{w-ZcQqFqblX9e` zZ!<(30m0D-$u1mwy)h$4KHsp`8#lLOLoDnlxJVYb9v17C7BgzhrNWSjNf`_J5RkZr zJOCu2A)f=1v=QkGu8Vw8jFNPPXWJJ&m}MqFOO6X43}hVA7bmAlU#x<}bWKY7BJ^|V z3%$Nln0yh)!WWm?zQ9C<_jqDkL~N7lEdRa(iw#MO4X=v52$__Wu`2LqAZZ(scIM?! z+pmBW6f4Ksw(~=_oe|g>hqQA6Oq+*mIW7z|87n=?LN<(NVZ#*}Hl)2b9SdSjOU-?U z7&k_+;UdU*r3@)&6+rwpB5laap*rJXKBErj9B12x1GWt@L}eV(hO4r$VLA&NuFtfg zwFRwX#b#I|AgwWi4L=B(xRkN#`v8!Xh8zKs){uV%g12s>-{WbjmFol$pN6~&B%&dc z3Nc36i1ZGxA$bQXGCw^LKfC#xpIVD`Z^Rw}D0=lnu6qY<4=&dQa3(z%vOO3As;|{4 z6ZHcBMPrZZH)nV-^IIjCz+y3Bv78Y+xC}D!>r`K?1v2zb71=^E?^2Nu0tsPib3E7! 
z#9yo;U!`(ZtH>{aL`qa7=WS?hsfx@5;=NuX)(DB4*wimG?_qhn$nRgk*!?jwn|++^ zUipgc)4OdQl94`5+CHUwB0OAC4mpFw?f8jdLRzaxaF+ZY%fj#XX82uB=d!>Buwg*h za3sIK12PdQ!#c$l$EkJP@=$;yK;BueB0(VjQ_>eC(+;E?KhSj|;XWYgIu$t#B&Fqg z5=iJqRVD=lFaK1^`EMZIH>t>lSE5e!DzXwtNJFZD*!`U<$Xb95Y1(fClGKoo0ZC}c zLqOsh@(7Tah9rS>Ysj-eA{z2CkdTeoqn2fPiryA{`hBBSPx;S`UN7Q8pHt7|v$if| zMYuuN$kAJb>4wkCABN<7Srv=Q==J|fu73(ppUIjGolx|qvWVVCGon{Mf{_LOCoGl} zJ{>7~bKZ_f*vcb;$Lb6qp0FAlg+Nl}&_vIt1z7FDJUh_6FMZUhpu5gAe3V&rAt4D?EA zOpD5hq7Qj!z8%i0XA)7l`0hK8s5dk{bRx&Y$L)wp0`<`)`;ZY8`ZpO-1DKui?dy`E zi1Oh@g5=>GSBZpeDxaq2L5_cV%U9Hy4}f;SVrgNq;o43Gc0s1QliQg`)Ez*QJ5^*K zkkl>}d5m&(smQm0M7mYvc_4`osmO~!ymzR`e^9OmIl@;&xeJl&Gb%C+8uAAq2@T0#0&6_3mUA(X zkcM0hB=xVV%nd+%Nfo&nNI*kA1f*L-=-0ZTKd5VxBS1V~SIc=4h)+X)48;2lRpvDy zktbDT#!{4gOhv8)68WZzlmm%rNEk?5Lp}~9r6G?4@js=O^HU(6Z%IUs>U`9q(XYD} zV>cff#Q1l%M|JwO5+Bv+*ERYfNA*23h_e0f}hHy+AxKN*U>8-U{-v2Oolb&Z?*UcecHJ(j&b*%x5>^ zyFER4P{vn-Bj3yTUzJ`iFeh$Qp1eGig_p0hy&N9a%ll!mZeg*Jy!-{o#HEa7xNieV zYsd>gVlSya{ST7SkT-w?URGsh;wC=%Yw0g4`eGoS->b+HAR!G|3ncM|Dsv-{l!j~v z;{S`3k&(b#N|8{2NH|xE>a!gQwdl*dzjs?UQ~$~SPCwq_kr2b>1*(Ys8@kY+Wh5M* zf_0Q8LnjmoUhmkexG&m~(B8Qt+*vQXye#njuvlDJY@|rIk1Q``tc-m?k{a@5AVV55 z1SF*)zXIZWlY57c5^oanSLrV*dfE!a4|6kTKD`u(*QFw>fFyHNWCM^yu8On)iRVc~ zMgngsMM53=7d@4Bj?jeOjD*;^V@HB-g7k8^F@sQX6bX^hB0;{33FT1e z<2^9)#W@(Ys7N5#-;tkhW5Cjz1z^49E8x8FtL%ZM5p2iq1>?8ah8khEvp`B((QL?ank)fT|y+TI3 zx%5eUz0p*^J&b@99QVAT04 zqvU*}FIWTL_8;qk_(NJsR{UVTCqH3lssURJZO}>z$lr1g`}5!!YG+ONf$Dr z&Pk(WuTgT^D0#>zxz;H8xKZ*vqvTUY$;XY7i;R+ojFJnCl2b;>Nu%UKqvR7t$+L}; zPZ%W+8YR>Bc62+ZjFLS@$wNlT38UoK@YGS1Eas-G?YRj%WzdUBov)-IH)D6;emQMv z$G1n&r!$WTed(PJ^b~&2NGG2}uEi7OT{a>8bl*M3TS<>W#<|lBV%Ir(Pintij+|3? zeb5VBzd*_Q++(f*sJ(i{jk{VG7AF~p{D#S@new^Kf(!}g(>E_ZRfZW+ACj45$kCm6 z(!LX4Yh0I~tQFh7XseG}@9TD)A=^G=S%Y*L#n|`n9_fqW=5De6iB*mKTWKnp>g<^> zpD>JE=j-h{?`~;rY^d*O9ABNicAcR(iizN&W)|D^pbq1$}EUI4+-LS z9zNzxU*J((#v$X-XSXXu0H@isOU7ZyjzfxP{g6+M=v@Om4yQsuU$0UQ@@XOq`-E-# z1lz*9@r_C4+kUd`sVw5~%-U}HUDlahGIkt(+^%!9wX<_)L~$t#2D=)A^?2Hh0;zZA zaO5`YHLe07T|*duc)N!8JIVY4GLbRHu|GoQj(m0WbFakcClKdO+_B+A^k#ZG>>Otj zKXJP+BiJ&IMKFhiXr$zCL6-=pB(q@i=FJpYBg&Li*Dk25EcQXhvf*%J3zykkKAMd9 zc3(HFK@o~?P~bmBt#d50$|=RpvSaBg-CQ@CjQDxb-vQM(V0Jb(EwmE*?9Ym#r;HtdQvH@3DnC}x!Q@k|y^fV1j5 zX1|bg_h|N^R+8UiS=9MQcAYW!1)CyEd`_it+7ZUOgx7g^zY7s&%h=C*ot9F3>W7R3 zKf>2qy$@#%v0@yprP9lNwwFg;L&{&LGBYQcOg~d{~#jO1+gU#XANEqArQ46J) zy;*qq%-U{70_h_!`xcBHnQz*4j&z21HnuP92y5Xv1M37{XYa71dbBx=1!_a*kR1I; zIewjnHwYIIebmRku5;hPE(QXK!Gsjg%`3FM@vD-I+v z#){|%kV%drWBThM1Q4wZoa0Pl)wYj|#%2AG-<)Bv4_|@2$B23v&qwWerZtUz$WiO= z^JF}e3^HUK6dT=H#D)h8KtAK|>i(=H62BfQrro3V~=lV}bf_D(_gbm}bmR777{P=#gx3gOjeynkHwGWeaUib>gpag81adFl^37$Wzj(=H=fA*T3#?kne`gYhcDr13f1)39 z94oN><fCA9IfBJugKB20 z(7&J+G0V=wj~gW9LC8$Sd$yhEy%@*=4OtFkJKkjI$lL(rs~XY*WD4G1<;d&-@?{OV z8^~`pxW+ux5`k;K5OAPriy#R4GSID4)Lxxs*dO7kL{y*Eka5fW{ z1xowGZ2QP74jrF;yiNfMM}9BFIrsY);I`rYPTuBz|0gB4!;%esIOR6e*9Nj)#hGPDI z-ahnZZHiF-WI%7av`;(>`^2-b&oj1tS~}b6bpr*19P{D#mDb#(hlN!!E`fd_W$b4T z9HCZ-Awy}%*^a2Ev#{9FEG(AJ!eS?Ei}B|z@(z`nCwo)GvIezXX|!O?wo_Nv54o-z ztv68)>IMG7u}6th8Fk);$7C&S(I%<25H8|%9=#Vh?W#vszN(jH$aaM=HPD zwZ36{u%Vgt+ek|roinz{D`hBG4RU=JpGERqgoJ>!;R(5e>;Q5-Ze|^150Ghi4&Wen z18LBZy+GW!PjO@hfP6p95k6}V0$JT85gEO_rR2e1VX%8IRmwijwwGVFy}Z}fAsIQ= z9kab0!=(lCvPI=xcFexZ?ncx8QInw)>M5x#BH`T3Na$>{x{56DI4m|KEjC>57OPw? 
zLpJ<0vSzH2#P18vR!`Z3g$p`3bd5Zp))vUs`^fXD?`>m`B$pZX(MOV>!ak90BBDmI z50{}jZ$!;?!LuD>^vklb>=VkuKHXW^=W5$N4eg!UY7G6ez0(S#p)BmfWyn4+AQ%^^eA=aGEM9~EpjyGNGp5FM&$U+%b|8H z*E}_}pdton-VU6(mPvulGtBV`~Z04tdYmWZN)ezR(*vUWea( zH*@<8Hwz<|Rqj(l#%-_1crLP-KYyfM2L9$q5YL2<*%pIYG7f36hir?H42@0BA!jtN z-7CADiwZ|pjbvffUA9#bQTh!KBZP{Kbd24bxD45FF>>gEcebPMq;12IXSBy`8;(4q zC9|;4U5{tMcMLI|B-O|q8 znw>8}uJt?ZF_XO2+NdDZpsO?)If`;!=!EwUow$P!_&O*uH3rG_=-tcD6L#s=8wpMX$7~-qlHs6VH|m+3=EF3+p3){+uF7o zn^A6rReiUOJQImrj*K0LJQrE@a#<;xp6xjF*;XBSZpxoC_T5$>3yZyJTZ})Kh|nfM z^-+5z9D>C{!eYa%h{y=-ka=0kSXQ|m$V=}N(Q6_10%_T!A_ssRf4_=+3CQ{nsK|GL z{9HqR0;K+SRpu0s_ur`^uLBvh5sd2AFDLAnx9T(L9gO|%BWkV!ApVc4$Q3~D(~#9b z{?wz&1c5aDgNob=b&rf&5%Uegx$6_o*_k0J-}C z75OWW1AA3u#wx5D9^{BKN^qZ2Jm2#+{@I0P7VDoG&%^ZYGZ!KQ^Bn!cLdcUxkQDbLr z;xc4Idg`Vh(uT>ub#4-OfAs4-Ugy?E)KQx-EClL9ePh>|%TS&F6*-1~zWO)US=^_D z@I41wL5%j)X!!+Wk7Ma9e7eA~Py4nEpSHJ!E$4U-kbS65tt~rRqTmW0gmMA#S%`Po zUtq~~oh&EEMLG22Z2Nts-7efoXcM8bC4s@oc)?e!V`iLqu_BXF2oY`E19R+OF5 z_N^==W$^nZt0Z>U5!KGSP)-uxXCupH{)R<9sgFTsD2s3E^7>FY4X{+LVjV>I*@`+b zVh`B9@M?PKMEYWn?F+iM(4WQ0nL1^U0DB=b{9R-!3xB=e@z=1u;9;_Ja%6u|y}t^X z{}M9X_Q!$T@D1rNJLWAJvNJxc&p6JuosZjgCL8L9v~zqEJ5vsddEX)#Vl@**WtP6lfoBPIa7e#bxcLbUq8?g>Twgk zDP@{E!r|7I&M0j)i5n9!@|=%yLK<=@5PPJxbS(sOo+yXA?^+<6o|0ZBp*28W)R0;r zp5v;_O+YreIO{$XxTZlp*bR0(t6tD)I>+ zpL#|`9t84x z8#(0MP2VNvbN4Z7n0CnMJ!VJm)40A&ml5lP?in(A>D?~oK@r}aMT9?TN4WJYwQ*a< zxK2^$d7fKrr0AtMER!-;RlWi{)3^7nG2B9q0ofpAc#J&{B=ke+i+XH%*N!FGa{}S> z=F5<2{*jdF*j05s6;}LXQ6ILWzAZ}g%AbBJ5xV}L$hGkWnTwFW02vx(PLYwoTS}2Y zJ6$wTR{M~Vkdm=FjNRooL%L)6@wzDe) zZT7%ne1#^ zYlQ14B2OmY8s0_+xDryumm&L<{*T+IE(`kuIG>g_w5;Q&M;ckIE_lgg)d%sFAv8%j&UVx_+ExXVambmm)V3F`o+$b#elPTP)j>Q5NLKeFo zdi2%K*|ymISy(JOip9t{WQ}+h)@ZY>(K+m`_=jMP_;g{75n}9PkometT8y+m0HhTg zVNzuByORAtey<^q0(o$zl#w$C*F|>z4vd`f3nTGl>1^F89JTGdiYl!gvM=9l+nHqO z_bvKCc1~wu=MUI+-bp|7m0v}-ZY$(TWY)-bmiFN?WS?SGM0bc8hgdph%7}aFZ0@8^ z+4dRvF5tLrpOII&{-tA&b@%A@>Ez!;=XZ+ygt256_Te&QpZA(=7x9y}?==55`@EKg zeV)(4KCvw9^BLVf;+G73?2xF@3554EZ9bz|&bG1}O5riQR*fb_`c3HA!V+p;0m*%H$J z$^PZB?SnuPJ0kV6#Gg~UM)5nz zy#g6_t{iFcI4InOr9-!kkj(3ld00axy$dCevihQ2iy)KIa$N(Y;5?~|WGaEsd&sSb zB7kiHvRp%2f%r9K50HR{d>jbvqruAwMsLGAaTvQ))Abq1_(rL-$i%j4xxRv2yKF>` zv`!>cKxM`=A~BNC8_s!8=|;QgmzK^UBOxYZY`BOL&#b1}J*C+xKyY#tD-LhIabpl-}b!W+5_X#-fCzsH?$WT+6xWsb>q?=F|-E^?LI^MQbT)aT-u|C_Bum*k)gfP(B3>Q z?OlfUkfGggXm2#MN5-XnkDF|>D&OZ$fn?OlfU zIz#(?hW4IuY40(#cN^M6hV};y?XhubzuVB>V`y(Sv_EcW-#aetF++RI&>k_gpER_` z$EE##L;GGsdzYd8l%aiKT-x^<+T(`yZbSPY4eg0>X@AJjK456?F|<#>H*oYhZg5=M z07Z)nfOH<|VMy?I>P z-!QZ{8`{ea?b8hHk#T8fUb_{1Mq3dvvkRE)LwonQwC5Yz zyAACjL;ID6_MUNRpK56DF|_|byuAs0q*awSTr6Tx#0U+BJu34V7uswM0a4POq&tCh zchO1Hp;ew#S5;C)SE`DtN>XW2qBt7&xQ$Uzqk@wcQR0Z2Q5?hTG7@!udZ}Zq^Jod83-tDo^R>fZDvCn$!lOFp4kA1Ex_H#Yj`tv9EgUiyr%J9{XBV?3a7&YaaWO z$Nq7TeZ4C7c8`7CV_){z=?+^S+iXmK`$JodV(*!w;9I*)zBWB-N6USAda zfX80%vD4;dW(gNRYT4C|3crG_%H4hbw;NK~*hn%GiP#-eL#5Pc_9$*BvO7%Fj;Tgn z3fjOD`qR4>%?_?gY4puGTkTbo(R*_g`HOOi??hmV#QEE<=B%vD3UPOWsj@!^jcrzT zy$A1aSgzI@U0;>HW~D|h4Z4;$Pjksj>$QIPen3)_SFXQme_fq7j$5#npfrzbmO@td z+tUrAtffs`%U{L5&10YU*rz>qy6KvG+V-m0Yd!WkkA2Exf52n!u8RE>kA2o-pY+)8 z@z|qPvDbL)Gah@{WB<6vK2R0=i5~m3$Da4tXFc}8s@NkQ`;^C?^4R~uV^3AZ{>Sr; z^(qfA>9G%b>~HYc^Hs5Lc}wwTfXCkEv2XL(XR2a<*kg}+?2R7#W7u%c{rqfI z?5iGox5wV#vH#LzpR0=fA&zU4+hbq%*q1%_^F8*++11RP%zNx>9{ZBVeyYb_Qx*H29{Z}tzUZ;j zr|VShsF74{RqS&f`-;cD;IaP_C$KnsT~+M2dF;y``@F~gpvPWc75l8mzT~mbdF=mU z+12d9!&}TAn3)rkkC{2KTzck2mjY?SfB(}nCzpdj)ZkV#Cq`y7b7EyyWj_@Y3y(W< z^0zBj>#Xh>H=h;Rv8es(=4mcDSC93M+hXQKm+P;ZyC~P2wxh;z-FhZRpI&Tj(DOeU zxwskUyG4DOd7HMX*h?OJyT@MdvDbU-?NzboJ@z(_z0PAl&13Jbiaq18H+t-~9(%;H 
zE1N%e3!B&N-R7g)yThg0dsLU=Z_wTok;iZER%YGa9hp_x598X2$7}C@!*aDY>iXKe zE$@ixTHZX(B{yo@sPqN5aDDw%?fpy7FfCWL_vKSVNGp8}M73_2=Hj26XAs3cpsnSv zVqf&wmpt}4kA1~sAFPV~9*=#|W1scd7d-YXB z_Gyp3TowDg$3Ew=PkHRG^w=k>V!zX4pY_-$J@$gfK2;U_oX0-nv6nse!yfx|RqVHU z?9(25-ed3e*k`I@pY_TNQi4W3Tnt*FE+Zd+c>pu@8FeH6A;C zYo3?A*<-J-iv6V?d&Fa3_1NiVVD9G|s$w7T*f%f|XC-Z2@z_uD*c+>2@Auf(J@#df zeGQjm@v^s7#UAz8*F5$mkNw*odwW&vy&n6j$G+&XFL~_URk3$_>?SasEWPaV_)*v=REcqkA1Kz_RBr?MUQ>fV}FCko~nwy&0}Bi*k?TU ztjC_OioMlipZD0OJ@!G5y<8Q0qsKnyu}^vIy&n5yRqW6A*k?WVNss+9kA12t_6CoA z#$zvg?9cbur>kPW&|{zW*z+Fy`5yaBRqXX1`;^C?^4L%F*k`L^Ki6ZQ^w#?Ui_HK{;>mK`JRqUsD z?1LVAyT^Wy$G%h*dyU6F;IX%P>~kLba#idndhAh;z0qTz_SjddVvl(2-5z^`$3Et< zuU5tWN4@Hy*BN)Td+hZdd%|O1tBQTYV{h}=>pb=WkA1x=_TPH!jUIcg$G+QR->8ax z-D7X?*lRp?`Wh_vw2^bG8NdI+W3TtvBOd#i9(zqy>}wu-oyWeR*Y&hzUR>j`*H*>; zu*Y8Ov9EjV5s$sDD)v>6y~bl-^VrwV3fFsmRqPLW>=BQB)nk9iV{fR6eZ^zn&?~dL zmRnam_OE&Dja9LK+hbq%*q1%_1&_V0D)wcMea&NE^4LG(vA0*n{xy$%)ni}u*k?TU z?yA_AJoXijeZga&^w^_Sv46>9U-sDNJ@z4weV{7#MUQ>SW1sWbqaOQURqXe8?28`z ztjB(t$DXQ+eZga2@YrWO_6CnVUlse^9{aq@yyF*<*j$W1p^y{Wgz%+GEdq><@VCGgYzAdhAmk zd&*;oQqo5$X1+11{{)E0XS^}b4* zkKR}5a4D~~tV?lR#QXc7-bY;qgBX!*v5(rytoKzqGOMzWZBcf8Qu}XMuGR&;db|0a z$sH?t?Qiommn=O)>j&2hg#NaxvcK1q9iJ5%iWN&Eu~N$U6yDMm_^#OnG|sKB{4J!v zVZBoku7NE}dGLjf<1qPI;V*$SCG_SPL?aDZi2qkant}5+|IY08cqvyXMj{RMYK$-P z@s0Zawn*gr&>@C$;QBfK>+^jVY3{-kR2kCr;=lgigU~bm;qWHX?7)A0uH!Dnw~+7z z4IF72@n3AX5B>c_<%7>0APp{>sF2TpK%U$11Y#pi9RGzfLVv$Tnj0Vjj=CVtZ$eMy z&syB8vAo4sgecWHkhvTGOJBb_;W%U(l3Cl~7pxsl43DI6XA_DvSU&EF^#m& zhSM~Yo$|Mqy|?DWX{%i~P>u)3WD7Xir60X1^yMR#Bw#X46dWXm*zfiyL^Ia#K`H>2bE3CP3Jd;w|J;hb&G-=Viu+#=b3Ok7OD z-vXgso%XGhcESIk3$V!PK&J82BF_Y(ZB^#H6Nvn_mzKGg(!fF4x2_|Ev5x>*VC=61 z($3i545S-fEm_p@_EoTR=UF1UV#G<3?(zV&+`Db~JD?nG-sYMbUfAZx6pv;e7N z`RoP~Q8#!?nXe@TfAmWtDIo32LrP>E$P#9oC0B4bI}YQGOf#9MkY*4yX!ChDkQw~6 z65a)5l9l3qAZyHi9tN_2?%uxjM<5%loj$V*Ucz`1lKxU4%giTS4TSE6u=yMXvVor# zc?FO~Q!dKqO+aWwX4BjTWDP&9^?ngZJ+r!pfRGopZ~Yd?^ocHV;%;yu9I|Q70Fq~= zxCltS@gyYA6+mcJ(Z01G$lwVsG7O~Q2`=&~AW@d)y+CH6XIqLpfVAVMMZN^2tj;Qulw3atqEEX^ z+qs#DQ5vmv2r~RNSXCAyMVMAdxqyv77Dq7yf;;AY!GSmm)s@SC?Tu{-vmT| z@j<3}50EKXc#Vc1Smt~#5cy3UozDw_%)v0MZS(_KXMJT3h+c7)`MeUyAhU!w1DQ5D zMC_LdV$cT4gbGsSdJobNk>&a^5c=`_?WX3#_CE zf#`i`QsxAZ8(DpofzX}IHlH^FX=lj$fkc>(`X~_E6=UDJ2go8T#REW=m=1pgWa>$7 z*`Kf%C1t(Lxj+^{XLD`@Qp?z{079oNY?@vmq-2W>16jvUtLJebJQ{o>5Nh4_tq;>% zYP=&gwg{xm%Jp3!v_@v%`ZkuvPkb9dW*^Y0FVY&t{($gVzJGy zfK0Md)a*yOjAt8<7sW`Sp7oyRBMsd~YfJGWAgksr)MjKKkU?nd&wti!X9!51S>^*}nl~iA7dxK$cmWKLZ(Lt@6|;GCtMK=NUlgYfU!i z?LcZo45>@;A|O*tht~pWV>zdQL~GrAUJiu*Y|d{3QqOewK_Kg_EpBTxZRao$ebGo*n9)NFQ zHP{TK4#HT?cLJF+ek>?uiUDS;NM3*uS*|qF%+$JX9Ro7Ue9JVDb>_8h2SQ&lvs?>6 zY8d-{K)RV$e+*>J#Lodfi%!?ZcC*&~TcjCaedS4gU^k^8{hS7bR=_R$vw_sGc5x+; zWi}=n0J6%w&GkUqnQgoRNE?f~-vEThakiFk0W$S8m;Fv4c@}Sc4af@fNI$2ySg-de z5c(FIEybz*&^(i{9teu%*lIJ7CDwMX1X4C0;D%zZP{JE|7Rd}F%_L-|I+yic2C{7G z0&rvs2yYiN^cIU!J`H4$mEv9?bdtiB;`@X!d;SfOHdf0gAB28RbKg1-NDb3Q2ao}l z^L0R$A-?4*18HO}<@G?Q-?8=e4?yZ!UCaSVo$luIIZDGy`Yj-}%#-{8$PCl`e*>9h z_VdJRU}MaBF91@{WPTwKeWp*=#cm+!Jn7 zGHW~CK7*oi>&PT0$E{g`2ipc%z9T9VsqXAGQc9eCtV9J*2|m&gw7Azx3&X`8fzh|dohqU zqgBkfmU83qNR-*nVWe4Peaa+|<+I$JKLCWjgloALfY4%+MZQUC>RePdZFF z%N4QXBc)s>H=ZoOdgc0{w1hK}W`Nn)bAix!*>b%Q$O7v@ZUWNwbT`d3kVa-d?*lT) za-Ii5r~GWr_X455(zc5;UV?mB*&BiAdj_P;R{*JD^?of7y1UrsoB=Y;?C0-+l-a1{ zT|nAdto1=4-4I6RGZM>Wa&e?#8v7*D46ydT2xN|x{o6n&sL1V}ye0KW$^&C0&* zrFiQc%^q|d=nwYxN7`XgRBl=J=OIlallgKWE39_9flM=P90sz?+E)n(jRLHMuLrVp zmS!h={t%D>rm>F$nKc$pX}&;djExa;Kjp)0<9k3>SR4Eaki4m7%s#<5Gm%xM=M!Fr zG|Xe41B7B)Tgw*#X=f$93dqLUZkm?>=|0m%#(=D_wlf8UW{GXiw@?~pKc56rd!C!- 
zn?TCUkNp714D*MN0$De8O8PnJI<#D-jnjdYnI$|M2t_GYo?Sp{&vlV&2{AgPoU=e? znLMup0u?&7{5~M>Jl{n=4J6OJ-&cXqpJo3pkSL4&ehp+1GRrhQ#lyMWNHQ_QBBB!m zq+v3j3uM*Us;KwTOhkS^rCwS1i;yP8`ip}=@~j4jfV4B)m;$oIB>XUtI;Nk`0?{{D z$kz0AAoZ**KLmu%GT5^J3P>AkaeoFf%WR`=5Hd5Xdk&DPR(G7z3uKkalK?{d>nztR zfeae0M)3DGAX7{mb3odeZ}|)mK3@I?kY(0R9|BU(-ugX|8K%{3F_a66vGP10$l!S{ zvIoc-v(>{u8d+bN1v1IF-T~yc{cb)V1G2%i@iicGtnK_5$UH;-03^cN&PhkW&OGEf zK3=JP2fg)nFdTAgi4>0MWO991lSvk#|!X7LVNtWCIT^`uAQ0BK;On?4|otP~?a=9rgwEsz=3 znr;CSWo>y5NDUjgd9p5#0r-AtZlAOox|UkPOP1ea?NNISE! z9FPrWnQsO%$8792O2caB9v~^!TYVGAB(sf&DIXNXmh``Yw6ofI!Vp>$v(?jpMDZ31 z8&S0kq|E$J2apttH~N9hu%0>rgl>Mb>}5*BkT(HoXMMpx0?D(oFVb5q9(xE#3`WaTa+VkXih+NIQ^)C%DMfK;dV%%WZnWQwJk22zVx z@PA5&b3kTUJ6!;>z#@ll0;ywT;hzH8U>5Z|ASu+>6HU$+q#zF~dkYX+r2?qfuLRP~ zG@k_0$lBM-6=HL~703oF*8-4s7BPMe$nu$bv=#{n4Vt*r0pclPWkKuGR-3D>w(O% z9^~afD8{xLn*mbCTGPjX(3%u!OKR*sAZ6x7zYnC1>F4+yTJBTaoKFX`zu|<+>b5JJb9gAZ65!eX9@15|ih8Abiv| z38c2xt*;p%tH{La`Q!8!tM_|=)G>L!1tiMa=_B+OtDQdrDL>WC`Scqg;WM<%SOpfd z-*v*X>GpLI($pGx;%a_unb}4=(oCb4ZQ1t%nPmM}nvfbd5-!qP%nn}%i3OCMwdH33Sz#@%1qjVZTdp1;>nzPnfYhJn zYPAHUj(LFB0a;<1e?O4bQ{9~J05Zeu=W~<~0ioGutHUKA^R@0<{|1j{uoxZFvevJxlXmAnT{O z`TR4G4Y1qVSpm|(*nbA3`wTbDpMaE^^`1Ef9Wt(qfUG^ued|R)mQi+F%ZGt9FddeG zbTgkh1th|9eix8NHm>{-kVR&hpQE?LAAHnts5nsv3R3^`Eu@)awfr+6Gpy{#jiVH- z7d`{XEUT{zfh-vP^ktGsa;te}Q7=ZCN!AAU16hB%tDgdpHG>549JbsglJooBH19{6 zK`7Ab@DqeE%lsmc8Iunc_5mQ>EVlVEkap%b{sg3f&1XKjj5O!DJH5{aLVcyRmKOk_ zQ->DW4P+MeZljAnAZ2FrM}f>6dmbt#lSd(C4U4R9Mw%LCWB&jo%6!Z1Kh z&^)BA!B+xVgm@Nt2ax)DcWm`RAOlR#3qa=aiY?duKvJwnTcLbd&-e%seOoWp5L!Qw z4VKTz$B>4#(|RC{tW~}M$UN(bI)Fr3Z}l=Dd1eVW0J$8Twxn;Qx6XHw4+2?ZI$Quk z@sBO(_kc{XR{1j^8?43s0mwSD)u+82miZE$5B3_R=|oQh!WM z`c$vf*da>8TFMZRZf2`D16h2U8_B#KNIQyPOK}^J2$S&hK;~IQ^dOKq;p^bXNSw%p zH^S;zYw7~B&h!%lvchcjMj%Vf>Ru0I8{VQ^WzHW25@BBS^FWr*b!A=!0`)i=TL)5e zwwva}SD?OF+c_Ud-potj^~e=K8d%u}C=Kf`jslrst@}nGbFALqN(ke69}vpf+Soq< zX=K{CA4oUT*uMi=Gd`RoJpPriO#H9Sryj@(bSP^O<3ao%w935JPD;aS`5=&WWMb<* z3#6Tu^hO|4tWMuaZ?RH*8b~dRsP6@mV!rPCKxUbqe@Dn!F8gUWq3n>ymg@yTrkOl1 z0I<#f~MG?ZIGtIosPk?l@l13&W51R+51+vO&=UG5#kAju(G9XJ#8{I%AS-Xe>sbf9t1dw@# zOi@0}BfSI2vg!3A`1>#r`m_G$Qgq0WxX&3pA5#d?b%in>%yzA*5Ml z>|X>j#q{$%AcIWiHA({;v(|gut5J$GU772F%rI}F{W37TEh7s8YUR$re*npHMB{x=}q%;x_S zNG(GiqqmsHKJ7KI4b~G~0Az~UPa}{BYS~)m)j%3p4PFl<#ahA5Ko*$~e?O3^b?zx`X(#k&w)_?Zf9j5 z1G2(KN$0*6?VYv3W*`I1!lOXiSqbbM#x3=5!#drWL^EOSz-)qO9I8Mw&SmC+q>T zz)Bh;gxTRSAOoyiZw4~WeCB(A)UcfAfpoKa|5qR@tTp{RkPXQGMEt+Bv0nq3VwP~+ z6tv1DJRL|av-xHqbF2+s0i=OhW*?A6Ru>r{jm$s40mxL1TRZOqGRflJj{=D@|NJ>1 z?Tmei(y+3x0$D&2ZAmwPG_X2-*6U&OY)riiNEt1KbR+e12uLln-eW*&SV`XkWSYhE z?*}r+eE8i!BCHhm18HLtJ`7~WSSHd(9tG0K-a7dWu#G3U<$4wn8ZXsAK<@d zyb(3X%Jpm@b;b^(rJn48WW3bQY`zU?^p~Aw?eqXyH*X>1$hAP)Ot}b|075;Q)y7+Z z(0qcG=L3YCsMB;NGs#l2v!jj0QS(T%{ABm7&jYDvTwe!L$8`7*kafIe+5bRqF`1w8 zCe+uH+%)F^888+FA4ST*J|cHS{{p0$VqE)xESWUIBaLP<>&zpiktWJ~!YhE#{HoQ) z+kuoJvqffs)G}LL0Fq+t_XC+{@#=p7X<(l1F(B0I*_=;)GjcY)%up^*mq}n2%Z>ET zK$>|L$vg+hAS=a-fGn9(V2^z~ zbqSDqASfE%8Y(5R=M`FIW#5f7QC6q@gq-Tib3Kp&W``3%YEN<5Ukil(tUMo}G|Xq- z1*DtV{1<^Nu=@HDkV)33JPM?ZwTqg!q6azEm9P#7jb$wR^MK5<8tep;VqAxS&@NM} z`2vtOCgE#=OtSv{JwOH-@(Dsr`x=U8a>XR-i`nYek!G3s=N}TnOA4f(^$t&)2Et1E zEFg;z*p{>j$N=jdI)K!%`g$plWtJunq=9MU)szMjLTSpz?gUcHJnaI##rl-50=co? 
zmFHm~^Q>GaybX21tmO z24#}10RM=*7HMWpJ^>xgQ+o#Jscc{GLK+$uSUukjB#%ri@*t2p)))K~$RaE032%p= zXR$;*kQtU{2O-SrdVmbFG=o5B_10>v3}k@$-f1AS%rbAMw^%;^L}{31ejZ3Wiz>ed zWPrW(8z9rHH9h$q(1y_=>FpdK^-q)flRA735L$(?8oLb0EQ_`F0$DuGEmw-t;1yfa zmjmf$t$PN@w2>J-8chS>e2UvU+=Vm)rtc;R7lCZB=;GgjOg`1+dK5@IE7ud>DcP+& z7XVphJ=%64{H~bGfkaMsb3O=UlEp|zfz)kt)7%84mU)Ty04X!%6Z97JY~{HZ$e{5O zhzi6BtVQFIsO%3S%`DT#Pk^*R&$f0V??R8p?B_Hf%dEbdfXuUeb^@7ZZ}kCLGxa6r z!()&cQrI?lJ<_C*#+KqH%9&~YEkIVzaMRocWQLXf3qTrKU3?1&MS3>p{{TWO*A{sU z$Q)}0Px%MvnYHe-fKVHhZ}k*=;<;=(KF9oF3)0LPc9b+u_c_p?EonE>tgyBd2SWbY zYW^6IGV{-G1VRnoM$5MWX=lB|XMjW**H?kevj}7b$fT(Y%JY{%R*ba}^2B$`Tk|Z} z89o;<@`e+HbX=CjF8;HJzSbExT z(p#*T`4Nx^^Tz8y)|h^ddk;JtYfZI42ARyyB!tOx36Q9n-=Mm<8b}?py6b?@js>fY zB9MBN!j|;SK}oOv?FYAazXUcL8ZT%gyHwAd$0OM}aId%ltBsS*Ecc0Ex01d=v=HqTAYe@~u)HTZ;36EU}*P1wd9< zjC45=ib?ES`zZ~6%AAMtqop`1XWdx1h!_F20fhkYWAqopTa8Q`6NE5(eix8I<}>Gj z%rIO13Xl?ua{mp8zSmlE{U0C+aN4q;@;-2}I&Ao<-=m6e+H6g^>rVmfy_{d(%5%_v@x02fUMTJ^860SI^M9g^OX1F z+}3$6@(dt(=CLmV(#U3{cLAwo?P37PlRj58-I_|u(y5< zWR}^_$sYte)8Sb_rdeP4JRt4HlSJ@$F_1F;*UGaG$Q<*K2|`#qod8nHEPNVBigA4q zNITQU7l5oVdwvi|jo}Ki$#IILrkQR01ZifN%=Wc6$}&9i|F z!ZK~0?gr9m;>@A(Lb`+~u8!5$%aA6@>~I9g63gc%APdYRy&Xu3`T5&`tTAi(6p(Ia zhf9PU?@IVXAZ6yqeg$NndF+}G17WS;OdzyMWlQ=3AlmmX(sbgAnmMY zJO*T$>3JFm{n^NT4#*($;hzOE$8>l!+K-?$F;DV$ zKx&wNE&~!3`!jW)vIhvwkXt`C0A!U#_}2rWH8LyVO+YqSN#6@(fl2sLAbDn){{m!^ zwaWV_%}H*~4*_XowfsvUGl$(YC;lV2n1p8mnKmAYZ1g!mW*KrRrC}D{1*HB7Zq6?Q zvccXO0}^3g^kyKdtn9Z_KFn`?3CKK4^DvM`W>HU>gKuH2!0(Bkfi%nTQBQ;rFbMqZ z0#atJ`#K=RUSrZ61v1aP)+CTcCeQnTEV1_WaUcV%ulzC)e)GV0flM=>@FO6zthf3v zAayLCC;l(IWo%X6KZqWUX>}XY)U#Oh`9SipOk4KdKn7UtTni-1qRQ)m)G_zw)!a`HOvmb0))6~j8-23l4o!IHxSyLZY8X}BO;ZuNe*#F9+594q1&C)Q{5Iu-S1j@fkak{DAOkGT z$sa{qW)^i3kSOc*UPx(Jx%L5>H_;cJ45HcHTG0yXm4&|@Y1W_Q%5yuA!IRx`eHloU z$+JRlG28eJkQ!!DPrehSV7=Zmfy{!-YU~n1m?aDVnP=W_7)X@GQOAH#|88~o4j}Z? zBL54>EQlmmfLoHYIkaWg3jR0JT|+dc-Z8ZNSuW+3Y< z=Y2r(%mZ8tWQO_Y486t11+N3L%(VIeAgj!tKMrL26kSqc{}PY|ruk(c2pApxJVI$8 zv(?z|fXp*XIO*fajCt&OAgj#Ry%-3MbETefYguOpUaDo~3Xq0kTA79AT!Kb+JOw1+CfRhO|x^Xq=%5Emc{CM zAXCORs1!E=8GN!^J8uV4W~KNDkSMFcF9Ydj9%%(gEwhA2fRIT7{avbs1*2-`b14Wx$k-M0d%V{zsufh z)g_zeA|Tz&4le@|VZCP$kX5F`mjGE|B^?LS#<<=Jgyz{S*X=;&nf=@gq=Du9{{dNL zb@5+7YFT7;@@HThXSzB69gx|>F47F79@Lh7KafV&E@D9DSWlDzvIHGk`*|&p4W^A- zfK0Mp?=B!w)^fi9ME)Uy-Q>$_-$9x|B(fIuGa&7(ou0G+nHl>TK&VcwgwF-C&Z2@# zfh^SNvXA5AX}OUj_`-tQ4$P}~izXuY9gm4Q&%1Gp0KvtRG_$Z}e65b1> zmZez%vdAoa9mo=LwY77?Kcnq1t)2~Jj@96HATubqeXAYF25Vo3fwZx9aTG{{@uDD& z+zh0)&dujmAdM`ayMWZN*8Mpkv&{E?1IQAy@JE2uG9>a@IU{6i`Kdq#p6;?g6G)lK zb2*TD#=Zy023y7J1JVF(SW5_ith1If4rGzFrndrFV#r5;EHiKOWgsiYHn1x=Nm`@O z{J040S^QW<8XEms_6;CN;MC43pQC=6-qQB-3?TIkX$I2BMoGJYM47c*2PATaoAV8n z57XFfKoQf5(m-DI~}AuS_~? zUSc^Qq;%5+?(bYYlP;D_?uG2I{5=xO#)gxLSUw*~#fm|kz9os>60t%_2oMDHfy412 zogIs1(upX99Kp8ZLU$~i$lyNRd?6Xc@ibkEY!@HkyKs9UIg~FIw(B>Mb5NSdL*meL zxsxJ{s`KDp>b*Vt_8ba&_VxAe?ClLYyLKJi6YSmD-`#cKfR~oO3yQI-6b|$i#ZwCq z-ph&Hcsi4a#|nwap>%d{ES<$ww0)$I(R^fk21><0$0H;B0dBDmA>o1r*;pw(mXz<0 z<^x=ubkrjcCjuGPDZH%2nchlhtU_A)qMx6Z_6B)qVj?R(4?qeF=z^0Yu?j*CXGbyE zj3@J@bS@jDlqGnXNyEuy5=fgbZJsVYk`Dq=N6GA1B%Vsfj|R#r12W4^GA0=_rDCFx zD+NQjLcD^SwCT!(DBE9>t~i?8EM0m8O(i%ID<-i+GclUaq~oZbAfHD=Ln}e!3Yj=! 
z;@lB&)GC%qQeBQB4b17nfu5Z^)xSFEU-G?U$BspUpsPFBePHL_u3%T!o}PWglq@8N z(Rh;u=`ez!ERqe!;{nGN7*6hZn&9xhzS1Q@5HFXTnwpwXl60KB1cdL2_m!HO;;C36 zC>3Jq5_Xt%^~La?uBPT-FRp3GBHd`b)Dyi5*jRti*B11Znj4|gakP~H>uH4;d`D1< z4R22u3$g8ux>(vH3}<2qXrv>PjKNQFRjFqOi%D!39EOt#CkPJrm0IID+*v~HYm;u6Y8gEpVU;#iC8JN-Tch+pYo}FS3Wksl{{#rka`k@-{E!UzV;lX#5NFYyFGTwfu$hL1TYWeyu^%Uz85D zgQY5ch+c4Sx0kMIt@jn!>yX;4CF@|;yV+GLb6z_^cM7w)^2s!(WJApSZ8zBn3JTl ziT%7JkuK&lu?Ye0xyI-z0^#5J2bZB&wcxd&lsZ*CY&mkJL;-)2Y6 z3f0RLsO&0wnXbMafo~nM!@#+}Qc`BVNfP<6FE?5t4p5Fv6?e;QOexdSmQ;b7sxA=O zr^=+c>%digfvcycFcRwvBf_dHca`$|-=PClEEQ7W0wW(kDsod`-cNuW4N zh{ujZbLc}bzN%cXu59c`Ch4XZC3Lmck|@N+`wMA|DG_>Zs-jM$QraQkp^UW+DktR* z!5*t=%XrJJpG`_}Fq=m7IMO_{$#b+um83U^KpO!J6;R9A)Cjkz?1@ZQ1aw>Z#_b}+ z+bRWFWe3K$g_!A0)KA@;$e+zkqm&m$z9MljhI6mvL#5M@X~+w<3};4K@UW*h-O@GG zyRWY?xb%imjH8FrFyh9puCBh8U}#8Gn|ceb=C6ONX$MphjHQ#~7&yUt%1n}`i}MH? z(X|HjZPO9-NL@oiMrq4A#LH8j%i$ie=~CH&7&8s46sbfj|Vvv%)t zu|CjY@R04&F{T6%Jr0fUjU7$uY(@(O5Qi&TWGm95t~+EJ^@c$E#$$QiHYrE?-9zmH zlMy0;r5(-ZaSwD7VFD#Qki@i5F{Sc?b3&6UY9bhoi8d0+q1b4q)G05-pux664tU5; zy6c!yfFC1E`j_D9%Eqyg@nHY%Hu;-oO!U8EW%VyHOC^%`WUk3EpY%~;q)XlKF$lOQ zqaKW=RRJ;oLrA|VrG8Gy#Unp4A=iv%j>fVS2*YpDJE&8*C>nU^pR$DX4Clgl84-OH zlY24VhdD)PEF-K?16FfaB`37R&|?S2tIp59c!55pV^i;?2)YB~^rH?4L&MkcqB&D^ z%1B^c!spu8J24b;Uc53lQ7rMiG_81zvLV|Wr zm#LOB>HLu#D&Of@sJEyLivgT^)BGFB>E8volu={30zr$qVKkYIPed^93>TEV1_P-a zwHLQF@5Nb5bntR)CYz#YoeH;&lL4xwiC3MMP{xs%4!KBDYSkVn`&uK7e4mCe6=dwg zL}Y1KG^!P=q%h_XEnwoZFrkGbPH05^Bsa*BL%G6HuoxGj6Q(9GTNumqrH>gMsz=K2 zz&B*Vk4){NCROUNNm*IKPwYF)*6@N+j1k@-+HQnObeW&abW$lsJqKiC`EF$D^)7$P-P|IW<(E-?Mx{yrt zV3H1AIB9|(qXhlLjc`=uMpgB7CdbloOtK1pBZ6whBbcTCm4@CoS{R~_2-)sX;^HPM z59Qsd%7IKJ{JJ=~JmhxbGV$J6IX%K(*iDN~N5rs6SY0?PW_RI-oiUoN_vppE3uzL4 zkM~ens8B_xj0~l4GEyQ{+TS9T{pq+T)vw8Co?e~z`s`GYKwnwD z+mu0$HMpP-X41K)X7pGQ40}bBMficq26f9Erg=arulz(bi^`AmSPFv&u^dPhV{#OF zsQjL)lFAQtU4);8%qIL;YTUIGmLh8ATtx*O!aA0mf(`wozMqzos3=fYscR%(!rU-L zV9Jf8OZ#vumHcJAxrHD>78Y*-Vpg?Rv?z7$3bi2WPhp_01iQtn^jvq)?3wBRc$Nlc ziB0ckQPgM!lzB3iSk{DaM-_0Y`wDO+UC4}>DvWh8)Yv^)afQscfTho>@Q&lS>{Aeh z%#b~k0vi`r;aTMhWG>3S#r%|obpw%X7MH*E(?s||H>P+HB3RfdNk2B2;Jbh{5o4bl zT|7-B<>|UXkGnuZb~LG*&E@Qia^;aLOhzb*X%~ePZ&h z^n?#r?Moatfs;*O{k+9g5Fiz5oV|T+J-)>pw{Jl=XtJm>6S}t3)tiK+Eu{Si*QASS zRtMIlIawRORtZzi%|7BD&f=&9GqklRDqt&=#zr==J6;c?e9&&6vQfW<4KIiIFwZ_C z;kS6!wWn7H?Cc%j`(UnP$xgW-Soxu>B=$^Ce9(Y~J#Z#P7`}P5&XPTDTy9glcBX=J z`;dkZdZ_Jm0jBS&@Qh4L!!{!*R!v0lqOuV_%_7)Ww5Dn95B4#N2vu~W8kgOmj}w(h z-ZwFFB&T{?{Sp;OjL~^sct@aeEYE24BRi;PkEQx>n3rd$%+bRsw{Tk6sE5gH_l{z= zykf=`o?toSXa)FSmiC0n2dQKxpHwprG@6rHo7#kdwlqu3&I>!BHkCwD4oh}p#|X_( zdr1hShldc}%A`&bS>3JHz%3mh-lc4!2s+|_vc4IKFCt5qPZ82_|h@wJt@9?uW zXenJKxy{STjLQKxgiwiMNiGUC4LKQ3t1j!WLe@$dW&Y_t)iVK5L z0ijIn#?Vfw(CADQ>%b~*s`LoT>)yM6Ur+!318ykhyzB(#4oXUwVxr5eh;NdeeNo5e z{B0v-C|85?BWoQ{KM6kn#qDS>>T)b*Mmv%^$p^#2iIU8R% zI7%xy!~mnx11f}r%R(b_UUMcN4x7|8W^i&b$+yDgV4~7n&S=7U3^GT}p`48*W<0^) z#4i+T6SHF@3ggR8Kv_G|3nGJYsrL% zn?Tunq9SDv*M;=+Qez|M4X0k5!I6`|$ZbYPl^&YmQKg4^d{pUKc!1=jF@~YWLCzy1 zRK-VpEabe#ADBUslfsTQ9MDdpR0P#%!+FMs8_ok1$&4%<7{%EVrV)pe9J#o$IDg^< z+=~ z9V`epABgZw9(N?-BzN7p8j8rLZl@%LLBv)RtGseU$)P>H5$r3G8$%Hfq+7l|fjEwAg!kg{v)>m^4s+1(^xuWg~=( zNr+Pp`sqtesFB+IFj5~<_hT`dasRw{CZOl zxEd7b7gRkkWp#g>;<_L!v3j5iB!6|}GP#1P;@^d%K~eq+=-x7lvLXe!x`8cS3EEAT&?%@G;&QV9Ul+&~ zCL$EUbeRNBsMeO}V zVgpw`d!$lhnlXrz21JhXmpuG+G&vz^IiN#tNDz!)+-g(B5{W>k@E#VRM;zO7o}1jE zLxUUIpud+E!sS|tRQ*Ll^%oKMcGQ5_*@V$@4=qsc!fB#Nup53m+NJ6|^mCWogQGP9 zkFL^TbZa)vD<#^89XV8RD5tJ+67)zD?Q*z#iu-a|ZCKbuB)8#-1X`82lzwz1z(FlC z(M!9karEnVnMc)isW$ojMj?- z=o)qn3yG)HSthg&oH2qm;d~4RF8FsA2kr_mr9zfk4!t#uGX=EfuYZqYo!RgeX%_%Z 
z94NJp4Hap#r2Q>7PddM|^3aEa7$BZ2W?+RFp3xWn^gpQO+6+zRn z9+V8?iTFK~kK?RDF^xrZYTY=mC{AC|JNSy=5gf$~g1jt2fXNC}Is1uCQDm%63iszY zy%9<>9tY?qLLma^3yO3y0_glI(@%0I0qQO}9Apx?QF2_CI6mg4Px7Bb{AUtt&OsrT zNS9GwP9z?|Jd*{SWg8Pad@P4}h=Z&F4yI*ebZkx+TRa(}wTm#A45Q)SUV>pS1&Lb@ zqh((QWuq181eJ6UshG|>Rucq;F=^;QXY)=x&W3&uVWkm0Nequ}7{#*ph)S1AH^Lx_ z;+N;7DGICn3ziyn%fLi zTa=Fpk^jQ6JNh@8%ozM4bzPA>{)0;vWO#RHb!s8|{1)~!kQvW>Ctn*8l{LuE|GU*VO4vM5vh?UO9p=hk8=}eORH~Ow1 zmrY6)s|KAI_WMe#o=rH~F2dEy0@;R+x$ywLFO@8dM_x_hK_We@UX!MzAkhS6c0kYu zRHC9O92WM{IsK~eqB40rm!B}U69lpB1WF^)p|TJ)iMpjDyXxmSQPFQ9DxK1d6>ugN zeJvz6KVu~rDE83G>#DqhNZH{Vv4{|)i$Q`G;bSAkVVRzk5MbpV{)i6gl8+`<#9=|{ z7~iA;JpoO*dSvC>jR}Pgp_do@Q&PPWwU)P!nyTt>mHv%#XMwMa{@lf%@R6dVvm}dCgky zUo}mPWoxTKjP4K6btz&awJ~Y#n_Mxp$+X&LM^gu_M|1(0OK5@REA8UOgxsaL03n^l zFtpT@=)y$+x&A=ECIqCibY(;+U@Y<)oW`QzlBz#rMbaXgO*(PVj}`@3j#}KXXS#Yc zi7~xfkFBu)Ie1j}?e-7`6V;b2@KUMu3}qZBvL?{ zv7p%@;Zm^2ffh)0HS$)49$&wN(Nv&5h@>*-PK%|({YJxaony^d8$+86IrqaaTk#7OGF=y#)ZkMbTJhP%fHZ z(XMGVnNC1-3gm88)j&X}b9Gy?O2Az>95~(xTRj4{Ytp!-Dwe@2*dfoi;=U72XAB2H zgd#9d%Mj(=i||Ju))a0~31DAv3x*TiwjdA#<|vgahKO>KTwGD1;}p?K)hnh4RcE3U zY>gkKm2EN{#qTeaQgq%x2-t_AXFQ$R8!H~|!(mF=t*V8fE<980qJRxiQNAcMNaF|7 z6r@YkMZtK9g$Z&9gXucloSc3);PKn!^z#5*s#}AZxY$bUbKU@^TTOq*|kzk zNgt=T)C!H|_Y~3zcyHS$yK9*$nTp={m|;snQ=+lIZ~#id&h)8DttS8+9#jfgtaVg!68ndS z5F=6at7)B10Fy#ul0{LlR)9_{Jolm&(5zBb>1~xk(K{*5PVU6@!0EVH&rGVdew#`S z*+fEJV=7bECb{b*4}j*LYStRUsyjYxq24onm@x*~O=4aSyjDeW@`Xlxa?+cA%g@@Y zjCf_C$tqmM{-)w>U|tkvuG6^%=+PGfp)=^fUQe1Iwc)vbgARzp3**2EbRn$H&d|6+ zF3huFGDE+`yC-8)D79!K((hT71Md`TdFjB_kgP1FJeu;<*~>QV&Gi%`GEVC3FfC(O z8tVkOa6=42QBWBL_2#m}CP@!QrpQ}HGn&El@ODDbLOM=@*nGGlZbspCj!8WQNZmUz zik@G!{WVpabpO5jdLs2Ql<#z13Y2Ofji&8vFl#2!CgwQv6S@+cOvmn;1a)63Z#zBf zYDb%@765LP$Ji=n-aVR)o0wT87A;#n*DTsWSzhHxU>mZ|(2Zx~c{BKl!YfyrQ<0^J zQm3h9G#ft@zX*89p(b5>WJ40toi~$g^4oIyTo=-9E_znmwisB9?1@y{qf$!0=v`{7 zRjk$tmtT$0THShn>ICKaEcAud3ooc zBGPWqj0?r$hZfd~VQPS8YN*wS#ZOuj!=0>67Y6}O&|=_=4I*-mw8BlYm^3XlwaEKs zRqCGppuaKb>bp3IUWIG0%9s0409+hpws?i7oP`XzE5 zd?FUvpgQ%g!X4j~rZZ2fZ=ZhmoHzBLG%1RG8koTXK&GH1OQx(iac zb&Qs;ur3xy%z3d~Znw)?!-@1}Yg#maN)GV|?v)cEry0wMV(-F^hW;F#Dic?9DY@AkrFEZe=s3-mE$ya@ORO@KDIho&H-s9$ zB9$Hl9RXcnfsK@LGtX#WHkzb(2c8JAWO5ioi7cHkz;0_A6U)g+1ZKv{lyeeVLIN2b zaqkkcFiuklwBQKYWs zBH?KYxp6idsk?T61t+XvxJcAKHti(cD3>>O6Gw8`sFe0FLHy097I~;(x;WGxoG*-! zIbTNNv@K~zcI+Lp1zI{pR^bH|=(9rL?beGhO&Zwt%B`R~wXx|%PxIUHH$AOZ5E%=r z8B$d}*j0j?CNZv&GsW&;+~w{yQy+XMk*XzD#FnzNI*MYs`Pg=V2YTua^%I&3Vr%O( z!jtiAe-0PCC6mTQvl?*c9pt_UB^3*JRVZmbj{z~%;V4=Mp%p?IYbt}ZpC#BLct9S! 
z8wcRlrBpRQSqgV3ud9wv{wTriM1wFXcVSX5WSOyuaVw?`P+u-B7=~1 zSy_76t5WYxXXV5g*$&+lsc^SuF@vk7c+iOG%&+nj2h>Lt_$bggY&Q!y9-T99h!z)d z8p`TLGx8oiqYkNlv9&|Rkh*tQcCYu9$@xvOI41UPsFL}fvNtw?rSr{}j@2%#w7334 zUlIt%t@PyqDMoKHhFjw+Pna^zsM%O*3VmWO*UW1drQNa`!Ilk~Ki%9aP17ixHPu#k z6P7U{YYiHrL8H;FZdP`Hf;bi_^-#`Eh$}0#VTTOobw{FP#;c|@J;bBWxvR5=+9rC6 z>O~z}>IWfXF_9qoO1HCc=l|5Kq3|H#wQeV@kZKfxAa1EkhGO%b1=^TJ@e`I(iio<{ zD)`PgeXNDHLm1_ANoDJ7cDIK3A-kQ0Ff-`1E%0%oC$$=E^lG|55lP{46s`R-vm^uR0qq5b{#a(yion z_UWJ;V)BO;1jFsE730J()&fkonb4jgceZhpi3bz!JZ_xm^R`g$k6CfLdKf1|5YpfZ zSa;th=}Vhc1oz=7Uxw=_4$-T|wQ2deQQ3Msn8mc7Z2dYZEKY7$v>6eT-WY?bVVz-E z_@-coaxA8Y!@VJD`-g4Ka+$}t2c*?CtQHgXrZnY?+2&rAjhP3r6S%_n_t2(RT^u?5 zw8jf2*C_EoGFY!IoAy53yNgi=?ZBqZd*oH=3)7fF(@Ow(M5#12ucYBBcETH%uD97b zPAtF*Em$Kr25KRmtMmpkc#<;tix`B+bVj>sTZj^r7H#-!qiQL3jIKq~eSEl*TZ2|z zY{+aq2y}zifc3;&(OF2e25AgI>`!9LIzv_0Rrp3n!*+xIb zC<{-^WHKS-Gs7#|a<9v+6 zy=X2kh1I2&fhp}5Gfu;`jLk-iQZ^WbV|tL(Z7*9N-QZPbCzFcKzy@mEmCMn&Rlde5 zmtuQkN0MfCommdnFNJcV$-)qAW5p%W*vNZ>yy4s`khNWh)@{7b9t-uN97ECA-?-t* zErI43w~r@dNAV3%y}wy~xiAf@-JXJrr`C=y80yO48PNoa(KDUC*2#mGFPwgtY%gSL`VkE^L=a*kab&A3+*Z)zl-r5LAf}iWU`vU+Gjva;T`~`M^~MSz zGxHrv{XIq1a-?RuaO9PaH{qjZhDID}{nPS5LEMBhs}hP+<1YCvE+xFKI34O)Sak~+T*`9MQlhv6K_x+kGN4H& zWX8_EV*LqMm6XiaD>M3AEKO?hwn$>no=8F1f}g{{{TC|B-A5p!MJHrneiG;C&@@mY zXZkHTm>q{@1~6ZoVb*fOTZ#P|doa^Xch-w;nfYmP{wP6*eYP4O7^4EIGs9r}3Dq5t z8`Lxhax?}s9lc#b#MqF&PmHYO$j_ZVG-E+~bf&^~AxzH*A8VTL0gOGlLYY5PYfNHi zjkwi9%SescnHkeCo3wCdfsARD@aa8eXd%U88pA&+#WilE*4Er)Jt10^J*76=a}L%u zRJ9oT|HpO+&M!k4=|_~vG!hqzq|F`jx#{y<8BItm1`Tkt_&?}f4FtyG@eIa&dLR&r zsiZHI{#UD2H9@vf19p%sBiz-K7ja&tqFv|;P~wN@2c3X!(-jc)Dpw6QPifKYdcN^u zQWMU4B5InZOKI6dzefJdjbzmvt2i^FC|!kO)32S&AWtEcZzy5YY%NYe>kl&Zur3Fa5{U94?bY@nlhgv?kUn71H_>I zpyE_Tg<2A-ft!>C@{S8}HDKweLU76|UF^{ldvN5Dl2V3lO`@ql<&oVlN{DV-##K&l zMI)>j?PkaKfrBAzzSC8wt5Y7!t9WQim9VEMq7JQWg~H-w8@2tCJ}}QaS-D#WYeZuA znS0ic{B)nW@XR4YAUvVt6QMc3qW6H z)M&i{B*rjQuYF1HNjF)B`$@+rDy?hqp3B+XN>e)8XhMk$F3nEaw;o)Z5Qg{OsA3Ei z6V8}V4$lJY?MbHa(Q`Ur!y8;MCM{9@)7hUY#AV^#woXfo6UHk=EHU+C=uPZPt~qp;WxgC&oH|8Qkcan zdwy#-&Zi}X15;U(1C&R?uuiF@#}u9TvZZhwqBnITbMCC9HDa3HiRR_eOWPu~kx*wX ziy|F?u>({L+cs3RuEQ;QsM8jb>5bKJu3{u0i%{`EhMA2e&qglB2?=S`8tM(zwlgif zdD~gmke!)vI4n@NXZ*#w1ucfkZwCpj$|0IFisbt&=psMNozQo$L_gD06j#t1AFbBc zUFC=bZqun9A_gxGXLg4r2b7Ors5pq#i7=I;E)6*gE<*Mcy;I0s=}VS1bp6|y)zpPb&Il?m5Av^@ly-S|ijK6JN~kyV}x zpeaxt|6GH;#m>^dQ5_k!?4MvgvW;qpu_&5 zr6f@i|8u7jpdNB_&LJ5&Y^2R|5X$X3%yg&pB3La|KHKc{++;@+F1nr=&Sm8cm3oQh z4Wt{;s+Nke6Iy6}r5rvaEcyi}bQaBc9EZTf6+FraEBRulrT$GbXT{t}F_WX?jpaDNw0sdLsMS$b40E$7#>*q(K#lC4G4~;kOtUQxdO{>0 zCxcKq;Nq@QZru1{Ie(9nB5ajN>qNleU=9kpn#kfrS`1(pMQn&tRdmjY?<6X*x3y zawO?$Sw=(Z72!5Q{h}~^w(h2O=jrB)&64`;iD}_316zV}Ohe1b$0jVbErJc2#cm#P zD8x+N=*^vag2LHt=}(MkTIDX?Hf^1#vv?#sTX(!?+NmB&3M;2YSUvVu>Qod?R~n8W z&G65TF6=54=}J^alUbjuxR}FM=njbckKxYji@wHmqO6e|F#^m3r0LOGq>)M(hb+PjT{xVu9Fovhe-W-D6+WqPn4 zKByeJBo4lI(_61l)bK2mET!YqWdhvvq~jY^HW`Dcva1Fd+Ht2+iZOr={AK>T3J7ha z|LM>5+V|jM&a!cFP`WC z+D>5>Cum)ju8pMSBy^)%ol5fJ__EPVgSuMl{N@47YGuuT&0JqOmTLNU8 z)-Ou|FzAu0#>E}x?zmX|2Ks`vPATk-t(-wjHXR{iPb;y?+BRKmkthg#Dl$ENEsjh& zvAj|-dT!^8p|m;EJ6Gg(=NS=;6e$p++6@}v+9eY#VkOTU{E%wu%OsPg z;qrIHaN;lBpx-O!xQy~aMf<5naN9oC)wSkzM?zjtU*aOpbQR1UF3J>SZ>7h6a!$Bp z`Xw1Xn$s|{D4Xn>XYE16V&FD6!k8VduKo5_3LUWQqKhGVZHg^X3~OcfhC=7yttt*K z%E;0F#$#r`yv_^NUpaPi1;UFq%M>F z1)~XCcb17Ev{~@x_T!ZFNsJ? 
z?yb+q^LZz(7WELPt71)Yg!LnQpq|Odu-PJ?@EnzTe$fs>=VO(AAMfa^Oh)T9z4GLp z7DG=#>EF_3v7BYaRAb)0QabP4r|4flARD~04gJ2_A*}qL6hnqA&h;JKo;Hg&5FhJY zu>surcDv7n`ajb<(=I|)2;Ex57Dbx;ac`6q&fK0^m8HO_QoU=(Y^;xFZQJbJbwl2e z%}IlERyL3Wg(v#PXfa*y)H5t{)}SSV)7}UeD{M?NgH5>u#f%oPIeQpU zMY7-ztjyp~Yr=X8wtd8g0~E<)UQAhL&n}Dv-4#gaYbP+1z3t^|cB*{=bgMBd(eA~%64=x=(IeWUSH?-md zv4nZ4KuT#^mcbb;iKz`*%BNbv=mm`&M?f(@bvb!>Ep8=qX#3GWk+`YGRI%3!(UgjtQtLz&q?Z7gcZ0m=>CJ+fr;e2}!W$ z#)O_G{?ML*?PJjUX-xO!nst*ITH3=VTebtST|ue~fg!s9NOonmo0MBag-+Zh4J~Ap zhN{L~Lekk0AZ@i2c5R|(6%o*AMdu}hKo`+gWgV#;oX4i#!0D{R-c`pcc9o{$-c-BK5PG&P9g(flDO-}2F2}jF0j7^# zT+&&Z{LM*M;?)(MD&MS!+C(7tt#8Y2q$1D4RyWX;a%59WrqZYwhU)go6j;k2Y}sdFuSG;CSeL9ltSeUu-n!km!5HE3y$gr z8ah0Hcq5)VS}YyKQB~#moGO>?slr88J@lpa5i&1ckYKQgsbT8Jk48VXn{ zOxkMCX-ya}=#|sTTLGo4mACAw1p%Ww_1pAZ-D#X}S88PS;~c_Zv77R?7&Pb+oqC&0 z9#)+!91#R8$l@PDsi)$ z5@e*S1rXn0(VZ$5d7QlhZZ~MuO0tJ`458eFikltwCUL2l_?{w#I2B-bXA6`Vv7eFF zl|yboQD|#*E)X^iA&x8M)DKo|PPi3nZjOVH-k&XZ?abs;F)=Tsa-~|}ouBa>`n006 z`Axl~noJ2=IARB0%HA-xmKKbrnWHaUH+Qcy&suLA! z=*e-hKDgyQ%jqJ0CrnJN%Olj`9#58OvjLRofNayyPJsrCy8blgEXT<5+i|Q;!lozS z_|&aRO2H;bFkJ|J;MFSKjG?N#60!1i{k8`5_$E9q^3K!Q(iqRqY*?NRsAS#lu5EO! z&XySM+CwwNDig0aRGV9j(exDMaE#e8GonyyvNtoKG{y(Q$w_lS(+@9fsjNFjGa2`; zHN38SFf2QonyYx$f(6%F-J1;*G@o~oAUaBCo2N8fw$)Y(p*|0c#%C5*RHm%DD3Dip zFQ%vB=8%reT3Vg6vOsfK`UTfvIQFkC#9jtYgDTUR@ea)0-rtc*#*aE?(T9UsnEW=C zvYFqJ3Dj(*`?)>!0MqyGcd+a!eE3w{(_U*~;*kl>rJ?Bn|3nR8)ok8X5YSZ;YD|=%5 z>K5O7GTz@<3Gx+3JI5m4GORyAW_Glgv9}h&sb9c3YBGAKTbJqJm@b;=cxkJ1G9BR` zx~VXs&dd*Mp<(Nz6+fM^oLiA=P}DZeFjmzPO+oATx4gxo!i>7xma+FqsZch5Iz=E> zrnDgcx3Fv3X&{Dy{1U%_xKu)1xN$*6YB?ZLgh1u<8QU3qoK4GhyUoUq-@15I{1H>6!LD4itpOl`kzw>k zL4(C!&bxXMSQ{*yP>7?$>EJ&&l_=SdXfb^mm(l{h39}Y|m?3@&ma}_9QJZyOUh6sI zz0vRXUw|2=G=UXc%|o;mrEktsml`2mkb(*h!oYSXqeZ?ON1f6*?BsH8Uo{FINJ4oX`$A$O*-kT)wVpI?%*M;-AYaWkX5t1hI}Yyl=D&vVE7vQU zv$8pE_}q{I_Dg7*h*C+C@jjT-*DmjE8{}Bp^@Fo!*5{EdY1|jA2MDYX)h+87YZ4Z;14n!CW;8NP@Oir>F3}ImCwF4E)9YR{p z@oPBzr*^C#-yIYZR9@2A^j=O%XWTA)5a$OiG^D;C3EVcX!>?XdST<1_royUn=EMe= zHzY*1+3Tk`F=zc76?$%&&NH^A! 
z9tJWQpEE`jOw6OB^r-g-X)qTAUfy^Mm3af?q&H5sQW7%w-|v1%R}}y|t%HDYvhxmv zzAL%9K86KL0n4R#!+GtfCy4iQllcoyfzL|2D%_)7@ZFe!85E>YE!dqP3FbF_N0?;* zj$jx^YC>=r%});Rn@!K#x}?lQf^s2Ab~geh_A%hx!Z)F;MWMR&2C>6(8tBE3KzQsI zL%uwJoU^{C%kUs&hNhXKh0}9#0!qFyOXiK^i=tQ%UEk~GC4LvH=P3uKs>=cJO;h35 v6FQD+5FbVe#!X;cO6E@Ls~BSxPID!Uom^ +# include "Camera.hpp" # include "Vector/Vector.hpp" # include "Window.hpp" # include "Shader.hpp" diff --git a/includes/Window.hpp b/includes/Window.hpp index 41f7db4..43fff61 100644 --- a/includes/Window.hpp +++ b/includes/Window.hpp @@ -15,17 +15,15 @@ # include "RT.hpp" +class Camera; + class Window { public: Window(int width, int height, const char *title, int sleep); - Window(Window const &src); ~Window(void); - Window &operator=(Window const &rhs); - GLFWwindow *getWindow(void) const; - RT::Vec2i getMousePos(void) const; float getFps(void) const; void display(); @@ -38,9 +36,7 @@ class Window private: GLFWwindow *_window; - RT::Vec2i _mousePos; - RT::Vec2i _prevMousePos; - RT::Vec2i _mouseDelta; + Camera *camera; float _fps; diff --git a/includes/glm/CMakeLists.txt b/includes/glm/CMakeLists.txt new file mode 100644 index 0000000..178d23a --- /dev/null +++ b/includes/glm/CMakeLists.txt @@ -0,0 +1,69 @@ +file(GLOB ROOT_SOURCE *.cpp) +file(GLOB ROOT_INLINE *.inl) +file(GLOB ROOT_HEADER *.hpp) +file(GLOB ROOT_TEXT ../*.txt) +file(GLOB ROOT_MD ../*.md) +file(GLOB ROOT_NAT ../util/glm.natvis) + +file(GLOB_RECURSE CORE_SOURCE ./detail/*.cpp) +file(GLOB_RECURSE CORE_INLINE ./detail/*.inl) +file(GLOB_RECURSE CORE_HEADER ./detail/*.hpp) + +file(GLOB_RECURSE EXT_SOURCE ./ext/*.cpp) +file(GLOB_RECURSE EXT_INLINE ./ext/*.inl) +file(GLOB_RECURSE EXT_HEADER ./ext/*.hpp) + +file(GLOB_RECURSE GTC_SOURCE ./gtc/*.cpp) +file(GLOB_RECURSE GTC_INLINE ./gtc/*.inl) +file(GLOB_RECURSE GTC_HEADER ./gtc/*.hpp) + +file(GLOB_RECURSE GTX_SOURCE ./gtx/*.cpp) +file(GLOB_RECURSE GTX_INLINE ./gtx/*.inl) +file(GLOB_RECURSE GTX_HEADER ./gtx/*.hpp) + +file(GLOB_RECURSE SIMD_SOURCE ./simd/*.cpp) +file(GLOB_RECURSE SIMD_INLINE ./simd/*.inl) +file(GLOB_RECURSE SIMD_HEADER ./simd/*.h) + +source_group("Text Files" FILES ${ROOT_TEXT} ${ROOT_MD}) +source_group("Core Files" FILES ${CORE_SOURCE}) +source_group("Core Files" FILES ${CORE_INLINE}) +source_group("Core Files" FILES ${CORE_HEADER}) +source_group("EXT Files" FILES ${EXT_SOURCE}) +source_group("EXT Files" FILES ${EXT_INLINE}) +source_group("EXT Files" FILES ${EXT_HEADER}) +source_group("GTC Files" FILES ${GTC_SOURCE}) +source_group("GTC Files" FILES ${GTC_INLINE}) +source_group("GTC Files" FILES ${GTC_HEADER}) +source_group("GTX Files" FILES ${GTX_SOURCE}) +source_group("GTX Files" FILES ${GTX_INLINE}) +source_group("GTX Files" FILES ${GTX_HEADER}) +source_group("SIMD Files" FILES ${SIMD_SOURCE}) +source_group("SIMD Files" FILES ${SIMD_INLINE}) +source_group("SIMD Files" FILES ${SIMD_HEADER}) + +add_library(glm-header-only INTERFACE) +add_library(glm::glm-header-only ALIAS glm-header-only) + +target_include_directories(glm-header-only INTERFACE + "$" + "$" +) + +if (GLM_BUILD_LIBRARY) + add_library(glm + ${ROOT_TEXT} ${ROOT_MD} ${ROOT_NAT} + ${ROOT_SOURCE} ${ROOT_INLINE} ${ROOT_HEADER} + ${CORE_SOURCE} ${CORE_INLINE} ${CORE_HEADER} + ${EXT_SOURCE} ${EXT_INLINE} ${EXT_HEADER} + ${GTC_SOURCE} ${GTC_INLINE} ${GTC_HEADER} + ${GTX_SOURCE} ${GTX_INLINE} ${GTX_HEADER} + ${SIMD_SOURCE} ${SIMD_INLINE} ${SIMD_HEADER} + ) + add_library(glm::glm ALIAS glm) + target_link_libraries(glm PUBLIC glm-header-only) +else() + add_library(glm INTERFACE) + add_library(glm::glm ALIAS glm) + 
target_link_libraries(glm INTERFACE glm-header-only) +endif() diff --git a/includes/glm/common.hpp b/includes/glm/common.hpp new file mode 100644 index 0000000..b59657d --- /dev/null +++ b/includes/glm/common.hpp @@ -0,0 +1,539 @@ +/// @ref core +/// @file glm/common.hpp +/// +/// @see GLSL 4.20.8 specification, section 8.3 Common Functions +/// +/// @defgroup core_func_common Common functions +/// @ingroup core +/// +/// Provides GLSL common functions +/// +/// These all operate component-wise. The description is per component. +/// +/// Include to use these core features. + +#pragma once + +#include "detail/qualifier.hpp" +#include "detail/_fixes.hpp" + +namespace glm +{ + /// @addtogroup core_func_common + /// @{ + + /// Returns x if x >= 0; otherwise, it returns -x. + /// + /// @tparam genType floating-point or signed integer; scalar or vector types. + /// + /// @see GLSL abs man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR genType abs(genType x); + + /// Returns x if x >= 0; otherwise, it returns -x. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or signed integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL abs man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec abs(vec const& x); + + /// Returns 1.0 if x > 0, 0.0 if x == 0, or -1.0 if x < 0. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL sign man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec sign(vec const& x); + + /// Returns a value equal to the nearest integer that is less then or equal to x. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL floor man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec floor(vec const& x); + + /// Returns a value equal to the nearest integer to x + /// whose absolute value is not larger than the absolute value of x. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL trunc man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec trunc(vec const& x); + + /// Returns a value equal to the nearest integer to x. + /// The fraction 0.5 will round in a direction chosen by the + /// implementation, presumably the direction that is fastest. + /// This includes the possibility that round(x) returns the + /// same value as roundEven(x) for all values of x. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL round man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec round(vec const& x); + + /// Returns a value equal to the nearest integer to x. + /// A fractional part of 0.5 will round toward the nearest even + /// integer. 
(Both 3.5 and 4.5 for x will return 4.0.) + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL roundEven man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + /// @see New round to even technique + template + GLM_FUNC_DECL vec roundEven(vec const& x); + + /// Returns a value equal to the nearest integer + /// that is greater than or equal to x. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL ceil man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec ceil(vec const& x); + + /// Return x - floor(x). + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see GLSL fract man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType fract(genType x); + + /// Return x - floor(x). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL fract man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec fract(vec const& x); + + template + GLM_FUNC_DECL genType mod(genType x, genType y); + + template + GLM_FUNC_DECL vec mod(vec const& x, T y); + + /// Modulus. Returns x - y * floor(x / y) + /// for each component in x using the floating point value y. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types, include glm/gtc/integer for integer scalar types support + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL mod man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec mod(vec const& x, vec const& y); + + /// Returns the fractional part of x and sets i to the integer + /// part (as a whole number floating point value). Both the + /// return value and the output parameter will have the same + /// sign as x. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see GLSL modf man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType modf(genType x, genType& i); + + /// Returns y if y < x; otherwise, it returns x. + /// + /// @tparam genType Floating-point or integer; scalar or vector types. + /// + /// @see GLSL min man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR genType min(genType x, genType y); + + /// Returns y if y < x; otherwise, it returns x. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL min man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec min(vec const& x, T y); + + /// Returns y if y < x; otherwise, it returns x. 
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL min man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec min(vec const& x, vec const& y); + + /// Returns y if x < y; otherwise, it returns x. + /// + /// @tparam genType Floating-point or integer; scalar or vector types. + /// + /// @see GLSL max man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR genType max(genType x, genType y); + + /// Returns y if x < y; otherwise, it returns x. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL max man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec max(vec const& x, T y); + + /// Returns y if x < y; otherwise, it returns x. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL max man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec max(vec const& x, vec const& y); + + /// Returns min(max(x, minVal), maxVal) for each component in x + /// using the floating-point values minVal and maxVal. + /// + /// @tparam genType Floating-point or integer; scalar or vector types. + /// + /// @see GLSL clamp man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR genType clamp(genType x, genType minVal, genType maxVal); + + /// Returns min(max(x, minVal), maxVal) for each component in x + /// using the floating-point values minVal and maxVal. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL clamp man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec clamp(vec const& x, T minVal, T maxVal); + + /// Returns min(max(x, minVal), maxVal) for each component in x + /// using the floating-point values minVal and maxVal. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL clamp man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec clamp(vec const& x, vec const& minVal, vec const& maxVal); + + /// If genTypeU is a floating scalar or vector: + /// Returns x * (1.0 - a) + y * a, i.e., the linear blend of + /// x and y using the floating-point value a. + /// The value for a is not restricted to the range [0, 1]. + /// + /// If genTypeU is a boolean scalar or vector: + /// Selects which vector each returned component comes + /// from. For a component of 'a' that is false, the + /// corresponding component of 'x' is returned. For a + /// component of 'a' that is true, the corresponding + /// component of 'y' is returned. 
Components of 'x' and 'y' that + /// are not selected are allowed to be invalid floating point + /// values and will have no effect on the results. Thus, this + /// provides different functionality than + /// genType mix(genType x, genType y, genType(a)) + /// where a is a Boolean vector. + /// + /// @see GLSL mix man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + /// + /// @param[in] x Value to interpolate. + /// @param[in] y Value to interpolate. + /// @param[in] a Interpolant. + /// + /// @tparam genTypeT Floating point scalar or vector. + /// @tparam genTypeU Floating point or boolean scalar or vector. It can't be a vector if it is the length of genTypeT. + /// + /// @code + /// #include + /// ... + /// float a; + /// bool b; + /// glm::dvec3 e; + /// glm::dvec3 f; + /// glm::vec4 g; + /// glm::vec4 h; + /// ... + /// glm::vec4 r = glm::mix(g, h, a); // Interpolate with a floating-point scalar two vectors. + /// glm::vec4 s = glm::mix(g, h, b); // Returns g or h; + /// glm::dvec3 t = glm::mix(e, f, a); // Types of the third parameter is not required to match with the first and the second. + /// glm::vec4 u = glm::mix(g, h, r); // Interpolations can be perform per component with a vector for the last parameter. + /// @endcode + template + GLM_FUNC_DECL GLM_CONSTEXPR genTypeT mix(genTypeT x, genTypeT y, genTypeU a); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec mix(vec const& x, vec const& y, vec const& a); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec mix(vec const& x, vec const& y, U a); + + /// Returns 0.0 if x < edge, otherwise it returns 1.0 for each component of a genType. + /// + /// @see GLSL step man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType step(genType edge, genType x); + + /// Returns 0.0 if x < edge, otherwise it returns 1.0. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL step man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec step(T edge, vec const& x); + + /// Returns 0.0 if x < edge, otherwise it returns 1.0. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL step man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec step(vec const& edge, vec const& x); + + /// Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and + /// performs smooth Hermite interpolation between 0 and 1 + /// when edge0 < x < edge1. This is useful in cases where + /// you would want a threshold function with a smooth + /// transition. This is equivalent to: + /// genType t; + /// t = clamp ((x - edge0) / (edge1 - edge0), 0, 1); + /// return t * t * (3 - 2 * t); + /// Results are undefined if edge0 >= edge1. + /// + /// @tparam genType Floating-point scalar or vector types. 
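[editor's note] The mix() documentation above already carries an @code example; step() and smoothstep() do not, so here is a short sketch of both as declared above (illustrative only, assuming a standard GLM install with <glm/glm.hpp> on the include path):

#include <glm/glm.hpp>
#include <cstdio>

int main()
{
    // step(edge, x): per component, 0.0 where x < edge and 1.0 otherwise.
    glm::vec3 s = glm::step(glm::vec3(0.5f), glm::vec3(0.2f, 0.5f, 0.9f));   // (0, 1, 1)

    // smoothstep(edge0, edge1, x): clamp t = (x - edge0) / (edge1 - edge0) to [0, 1],
    // then return the Hermite ramp t * t * (3 - 2 * t).
    float h = glm::smoothstep(0.0f, 1.0f, 0.25f);   // 0.15625

    std::printf("%g %g %g | %g\n", s.x, s.y, s.z, h);
    return 0;
}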
+ /// + /// @see GLSL smoothstep man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType smoothstep(genType edge0, genType edge1, genType x); + + template + GLM_FUNC_DECL vec smoothstep(T edge0, T edge1, vec const& x); + + template + GLM_FUNC_DECL vec smoothstep(vec const& edge0, vec const& edge1, vec const& x); + + /// Returns true if x holds a NaN (not a number) + /// representation in the underlying implementation's set of + /// floating point representations. Returns false otherwise, + /// including for implementations with no NaN + /// representations. + /// + /// /!\ When using compiler fast math, this function may fail. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL isnan man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec isnan(vec const& x); + + /// Returns true if x holds a positive infinity or negative + /// infinity representation in the underlying implementation's + /// set of floating point representations. Returns false + /// otherwise, including for implementations with no infinity + /// representations. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL isinf man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec isinf(vec const& x); + + /// Returns a signed integer value representing + /// the encoding of a floating-point value. The floating-point + /// value's bit-level representation is preserved. + /// + /// @see GLSL floatBitsToInt man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + GLM_FUNC_DECL int floatBitsToInt(float v); + + /// Returns a signed integer value representing + /// the encoding of a floating-point value. The floatingpoint + /// value's bit-level representation is preserved. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL floatBitsToInt man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec floatBitsToInt(vec const& v); + + /// Returns a unsigned integer value representing + /// the encoding of a floating-point value. The floatingpoint + /// value's bit-level representation is preserved. + /// + /// @see GLSL floatBitsToUint man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + GLM_FUNC_DECL uint floatBitsToUint(float v); + + /// Returns a unsigned integer value representing + /// the encoding of a floating-point value. The floatingpoint + /// value's bit-level representation is preserved. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL floatBitsToUint man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec floatBitsToUint(vec const& v); + + /// Returns a floating-point value corresponding to a signed + /// integer encoding of a floating-point value. + /// If an inf or NaN is passed in, it will not signal, and the + /// resulting floating point value is unspecified. 
Otherwise, + /// the bit-level representation is preserved. + /// + /// @see GLSL intBitsToFloat man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + GLM_FUNC_DECL float intBitsToFloat(int v); + + /// Returns a floating-point value corresponding to a signed + /// integer encoding of a floating-point value. + /// If an inf or NaN is passed in, it will not signal, and the + /// resulting floating point value is unspecified. Otherwise, + /// the bit-level representation is preserved. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL intBitsToFloat man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec intBitsToFloat(vec const& v); + + /// Returns a floating-point value corresponding to a + /// unsigned integer encoding of a floating-point value. + /// If an inf or NaN is passed in, it will not signal, and the + /// resulting floating point value is unspecified. Otherwise, + /// the bit-level representation is preserved. + /// + /// @see GLSL uintBitsToFloat man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + GLM_FUNC_DECL float uintBitsToFloat(uint v); + + /// Returns a floating-point value corresponding to a + /// unsigned integer encoding of a floating-point value. + /// If an inf or NaN is passed in, it will not signal, and the + /// resulting floating point value is unspecified. Otherwise, + /// the bit-level representation is preserved. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL uintBitsToFloat man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL vec uintBitsToFloat(vec const& v); + + /// Computes and returns a * b + c. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see GLSL fma man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType fma(genType const& a, genType const& b, genType const& c); + + /// Splits x into a floating-point significand in the range + /// [0.5, 1.0) and an integral exponent of two, such that: + /// x = significand * exp(2, exponent) + /// + /// The significand is returned by the function and the + /// exponent is returned in the parameter exp. For a + /// floating-point value of zero, the significant and exponent + /// are both zero. For a floating-point value that is an + /// infinity or is not a number, the results are undefined. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see GLSL frexp man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType frexp(genType x, int& exp); + + template + GLM_FUNC_DECL vec frexp(vec const& v, vec& exp); + + /// Builds a floating-point number from x and the + /// corresponding integral exponent of two in exp, returning: + /// significand * exp(2, exponent) + /// + /// If this product is too large to be represented in the + /// floating-point type, the result is undefined. + /// + /// @tparam genType Floating-point scalar or vector types. 
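[editor's note] The bit-cast pairs (floatBitsToInt/intBitsToFloat) and the frexp()/ldexp() pair declared above round-trip exactly; a minimal illustration (not part of the patch, assuming <glm/glm.hpp> is available):

#include <glm/glm.hpp>
#include <cassert>

int main()
{
    // floatBitsToInt()/intBitsToFloat() preserve the bit-level representation,
    // so the round trip reproduces the original value exactly.
    float f = 3.5f;
    int bits = glm::floatBitsToInt(f);
    assert(glm::intBitsToFloat(bits) == f);

    // frexp() splits x into a significand in [0.5, 1.0) and a power-of-two exponent;
    // ldexp() reassembles them: x == significand * 2^exponent.
    int exponent = 0;
    float significand = glm::frexp(6.0f, exponent);        // 0.75 and 3, since 6.0 == 0.75 * 2^3
    assert(glm::ldexp(significand, exponent) == 6.0f);
    return 0;
}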
+ /// + /// @see GLSL ldexp man page; + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL genType ldexp(genType const& x, int const& exp); + + template + GLM_FUNC_DECL vec ldexp(vec const& v, vec const& exp); + + /// @} +}//namespace glm + +#include "detail/func_common.inl" + diff --git a/includes/glm/detail/_features.hpp b/includes/glm/detail/_features.hpp new file mode 100644 index 0000000..b0cbe9f --- /dev/null +++ b/includes/glm/detail/_features.hpp @@ -0,0 +1,394 @@ +#pragma once + +// #define GLM_CXX98_EXCEPTIONS +// #define GLM_CXX98_RTTI + +// #define GLM_CXX11_RVALUE_REFERENCES +// Rvalue references - GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n2118.html + +// GLM_CXX11_TRAILING_RETURN +// Rvalue references for *this - GCC not supported +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2439.htm + +// GLM_CXX11_NONSTATIC_MEMBER_INIT +// Initialization of class objects by rvalues - GCC any +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1610.html + +// GLM_CXX11_NONSTATIC_MEMBER_INIT +// Non-static data member initializers - GCC 4.7 +// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2008/n2756.htm + +// #define GLM_CXX11_VARIADIC_TEMPLATE +// Variadic templates - GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2242.pdf + +// +// Extending variadic template template parameters - GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2555.pdf + +// #define GLM_CXX11_GENERALIZED_INITIALIZERS +// Initializer lists - GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2672.htm + +// #define GLM_CXX11_STATIC_ASSERT +// Static assertions - GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1720.html + +// #define GLM_CXX11_AUTO_TYPE +// auto-typed variables - GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1984.pdf + +// #define GLM_CXX11_AUTO_TYPE +// Multi-declarator auto - GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1737.pdf + +// #define GLM_CXX11_AUTO_TYPE +// Removal of auto as a storage-class specifier - GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2546.htm + +// #define GLM_CXX11_AUTO_TYPE +// New function declarator syntax - GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2541.htm + +// #define GLM_CXX11_LAMBDAS +// New wording for C++0x lambdas - GCC 4.5 +// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2927.pdf + +// #define GLM_CXX11_DECLTYPE +// Declared type of an expression - GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2343.pdf + +// +// Right angle brackets - GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1757.html + +// +// Default template arguments for function templates DR226 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#226 + +// +// Solving the SFINAE problem for expressions DR339 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2634.html + +// #define GLM_CXX11_ALIAS_TEMPLATE +// Template aliases N2258 GCC 4.7 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2258.pdf + +// +// Extern templates N1987 Yes +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1987.htm + +// #define GLM_CXX11_NULLPTR +// Null pointer constant N2431 GCC 4.6 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2431.pdf + +// #define GLM_CXX11_STRONG_ENUMS +// Strongly-typed enums N2347 GCC 4.4 +// 
http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2347.pdf + +// +// Forward declarations for enums N2764 GCC 4.6 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2764.pdf + +// +// Generalized attributes N2761 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2761.pdf + +// +// Generalized constant expressions N2235 GCC 4.6 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2235.pdf + +// +// Alignment support N2341 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf + +// #define GLM_CXX11_DELEGATING_CONSTRUCTORS +// Delegating constructors N1986 GCC 4.7 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1986.pdf + +// +// Inheriting constructors N2540 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2540.htm + +// #define GLM_CXX11_EXPLICIT_CONVERSIONS +// Explicit conversion operators N2437 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2437.pdf + +// +// New character types N2249 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2249.html + +// +// Unicode string literals N2442 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm + +// +// Raw string literals N2442 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm + +// +// Universal character name literals N2170 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2170.html + +// #define GLM_CXX11_USER_LITERALS +// User-defined literals N2765 GCC 4.7 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2765.pdf + +// +// Standard Layout Types N2342 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2342.htm + +// #define GLM_CXX11_DEFAULTED_FUNCTIONS +// #define GLM_CXX11_DELETED_FUNCTIONS +// Defaulted and deleted functions N2346 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2346.htm + +// +// Extended friend declarations N1791 GCC 4.7 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1791.pdf + +// +// Extending sizeof N2253 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2253.html + +// #define GLM_CXX11_INLINE_NAMESPACES +// Inline namespaces N2535 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2535.htm + +// #define GLM_CXX11_UNRESTRICTED_UNIONS +// Unrestricted unions N2544 GCC 4.6 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf + +// #define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS +// Local and unnamed types as template arguments N2657 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm + +// #define GLM_CXX11_RANGE_FOR +// Range-based for N2930 GCC 4.6 +// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2930.html + +// #define GLM_CXX11_OVERRIDE_CONTROL +// Explicit virtual overrides N2928 N3206 N3272 GCC 4.7 +// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2928.htm +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3206.htm +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3272.htm + +// +// Minimal support for garbage collection and reachability-based leak detection N2670 No +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2670.htm + +// #define GLM_CXX11_NOEXCEPT +// Allowing move constructors to throw [noexcept] N3050 GCC 4.6 (core language only) +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3050.html + +// +// Defining move special member functions N3053 GCC 4.6 +// 
http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3053.html + +// +// Sequence points N2239 Yes +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2239.html + +// +// Atomic operations N2427 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2239.html + +// +// Strong Compare and Exchange N2748 GCC 4.5 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html + +// +// Bidirectional Fences N2752 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2752.htm + +// +// Memory model N2429 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2429.htm + +// +// Data-dependency ordering: atomics and memory model N2664 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2664.htm + +// +// Propagating exceptions N2179 GCC 4.4 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2179.html + +// +// Abandoning a process and at_quick_exit N2440 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2440.htm + +// +// Allow atomics use in signal handlers N2547 Yes +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2547.htm + +// +// Thread-local storage N2659 GCC 4.8 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2659.htm + +// +// Dynamic initialization and destruction with concurrency N2660 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2660.htm + +// +// __func__ predefined identifier N2340 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2340.htm + +// +// C99 preprocessor N1653 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1653.htm + +// +// long long N1811 GCC 4.3 +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1811.pdf + +// +// Extended integral types N1988 Yes +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1988.pdf + +#if(GLM_COMPILER & GLM_COMPILER_GCC) + +# define GLM_CXX11_STATIC_ASSERT + +#elif(GLM_COMPILER & GLM_COMPILER_CLANG) +# if(__has_feature(cxx_exceptions)) +# define GLM_CXX98_EXCEPTIONS +# endif + +# if(__has_feature(cxx_rtti)) +# define GLM_CXX98_RTTI +# endif + +# if(__has_feature(cxx_access_control_sfinae)) +# define GLM_CXX11_ACCESS_CONTROL_SFINAE +# endif + +# if(__has_feature(cxx_alias_templates)) +# define GLM_CXX11_ALIAS_TEMPLATE +# endif + +# if(__has_feature(cxx_alignas)) +# define GLM_CXX11_ALIGNAS +# endif + +# if(__has_feature(cxx_attributes)) +# define GLM_CXX11_ATTRIBUTES +# endif + +# if(__has_feature(cxx_constexpr)) +# define GLM_CXX11_CONSTEXPR +# endif + +# if(__has_feature(cxx_decltype)) +# define GLM_CXX11_DECLTYPE +# endif + +# if(__has_feature(cxx_default_function_template_args)) +# define GLM_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS +# endif + +# if(__has_feature(cxx_defaulted_functions)) +# define GLM_CXX11_DEFAULTED_FUNCTIONS +# endif + +# if(__has_feature(cxx_delegating_constructors)) +# define GLM_CXX11_DELEGATING_CONSTRUCTORS +# endif + +# if(__has_feature(cxx_deleted_functions)) +# define GLM_CXX11_DELETED_FUNCTIONS +# endif + +# if(__has_feature(cxx_explicit_conversions)) +# define GLM_CXX11_EXPLICIT_CONVERSIONS +# endif + +# if(__has_feature(cxx_generalized_initializers)) +# define GLM_CXX11_GENERALIZED_INITIALIZERS +# endif + +# if(__has_feature(cxx_implicit_moves)) +# define GLM_CXX11_IMPLICIT_MOVES +# endif + +# if(__has_feature(cxx_inheriting_constructors)) +# define GLM_CXX11_INHERITING_CONSTRUCTORS +# endif + +# if(__has_feature(cxx_inline_namespaces)) +# define GLM_CXX11_INLINE_NAMESPACES +# endif + +# 
if(__has_feature(cxx_lambdas)) +# define GLM_CXX11_LAMBDAS +# endif + +# if(__has_feature(cxx_local_type_template_args)) +# define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS +# endif + +# if(__has_feature(cxx_noexcept)) +# define GLM_CXX11_NOEXCEPT +# endif + +# if(__has_feature(cxx_nonstatic_member_init)) +# define GLM_CXX11_NONSTATIC_MEMBER_INIT +# endif + +# if(__has_feature(cxx_nullptr)) +# define GLM_CXX11_NULLPTR +# endif + +# if(__has_feature(cxx_override_control)) +# define GLM_CXX11_OVERRIDE_CONTROL +# endif + +# if(__has_feature(cxx_reference_qualified_functions)) +# define GLM_CXX11_REFERENCE_QUALIFIED_FUNCTIONS +# endif + +# if(__has_feature(cxx_range_for)) +# define GLM_CXX11_RANGE_FOR +# endif + +# if(__has_feature(cxx_raw_string_literals)) +# define GLM_CXX11_RAW_STRING_LITERALS +# endif + +# if(__has_feature(cxx_rvalue_references)) +# define GLM_CXX11_RVALUE_REFERENCES +# endif + +# if(__has_feature(cxx_static_assert)) +# define GLM_CXX11_STATIC_ASSERT +# endif + +# if(__has_feature(cxx_auto_type)) +# define GLM_CXX11_AUTO_TYPE +# endif + +# if(__has_feature(cxx_strong_enums)) +# define GLM_CXX11_STRONG_ENUMS +# endif + +# if(__has_feature(cxx_trailing_return)) +# define GLM_CXX11_TRAILING_RETURN +# endif + +# if(__has_feature(cxx_unicode_literals)) +# define GLM_CXX11_UNICODE_LITERALS +# endif + +# if(__has_feature(cxx_unrestricted_unions)) +# define GLM_CXX11_UNRESTRICTED_UNIONS +# endif + +# if(__has_feature(cxx_user_literals)) +# define GLM_CXX11_USER_LITERALS +# endif + +# if(__has_feature(cxx_variadic_templates)) +# define GLM_CXX11_VARIADIC_TEMPLATES +# endif + +#endif//(GLM_COMPILER & GLM_COMPILER_CLANG) diff --git a/includes/glm/detail/_fixes.hpp b/includes/glm/detail/_fixes.hpp new file mode 100644 index 0000000..a503c7c --- /dev/null +++ b/includes/glm/detail/_fixes.hpp @@ -0,0 +1,27 @@ +#include + +//! Workaround for compatibility with other libraries +#ifdef max +#undef max +#endif + +//! Workaround for compatibility with other libraries +#ifdef min +#undef min +#endif + +//! Workaround for Android +#ifdef isnan +#undef isnan +#endif + +//! Workaround for Android +#ifdef isinf +#undef isinf +#endif + +//! 
Workaround for Chrone Native Client +#ifdef log2 +#undef log2 +#endif + diff --git a/includes/glm/detail/_noise.hpp b/includes/glm/detail/_noise.hpp new file mode 100644 index 0000000..5a874a0 --- /dev/null +++ b/includes/glm/detail/_noise.hpp @@ -0,0 +1,81 @@ +#pragma once + +#include "../common.hpp" + +namespace glm{ +namespace detail +{ + template + GLM_FUNC_QUALIFIER T mod289(T const& x) + { + return x - floor(x * (static_cast(1.0) / static_cast(289.0))) * static_cast(289.0); + } + + template + GLM_FUNC_QUALIFIER T permute(T const& x) + { + return mod289(((x * static_cast(34)) + static_cast(1)) * x); + } + + template + GLM_FUNC_QUALIFIER vec<2, T, Q> permute(vec<2, T, Q> const& x) + { + return mod289(((x * static_cast(34)) + static_cast(1)) * x); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> permute(vec<3, T, Q> const& x) + { + return mod289(((x * static_cast(34)) + static_cast(1)) * x); + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> permute(vec<4, T, Q> const& x) + { + return mod289(((x * static_cast(34)) + static_cast(1)) * x); + } + + template + GLM_FUNC_QUALIFIER T taylorInvSqrt(T const& r) + { + return static_cast(1.79284291400159) - static_cast(0.85373472095314) * r; + } + + template + GLM_FUNC_QUALIFIER vec<2, T, Q> taylorInvSqrt(vec<2, T, Q> const& r) + { + return static_cast(1.79284291400159) - static_cast(0.85373472095314) * r; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> taylorInvSqrt(vec<3, T, Q> const& r) + { + return static_cast(1.79284291400159) - static_cast(0.85373472095314) * r; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> taylorInvSqrt(vec<4, T, Q> const& r) + { + return static_cast(1.79284291400159) - static_cast(0.85373472095314) * r; + } + + template + GLM_FUNC_QUALIFIER vec<2, T, Q> fade(vec<2, T, Q> const& t) + { + return (t * t * t) * (t * (t * static_cast(6) - static_cast(15)) + static_cast(10)); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> fade(vec<3, T, Q> const& t) + { + return (t * t * t) * (t * (t * static_cast(6) - static_cast(15)) + static_cast(10)); + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> fade(vec<4, T, Q> const& t) + { + return (t * t * t) * (t * (t * static_cast(6) - static_cast(15)) + static_cast(10)); + } +}//namespace detail +}//namespace glm + diff --git a/includes/glm/detail/_swizzle.hpp b/includes/glm/detail/_swizzle.hpp new file mode 100644 index 0000000..0678982 --- /dev/null +++ b/includes/glm/detail/_swizzle.hpp @@ -0,0 +1,809 @@ +#pragma once + +namespace glm{ +namespace detail +{ + // Internal class for implementing swizzle operators + template + struct _swizzle_base0 + { + protected: + GLM_FUNC_QUALIFIER T& elem(int i){ return (reinterpret_cast(_buffer))[i]; } + GLM_FUNC_QUALIFIER T const& elem(int i) const{ return (reinterpret_cast(_buffer))[i]; } + + // Use an opaque buffer to *ensure* the compiler doesn't call a constructor. 
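[editor's note] The detail::fade() helper above is the Perlin quintic 6t^5 - 15t^4 + 10t^3 written in factored form; a tiny standalone check (illustrative only, independent of GLM, using a hypothetical local fade() that mirrors the header) confirms the factoring and the endpoint values:

#include <cassert>

// fade(t) = 6t^5 - 15t^4 + 10t^3, written exactly as in the header above:
// (t*t*t) * (t*(t*6 - 15) + 10).
static float fade(float t)
{
    return (t * t * t) * (t * (t * 6.0f - 15.0f) + 10.0f);
}

int main()
{
    // The quintic maps [0, 1] onto [0, 1] with zero first and second derivatives at
    // both ends, which is what keeps interpolated noise smooth across cell borders.
    assert(fade(0.0f) == 0.0f);
    assert(fade(1.0f) == 1.0f);
    assert(fade(0.5f) == 0.5f);   // 0.1875 - 0.9375 + 1.25 == 0.5
    return 0;
}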
+ // The size 1 buffer is assumed to aligned to the actual members so that the + // elem() + char _buffer[1]; + }; + + template + struct _swizzle_base1 : public _swizzle_base0 + { + }; + + template + struct _swizzle_base1 : public _swizzle_base0 + { + }; + + template + struct _swizzle_base1<2, T, Q, E0,E1,-1,-2, false> : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER vec<2, T, Q> operator ()() const { return vec<2, T, Q>(this->elem(E0), this->elem(E1)); } + }; + + template + struct _swizzle_base1<3, T, Q, E0,E1,E2,3, false> : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER vec<3, T, Q> operator ()() const { return vec<3, T, Q>(this->elem(E0), this->elem(E1), this->elem(E2)); } + }; + + template + struct _swizzle_base1<4, T, Q, E0,E1,E2,E3, false> : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER vec<4, T, Q> operator ()() const { return vec<4, T, Q>(this->elem(E0), this->elem(E1), this->elem(E2), this->elem(E3)); } + }; + + // Internal class for implementing swizzle operators + /* + Template parameters: + + T = type of scalar values (e.g. float, double) + N = number of components in the vector (e.g. 3) + E0...3 = what index the n-th element of this swizzle refers to in the unswizzled vec + + DUPLICATE_ELEMENTS = 1 if there is a repeated element, 0 otherwise (used to specialize swizzles + containing duplicate elements so that they cannot be used as r-values). + */ + template + struct _swizzle_base2 : public _swizzle_base1::value> + { + struct op_equal + { + GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e = t; } + }; + + struct op_minus + { + GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e -= t; } + }; + + struct op_plus + { + GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e += t; } + }; + + struct op_mul + { + GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e *= t; } + }; + + struct op_div + { + GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e /= t; } + }; + + public: + GLM_FUNC_QUALIFIER _swizzle_base2& operator= (const T& t) + { + for (int i = 0; i < N; ++i) + (*this)[i] = t; + return *this; + } + + GLM_FUNC_QUALIFIER _swizzle_base2& operator= (vec const& that) + { + _apply_op(that, op_equal()); + return *this; + } + + GLM_FUNC_QUALIFIER void operator -= (vec const& that) + { + _apply_op(that, op_minus()); + } + + GLM_FUNC_QUALIFIER void operator += (vec const& that) + { + _apply_op(that, op_plus()); + } + + GLM_FUNC_QUALIFIER void operator *= (vec const& that) + { + _apply_op(that, op_mul()); + } + + GLM_FUNC_QUALIFIER void operator /= (vec const& that) + { + _apply_op(that, op_div()); + } + + GLM_FUNC_QUALIFIER T& operator[](int i) + { + const int offset_dst[4] = { E0, E1, E2, E3 }; + return this->elem(offset_dst[i]); + } + GLM_FUNC_QUALIFIER T operator[](int i) const + { + const int offset_dst[4] = { E0, E1, E2, E3 }; + return this->elem(offset_dst[i]); + } + + protected: + template + GLM_FUNC_QUALIFIER void _apply_op(vec const& that, const U& op) + { + // Make a copy of the data in this == &that. + // The copier should optimize out the copy in cases where the function is + // properly inlined and the copy is not necessary. + T t[N]; + for (int i = 0; i < N; ++i) + t[i] = that[i]; + for (int i = 0; i < N; ++i) + op( (*this)[i], t[i] ); + } + }; + + // Specialization for swizzles containing duplicate elements. These cannot be modified. 
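[editor's note] At the user level these templates back GLM's member swizzles: swizzles with distinct indices are assignable, while the duplicate-element specialization below is read-only. A behavioural sketch (illustrative only, assuming GLM_FORCE_SWIZZLE is defined before the first GLM include and the compiler supports GLM's anonymous-struct swizzle members):

#define GLM_FORCE_SWIZZLE
#include <glm/glm.hpp>

int main()
{
    glm::vec4 v(1.0f, 2.0f, 3.0f, 4.0f);

    glm::vec2 a = v.yx;            // read swizzle via operator vec(): (2, 1)
    v.xy = glm::vec2(9.0f);        // distinct indices: writable (non-duplicate _swizzle_base2)

    glm::vec2 b = v.xx;            // duplicate indices: still readable...
    // v.xx = glm::vec2(0.0f);     // ...but not assignable (read-only specialization below)

    return (a.x == 2.0f && b.x == 9.0f) ? 0 : 1;
}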
+ template + struct _swizzle_base2 : public _swizzle_base1::value> + { + struct Stub {}; + + GLM_FUNC_QUALIFIER _swizzle_base2& operator= (Stub const&) { return *this; } + + GLM_FUNC_QUALIFIER T operator[] (int i) const + { + const int offset_dst[4] = { E0, E1, E2, E3 }; + return this->elem(offset_dst[i]); + } + }; + + template + struct _swizzle : public _swizzle_base2 + { + typedef _swizzle_base2 base_type; + + using base_type::operator=; + + GLM_FUNC_QUALIFIER operator vec () const { return (*this)(); } + }; + +// +// To prevent the C++ syntax from getting entirely overwhelming, define some alias macros +// +#define GLM_SWIZZLE_TEMPLATE1 template +#define GLM_SWIZZLE_TEMPLATE2 template +#define GLM_SWIZZLE_TYPE1 _swizzle +#define GLM_SWIZZLE_TYPE2 _swizzle + +// +// Wrapper for a binary operator (e.g. u.yy + v.zy) +// +#define GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND) \ + GLM_SWIZZLE_TEMPLATE2 \ + GLM_FUNC_QUALIFIER vec operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b) \ + { \ + return a() OPERAND b(); \ + } \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER vec operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const vec& b) \ + { \ + return a() OPERAND b; \ + } \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER vec operator OPERAND ( const vec& a, const GLM_SWIZZLE_TYPE1& b) \ + { \ + return a OPERAND b(); \ + } + +// +// Wrapper for a operand between a swizzle and a binary (e.g. 1.0f - u.xyz) +// +#define GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND) \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER vec operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const T& b) \ + { \ + return a() OPERAND b; \ + } \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER vec operator OPERAND ( const T& a, const GLM_SWIZZLE_TYPE1& b) \ + { \ + return a OPERAND b(); \ + } + +// +// Macro for wrapping a function taking one argument (e.g. abs()) +// +#define GLM_SWIZZLE_FUNCTION_1_ARGS(RETURN_TYPE,FUNCTION) \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a) \ + { \ + return FUNCTION(a()); \ + } + +// +// Macro for wrapping a function taking two vector arguments (e.g. dot()). +// +#define GLM_SWIZZLE_FUNCTION_2_ARGS(RETURN_TYPE,FUNCTION) \ + GLM_SWIZZLE_TEMPLATE2 \ + GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b) \ + { \ + return FUNCTION(a(), b()); \ + } \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE1& b) \ + { \ + return FUNCTION(a(), b()); \ + } \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const typename V& b) \ + { \ + return FUNCTION(a(), b); \ + } \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const V& a, const GLM_SWIZZLE_TYPE1& b) \ + { \ + return FUNCTION(a, b()); \ + } + +// +// Macro for wrapping a function take 2 vec arguments followed by a scalar (e.g. mix()). 
+// +#define GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(RETURN_TYPE,FUNCTION) \ + GLM_SWIZZLE_TEMPLATE2 \ + GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b, const T& c) \ + { \ + return FUNCTION(a(), b(), c); \ + } \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE1& b, const T& c) \ + { \ + return FUNCTION(a(), b(), c); \ + } \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const typename S0::vec_type& b, const T& c)\ + { \ + return FUNCTION(a(), b, c); \ + } \ + GLM_SWIZZLE_TEMPLATE1 \ + GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const typename V& a, const GLM_SWIZZLE_TYPE1& b, const T& c) \ + { \ + return FUNCTION(a, b(), c); \ + } + +}//namespace detail +}//namespace glm + +namespace glm +{ + namespace detail + { + GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(-) + GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(*) + GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(+) + GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(-) + GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(*) + GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(/) + } + + // + // Swizzles are distinct types from the unswizzled type. The below macros will + // provide template specializations for the swizzle types for the given functions + // so that the compiler does not have any ambiguity to choosing how to handle + // the function. + // + // The alternative is to use the operator()() when calling the function in order + // to explicitly convert the swizzled type to the unswizzled type. + // + + //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, abs); + //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acos); + //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acosh); + //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, all); + //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, any); + + //GLM_SWIZZLE_FUNCTION_2_ARGS(value_type, dot); + //GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, cross); + //GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, step); + //GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(vec_type, mix); +} + +#define GLM_SWIZZLE2_2_MEMBERS(T, Q, E0,E1) \ + struct { detail::_swizzle<2, T, Q, 0,0,-1,-2> E0 ## E0; }; \ + struct { detail::_swizzle<2, T, Q, 0,1,-1,-2> E0 ## E1; }; \ + struct { detail::_swizzle<2, T, Q, 1,0,-1,-2> E1 ## E0; }; \ + struct { detail::_swizzle<2, T, Q, 1,1,-1,-2> E1 ## E1; }; + +#define GLM_SWIZZLE2_3_MEMBERS(T, Q, E0,E1) \ + struct { detail::_swizzle<3,T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<3,T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<3,T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<3,T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<3,T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<3,T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<3,T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<3,T, Q, 1,1,1,-1> E1 ## E1 ## E1; }; + +#define GLM_SWIZZLE2_4_MEMBERS(T, Q, E0,E1) \ + struct { detail::_swizzle<4,T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,0,1> E0 
## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; + +#define GLM_SWIZZLE3_2_MEMBERS(T, Q, E0,E1,E2) \ + struct { detail::_swizzle<2,T, Q, 0,0,-1,-2> E0 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 0,1,-1,-2> E0 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 0,2,-1,-2> E0 ## E2; }; \ + struct { detail::_swizzle<2,T, Q, 1,0,-1,-2> E1 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 1,1,-1,-2> E1 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 1,2,-1,-2> E1 ## E2; }; \ + struct { detail::_swizzle<2,T, Q, 2,0,-1,-2> E2 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 2,1,-1,-2> E2 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 2,2,-1,-2> E2 ## E2; }; + +#define GLM_SWIZZLE3_3_MEMBERS(T, Q ,E0,E1,E2) \ + struct { detail::_swizzle<3, T, Q, 0,0,0,3> E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,0,1,3> E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 0,0,2,3> E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 0,1,0,3> E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,1,1,3> E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 0,1,2,3> E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,0,3> E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,1,3> E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,2,3> E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,0,3> E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,1,3> E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,2,3> E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,0,3> E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,1,3> E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,2,3> E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,0,3> E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,1,3> E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,2,3> E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,0,3> E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,1,3> E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,2,3> E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,0,3> E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,1,3> E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,2,3> E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,0,3> E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,1,3> E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,2,3> E2 ## E2 ## E2; }; + +#define GLM_SWIZZLE3_4_MEMBERS(T, Q, E0,E1,E2) \ + struct { detail::_swizzle<4,T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,0,2> E0 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \ + 
struct { detail::_swizzle<4,T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,1,2> E0 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,2,0> E0 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,2,1> E0 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,0,2,2> E0 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,0,2> E0 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,1,2> E0 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,2,0> E0 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,2,1> E0 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,1,2,2> E0 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,0,0> E0 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,0,1> E0 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,0,2> E0 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,1,0> E0 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,1,1> E0 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,1,2> E0 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,2,0> E0 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,2,1> E0 ## E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 0,2,2,2> E0 ## E2 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,0,2> E1 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,1,2> E1 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,2,0> E1 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,2,1> E1 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,0,2,2> E1 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,0,2> E1 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,1,2> E1 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,2,0> E1 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,2,1> E1 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,1,2,2> E1 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,0,0> E1 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,0,1> E1 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,0,2> E1 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,1,0> E1 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,1,1> E1 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,1,2> E1 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,2,0> E1 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 1,2,2,1> E1 ## E2 ## E2 ## E1; }; \ + struct { 
detail::_swizzle<4,T, Q, 1,2,2,2> E1 ## E2 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,0,0> E2 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,0,1> E2 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,0,2> E2 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,1,0> E2 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,1,1> E2 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,1,2> E2 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,2,0> E2 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,2,1> E2 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,0,2,2> E2 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,0,0> E2 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,0,1> E2 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,0,2> E2 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,1,0> E2 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,1,1> E2 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,1,2> E2 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,2,0> E2 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,2,1> E2 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,1,2,2> E2 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,0,0> E2 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,0,1> E2 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,0,2> E2 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,1,0> E2 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,1,1> E2 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,1,2> E2 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,2,0> E2 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,2,1> E2 ## E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<4,T, Q, 2,2,2,2> E2 ## E2 ## E2 ## E2; }; + +#define GLM_SWIZZLE4_2_MEMBERS(T, Q, E0,E1,E2,E3) \ + struct { detail::_swizzle<2,T, Q, 0,0,-1,-2> E0 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 0,1,-1,-2> E0 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 0,2,-1,-2> E0 ## E2; }; \ + struct { detail::_swizzle<2,T, Q, 0,3,-1,-2> E0 ## E3; }; \ + struct { detail::_swizzle<2,T, Q, 1,0,-1,-2> E1 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 1,1,-1,-2> E1 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 1,2,-1,-2> E1 ## E2; }; \ + struct { detail::_swizzle<2,T, Q, 1,3,-1,-2> E1 ## E3; }; \ + struct { detail::_swizzle<2,T, Q, 2,0,-1,-2> E2 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 2,1,-1,-2> E2 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 2,2,-1,-2> E2 ## E2; }; \ + struct { detail::_swizzle<2,T, Q, 2,3,-1,-2> E2 ## E3; }; \ + struct { detail::_swizzle<2,T, Q, 3,0,-1,-2> E3 ## E0; }; \ + struct { detail::_swizzle<2,T, Q, 3,1,-1,-2> E3 ## E1; }; \ + struct { detail::_swizzle<2,T, Q, 3,2,-1,-2> E3 ## E2; }; \ + struct { detail::_swizzle<2,T, Q, 3,3,-1,-2> E3 ## E3; }; + +#define GLM_SWIZZLE4_3_MEMBERS(T, Q, E0,E1,E2,E3) \ + struct { detail::_swizzle<3, T, Q, 0,0,0,3> E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,0,1,3> E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 0,0,2,3> E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 0,0,3,3> E0 ## E0 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 0,1,0,3> E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,1,1,3> E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 0,1,2,3> E0 
## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 0,1,3,3> E0 ## E1 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,0,3> E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,1,3> E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,2,3> E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 0,2,3,3> E0 ## E2 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 0,3,0,3> E0 ## E3 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 0,3,1,3> E0 ## E3 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 0,3,2,3> E0 ## E3 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 0,3,3,3> E0 ## E3 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,0,3> E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,1,3> E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,2,3> E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,0,3,3> E1 ## E0 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,0,3> E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,1,3> E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,2,3> E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,1,3,3> E1 ## E1 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,0,3> E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,1,3> E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,2,3> E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,2,3,3> E1 ## E2 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 1,3,0,3> E1 ## E3 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 1,3,1,3> E1 ## E3 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 1,3,2,3> E1 ## E3 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 1,3,3,3> E1 ## E3 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,0,3> E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,1,3> E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,2,3> E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,0,3,3> E2 ## E0 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,0,3> E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,1,3> E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,2,3> E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,1,3,3> E2 ## E1 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,0,3> E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,1,3> E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,2,3> E2 ## E2 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,2,3,3> E2 ## E2 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 2,3,0,3> E2 ## E3 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 2,3,1,3> E2 ## E3 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 2,3,2,3> E2 ## E3 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 2,3,3,3> E2 ## E3 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 3,0,0,3> E3 ## E0 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 3,0,1,3> E3 ## E0 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 3,0,2,3> E3 ## E0 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 3,0,3,3> E3 ## E0 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 3,1,0,3> E3 ## E1 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 3,1,1,3> E3 ## E1 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 3,1,2,3> E3 ## E1 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 3,1,3,3> E3 ## E1 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 3,2,0,3> E3 ## E2 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 3,2,1,3> E3 ## E2 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 3,2,2,3> E3 ## E2 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 3,2,3,3> E3 ## 
E2 ## E3; }; \ + struct { detail::_swizzle<3, T, Q, 3,3,0,3> E3 ## E3 ## E0; }; \ + struct { detail::_swizzle<3, T, Q, 3,3,1,3> E3 ## E3 ## E1; }; \ + struct { detail::_swizzle<3, T, Q, 3,3,2,3> E3 ## E3 ## E2; }; \ + struct { detail::_swizzle<3, T, Q, 3,3,3,3> E3 ## E3 ## E3; }; + +#define GLM_SWIZZLE4_4_MEMBERS(T, Q, E0,E1,E2,E3) \ + struct { detail::_swizzle<4, T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,0,2> E0 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,0,3> E0 ## E0 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,1,2> E0 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,1,3> E0 ## E0 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,2,0> E0 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,2,1> E0 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,2,2> E0 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,2,3> E0 ## E0 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,3,0> E0 ## E0 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,3,1> E0 ## E0 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,3,2> E0 ## E0 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,0,3,3> E0 ## E0 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,0,2> E0 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,0,3> E0 ## E1 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,1,2> E0 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,1,3> E0 ## E1 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,2,0> E0 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,2,1> E0 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,2,2> E0 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,2,3> E0 ## E1 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,3,0> E0 ## E1 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,3,1> E0 ## E1 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,3,2> E0 ## E1 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,1,3,3> E0 ## E1 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,0,0> E0 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,0,1> E0 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,0,2> E0 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,0,3> E0 ## E2 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,1,0> E0 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,1,1> E0 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,1,2> E0 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,1,3> E0 ## E2 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,2,0> E0 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,2,1> E0 ## E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,2,2> E0 ## E2 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,2,3> E0 ## E2 ## E2 ## E3; }; \ + 
struct { detail::_swizzle<4, T, Q, 0,2,3,0> E0 ## E2 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,3,1> E0 ## E2 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,3,2> E0 ## E2 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,2,3,3> E0 ## E2 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,0,0> E0 ## E3 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,0,1> E0 ## E3 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,0,2> E0 ## E3 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,0,3> E0 ## E3 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,1,0> E0 ## E3 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,1,1> E0 ## E3 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,1,2> E0 ## E3 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,1,3> E0 ## E3 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,2,0> E0 ## E3 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,2,1> E0 ## E3 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,2,2> E0 ## E3 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,2,3> E0 ## E3 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,3,0> E0 ## E3 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,3,1> E0 ## E3 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,3,2> E0 ## E3 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 0,3,3,3> E0 ## E3 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,0,2> E1 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,0,3> E1 ## E0 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,1,2> E1 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,1,3> E1 ## E0 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,2,0> E1 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,2,1> E1 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,2,2> E1 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,2,3> E1 ## E0 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,3,0> E1 ## E0 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,3,1> E1 ## E0 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,3,2> E1 ## E0 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,0,3,3> E1 ## E0 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,0,2> E1 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,0,3> E1 ## E1 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,1,2> E1 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,1,3> E1 ## E1 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,2,0> E1 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,2,1> E1 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,2,2> E1 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,2,3> E1 ## E1 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,3,0> E1 ## 
E1 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,3,1> E1 ## E1 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,3,2> E1 ## E1 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,1,3,3> E1 ## E1 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,0,0> E1 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,0,1> E1 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,0,2> E1 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,0,3> E1 ## E2 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,1,0> E1 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,1,1> E1 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,1,2> E1 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,1,3> E1 ## E2 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,2,0> E1 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,2,1> E1 ## E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,2,2> E1 ## E2 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,2,3> E1 ## E2 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,3,0> E1 ## E2 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,3,1> E1 ## E2 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,3,2> E1 ## E2 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,2,3,3> E1 ## E2 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,0,0> E1 ## E3 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,0,1> E1 ## E3 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,0,2> E1 ## E3 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,0,3> E1 ## E3 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,1,0> E1 ## E3 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,1,1> E1 ## E3 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,1,2> E1 ## E3 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,1,3> E1 ## E3 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,2,0> E1 ## E3 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,2,1> E1 ## E3 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,2,2> E1 ## E3 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,2,3> E1 ## E3 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,3,0> E1 ## E3 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,3,1> E1 ## E3 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,3,2> E1 ## E3 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 1,3,3,3> E1 ## E3 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,0,0> E2 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,0,1> E2 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,0,2> E2 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,0,3> E2 ## E0 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,1,0> E2 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,1,1> E2 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,1,2> E2 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,1,3> E2 ## E0 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,2,0> E2 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,2,1> E2 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,2,2> E2 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,2,3> E2 ## E0 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,3,0> E2 ## E0 ## E3 ## E0; }; \ + struct { 
detail::_swizzle<4, T, Q, 2,0,3,1> E2 ## E0 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,3,2> E2 ## E0 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,0,3,3> E2 ## E0 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,0,0> E2 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,0,1> E2 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,0,2> E2 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,0,3> E2 ## E1 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,1,0> E2 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,1,1> E2 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,1,2> E2 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,1,3> E2 ## E1 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,2,0> E2 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,2,1> E2 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,2,2> E2 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,2,3> E2 ## E1 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,3,0> E2 ## E1 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,3,1> E2 ## E1 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,3,2> E2 ## E1 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,1,3,3> E2 ## E1 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,0,0> E2 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,0,1> E2 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,0,2> E2 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,0,3> E2 ## E2 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,1,0> E2 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,1,1> E2 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,1,2> E2 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,1,3> E2 ## E2 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,2,0> E2 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,2,1> E2 ## E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,2,2> E2 ## E2 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,2,3> E2 ## E2 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,3,0> E2 ## E2 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,3,1> E2 ## E2 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,3,2> E2 ## E2 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,2,3,3> E2 ## E2 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,0,0> E2 ## E3 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,0,1> E2 ## E3 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,0,2> E2 ## E3 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,0,3> E2 ## E3 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,1,0> E2 ## E3 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,1,1> E2 ## E3 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,1,2> E2 ## E3 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,1,3> E2 ## E3 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,2,0> E2 ## E3 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,2,1> E2 ## E3 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,2,2> E2 ## E3 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,2,3> E2 ## E3 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,3,0> E2 ## E3 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,3,1> E2 ## E3 ## E3 
## E1; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,3,2> E2 ## E3 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 2,3,3,3> E2 ## E3 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,0,0> E3 ## E0 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,0,1> E3 ## E0 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,0,2> E3 ## E0 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,0,3> E3 ## E0 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,1,0> E3 ## E0 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,1,1> E3 ## E0 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,1,2> E3 ## E0 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,1,3> E3 ## E0 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,2,0> E3 ## E0 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,2,1> E3 ## E0 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,2,2> E3 ## E0 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,2,3> E3 ## E0 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,3,0> E3 ## E0 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,3,1> E3 ## E0 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,3,2> E3 ## E0 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,0,3,3> E3 ## E0 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,0,0> E3 ## E1 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,0,1> E3 ## E1 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,0,2> E3 ## E1 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,0,3> E3 ## E1 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,1,0> E3 ## E1 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,1,1> E3 ## E1 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,1,2> E3 ## E1 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,1,3> E3 ## E1 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,2,0> E3 ## E1 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,2,1> E3 ## E1 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,2,2> E3 ## E1 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,2,3> E3 ## E1 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,3,0> E3 ## E1 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,3,1> E3 ## E1 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,3,2> E3 ## E1 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,1,3,3> E3 ## E1 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,0,0> E3 ## E2 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,0,1> E3 ## E2 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,0,2> E3 ## E2 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,0,3> E3 ## E2 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,1,0> E3 ## E2 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,1,1> E3 ## E2 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,1,2> E3 ## E2 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,1,3> E3 ## E2 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,2,0> E3 ## E2 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,2,1> E3 ## E2 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,2,2> E3 ## E2 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,2,3> E3 ## E2 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,3,0> E3 ## E2 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,3,1> E3 ## E2 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 
3,2,3,2> E3 ## E2 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,2,3,3> E3 ## E2 ## E3 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,0,0> E3 ## E3 ## E0 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,0,1> E3 ## E3 ## E0 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,0,2> E3 ## E3 ## E0 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,0,3> E3 ## E3 ## E0 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,1,0> E3 ## E3 ## E1 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,1,1> E3 ## E3 ## E1 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,1,2> E3 ## E3 ## E1 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,1,3> E3 ## E3 ## E1 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,2,0> E3 ## E3 ## E2 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,2,1> E3 ## E3 ## E2 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,2,2> E3 ## E3 ## E2 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,2,3> E3 ## E3 ## E2 ## E3; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,3,0> E3 ## E3 ## E3 ## E0; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,3,1> E3 ## E3 ## E3 ## E1; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,3,2> E3 ## E3 ## E3 ## E2; }; \ + struct { detail::_swizzle<4, T, Q, 3,3,3,3> E3 ## E3 ## E3 ## E3; }; diff --git a/includes/glm/detail/_swizzle_func.hpp b/includes/glm/detail/_swizzle_func.hpp new file mode 100644 index 0000000..a264ae9 --- /dev/null +++ b/includes/glm/detail/_swizzle_func.hpp @@ -0,0 +1,682 @@ +#pragma once + +#define GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, CONST, A, B) \ + GLM_FUNC_QUALIFIER vec<2, T, Q> A ## B() CONST \ + { \ + return vec<2, T, Q>(this->A, this->B); \ + } + +#define GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, CONST, A, B, C) \ + GLM_FUNC_QUALIFIER vec<3, T, Q> A ## B ## C() CONST \ + { \ + return vec<3, T, Q>(this->A, this->B, this->C); \ + } + +#define GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, CONST, A, B, C, D) \ + GLM_FUNC_QUALIFIER vec<4, T, Q> A ## B ## C ## D() CONST \ + { \ + return vec<4, T, Q>(this->A, this->B, this->C, this->D); \ + } + +#define GLM_SWIZZLE_GEN_VEC2_ENTRY_DEF(T, P, L, CONST, A, B) \ + template \ + GLM_FUNC_QUALIFIER vec vec::A ## B() CONST \ + { \ + return vec<2, T, Q>(this->A, this->B); \ + } + +#define GLM_SWIZZLE_GEN_VEC3_ENTRY_DEF(T, P, L, CONST, A, B, C) \ + template \ + GLM_FUNC_QUALIFIER vec<3, T, Q> vec::A ## B ## C() CONST \ + { \ + return vec<3, T, Q>(this->A, this->B, this->C); \ + } + +#define GLM_SWIZZLE_GEN_VEC4_ENTRY_DEF(T, P, L, CONST, A, B, C, D) \ + template \ + GLM_FUNC_QUALIFIER vec<4, T, Q> vec::A ## B ## C ## D() CONST \ + { \ + return vec<4, T, Q>(this->A, this->B, this->C, this->D); \ + } + +#define GLM_MUTABLE + +#define GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, 2, GLM_MUTABLE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, 2, GLM_MUTABLE, B, A) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC2(T, P) \ + GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, x, y) \ + GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, r, g) \ + GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, s, t) + +#define GLM_SWIZZLE_GEN_REF2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, B) + +#define GLM_SWIZZLE_GEN_REF3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + 
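Together with the anonymous-union members generated above, the _swizzle_func.hpp generators introduced next are what give the vendored glm::vec types their GLSL-style swizzles. Which spelling is available depends on GLM's swizzle configuration, which is not part of this hunk: the union members provide data-member swizzles (v.zyx), while the function generators provide member functions (v.zyx()). A usage sketch, assuming GLM_FORCE_SWIZZLE is defined and the compiler supports the data-member form:

#define GLM_FORCE_SWIZZLE   // must come before the first GLM include
#include <glm/glm.hpp>      // assumed to resolve to the vendored copy added by this patch
#include <iostream>

int main()
{
    glm::vec4 v(1.0f, 2.0f, 3.0f, 4.0f);

    glm::vec3 zyx = v.zyx;  // detail::_swizzle member, converts back to a plain vec3
    glm::vec2 ww  = v.ww;   // repeated components are read-only swizzles
    // Under the function-style configuration the same reads are spelled v.zyx() and v.ww().

    std::cout << zyx.x << " " << zyx.y << " " << zyx.z << " " << ww.x << "\n"; // 3 2 1 4
}
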
GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, C, B, A) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_REF3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_REF2_FROM_VEC3_SWIZZLE(T, P, A, B, C) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC3(T, P) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, x, y, z) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, r, g, b) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, s, t, p) + +#define GLM_SWIZZLE_GEN_REF2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, C) + +#define GLM_SWIZZLE_GEN_REF3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, C, B) + +#define GLM_SWIZZLE_GEN_REF4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, D, A, B) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, B, C, A) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_REF2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_REF3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_REF4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) + +#define GLM_SWIZZLE_GEN_REF_FROM_VEC4(T, P) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, x, y, z, w) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, r, g, b, a) \ + GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, s, t, p, q) + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, P, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B) + +#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC2_SWIZZLE(T, P, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B) + +#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC2_SWIZZLE(T, P, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, A, B) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, P, A, B) \ + GLM_SWIZZLE_GEN_VEC3_FROM_VEC2_SWIZZLE(T, P, A, B) \ + GLM_SWIZZLE_GEN_VEC4_FROM_VEC2_SWIZZLE(T, P, A, B) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, P) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, x, y) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, r, g) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, s, t) + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, 
const, C, C) + +#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, C) + +#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, B) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, C) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_FROM_VEC3_SWIZZLE(T, P, A, B, C) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, P) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, x, y, z) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, r, g, b) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, s, t, p) + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, A) \ + 
GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, D) + +#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, C) \ + 
GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, D) + +#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, 
P, const, A, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, A) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, D) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, C) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, D) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4(T, P) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, x, y, z, w) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, r, g, b, a) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, s, t, p, q) + diff --git a/includes/glm/detail/_vectorize.hpp b/includes/glm/detail/_vectorize.hpp new file mode 100644 index 0000000..807b1b1 --- /dev/null +++ b/includes/glm/detail/_vectorize.hpp @@ -0,0 +1,230 @@ +#pragma once + +namespace glm{ +namespace detail +{ + template class vec, length_t L, typename R, typename T, qualifier Q> + struct functor1{}; + + template class vec, typename R, typename T, qualifier Q> + struct functor1 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<1, R, Q> call(R (*Func) (T x), vec<1, T, Q> const& v) + { + return vec<1, R, Q>(Func(v.x)); + } + }; + + template class vec, typename R, typename T, qualifier Q> + struct functor1 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<2, R, Q> call(R (*Func) (T x), vec<2, T, Q> const& v) + { + return vec<2, R, Q>(Func(v.x), Func(v.y)); + } + }; + + template class vec, typename R, typename T, qualifier Q> + struct functor1 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<3, R, Q> call(R (*Func) (T x), vec<3, T, Q> const& v) + { + return vec<3, R, Q>(Func(v.x), Func(v.y), Func(v.z)); + } + }; + + template class vec, typename R, typename T, qualifier Q> + struct functor1 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, R, Q> call(R (*Func) (T x), vec<4, T, Q> const& v) + { + return vec<4, R, Q>(Func(v.x), Func(v.y), Func(v.z), Func(v.w)); + } + }; + + template class vec, length_t L, typename T, qualifier Q> + struct functor2{}; + + template class vec, typename T, qualifier Q> + struct functor2 + { + GLM_FUNC_QUALIFIER static vec<1, T, Q> call(T (*Func) (T x, T y), vec<1, T, Q> const& a, vec<1, T, Q> const& b) + { + return vec<1, T, Q>(Func(a.x, b.x)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<1, T, Q> call(Fct Func, vec<1, T, Q> const& a, vec<1, T, Q> const& b) + { + return vec<1, T, Q>(Func(a.x, b.x)); + } + }; + + template class vec, typename T, qualifier Q> + struct functor2 + { + GLM_FUNC_QUALIFIER static vec<2, T, Q> call(T (*Func) (T x, T y), vec<2, T, Q> const& a, vec<2, T, Q> const& b) + { + return vec<2, T, Q>(Func(a.x, b.x), Func(a.y, b.y)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<2, T, Q> call(Fct Func, vec<2, T, Q> const& a, vec<2, T, Q> const& b) + { + return vec<2, T, Q>(Func(a.x, b.x), Func(a.y, b.y)); + } + }; + + template class vec, typename T, qualifier Q> + struct functor2 + { + GLM_FUNC_QUALIFIER static vec<3, T, Q> call(T (*Func) (T x, T y), vec<3, T, Q> const& a, vec<3, T, Q> const& b) + { + return vec<3, T, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR 
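functor1 and functor2 in _vectorize.hpp are the dispatch helpers the rest of the vendored library uses to lift scalar functions onto vectors component by component. A standalone sketch of the same pattern (hypothetical Vec3/apply helpers, deliberately simplified to a single float vector type rather than GLM's templates):

#include <iostream>

struct Vec3 { float x, y, z; };

template <typename T>
Vec3 apply1(T (*func)(T), Vec3 const& v)                    // cf. detail::functor1
{
    return Vec3{ func(v.x), func(v.y), func(v.z) };
}

template <typename T>
Vec3 apply2(T (*func)(T, T), Vec3 const& a, Vec3 const& b)  // cf. detail::functor2
{
    return Vec3{ func(a.x, b.x), func(a.y, b.y), func(a.z, b.z) };
}

float half_of(float v)          { return 0.5f * v; }
float smaller(float a, float b) { return b < a ? b : a; }

int main()
{
    Vec3 v{ -1.5f, 2.0f, 8.0f };
    Vec3 h = apply1(half_of, v);                            // (-0.75, 1.0, 4.0)
    Vec3 m = apply2(smaller, v, Vec3{ 0.0f, 0.0f, 0.0f });  // (-1.5, 0.0, 0.0)
    std::cout << h.y << " " << m.x << "\n";                 // 1 -1.5
}
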
static vec<3, T, Q> call(Fct Func, vec<3, T, Q> const& a, vec<3, T, Q> const& b) + { + return vec<3, T, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z)); + } + }; + + template class vec, typename T, qualifier Q> + struct functor2 + { + GLM_FUNC_QUALIFIER static vec<4, T, Q> call(T (*Func) (T x, T y), vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + return vec<4, T, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z), Func(a.w, b.w)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(Fct Func, vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + return vec<4, T, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z), Func(a.w, b.w)); + } + }; + + template class vec, length_t L, typename T, qualifier Q> + struct functor2_vec_sca{}; + + template class vec, typename T, qualifier Q> + struct functor2_vec_sca + { + GLM_FUNC_QUALIFIER static vec<1, T, Q> call(T (*Func) (T x, T y), vec<1, T, Q> const& a, T b) + { + return vec<1, T, Q>(Func(a.x, b)); + } + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<1, T, Q> call(Fct Func, vec<1, T, Q> const& a, T b) + { + return vec<1, T, Q>(Func(a.x, b)); + } + }; + + template class vec, typename T, qualifier Q> + struct functor2_vec_sca + { + GLM_FUNC_QUALIFIER static vec<2, T, Q> call(T (*Func) (T x, T y), vec<2, T, Q> const& a, T b) + { + return vec<2, T, Q>(Func(a.x, b), Func(a.y, b)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<2, T, Q> call(Fct Func, vec<2, T, Q> const& a, T b) + { + return vec<2, T, Q>(Func(a.x, b), Func(a.y, b)); + } + }; + + template class vec, typename T, qualifier Q> + struct functor2_vec_sca + { + GLM_FUNC_QUALIFIER static vec<3, T, Q> call(T (*Func) (T x, T y), vec<3, T, Q> const& a, T b) + { + return vec<3, T, Q>(Func(a.x, b), Func(a.y, b), Func(a.z, b)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<3, T, Q> call(Fct Func, vec<3, T, Q> const& a, T b) + { + return vec<3, T, Q>(Func(a.x, b), Func(a.y, b), Func(a.z, b)); + } + }; + + template class vec, typename T, qualifier Q> + struct functor2_vec_sca + { + GLM_FUNC_QUALIFIER static vec<4, T, Q> call(T (*Func) (T x, T y), vec<4, T, Q> const& a, T b) + { + return vec<4, T, Q>(Func(a.x, b), Func(a.y, b), Func(a.z, b), Func(a.w, b)); + } + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(Fct Func, vec<4, T, Q> const& a, T b) + { + return vec<4, T, Q>(Func(a.x, b), Func(a.y, b), Func(a.z, b), Func(a.w, b)); + } + }; + + template + struct functor2_vec_int {}; + + template + struct functor2_vec_int<1, T, Q> + { + GLM_FUNC_QUALIFIER static vec<1, int, Q> call(int (*Func) (T x, int y), vec<1, T, Q> const& a, vec<1, int, Q> const& b) + { + return vec<1, int, Q>(Func(a.x, b.x)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<1, int, Q> call(Fct Func, vec<1, T, Q> const& a, vec<1, int, Q> const& b) + { + return vec<1, int, Q>(Func(a.x, b.x)); + } + }; + + template + struct functor2_vec_int<2, T, Q> + { + GLM_FUNC_QUALIFIER static vec<2, int, Q> call(int (*Func) (T x, int y), vec<2, T, Q> const& a, vec<2, int, Q> const& b) + { + return vec<2, int, Q>(Func(a.x, b.x), Func(a.y, b.y)); + } + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<2, int, Q> call(Fct Func, vec<2, T, Q> const& a, vec<2, int, Q> const& b) + { + return vec<2, int, Q>(Func(a.x, b.x), Func(a.y, b.y)); + } + }; + + template + struct functor2_vec_int<3, T, Q> + { + GLM_FUNC_QUALIFIER static vec<3, int, Q> call(int (*Func) (T x, int y), vec<3, T, Q> const& a, vec<3, int, Q> const& b) + { + return vec<3, int, 
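The functor2_vec_sca helpers cover the mixed vector/scalar overloads (for example min and max with a scalar second argument), and functor2_vec_int is their vector-plus-integer-vector counterpart. At usage level, chaining the scalar overloads gives a componentwise clamp; this assumes the vendored headers resolve via <glm/glm.hpp>:

#include <glm/glm.hpp>  // assumed to resolve to the vendored copy added by this patch
#include <iostream>

int main()
{
    glm::vec3 v(-0.5f, 0.25f, 1.75f);

    // min(vec, scalar) and max(vec, scalar) dispatch through the vec/scalar helpers;
    // chaining them clamps every component into [0, 1].
    glm::vec3 saturated = glm::min(glm::max(v, 0.0f), 1.0f);

    std::cout << saturated.x << " " << saturated.y << " " << saturated.z << "\n"; // 0 0.25 1
}
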
Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z)); + } + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<3, int, Q> call(Fct Func, vec<3, T, Q> const& a, vec<3, int, Q> const& b) + { + return vec<3, int, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z)); + } + }; + + template + struct functor2_vec_int<4, T, Q> + { + GLM_FUNC_QUALIFIER static vec<4, int, Q> call(int (*Func) (T x, int y), vec<4, T, Q> const& a, vec<4, int, Q> const& b) + { + return vec<4, int, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z), Func(a.w, b.w)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, int, Q> call(Fct Func, vec<4, T, Q> const& a, vec<4, int, Q> const& b) + { + return vec<4, int, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z), Func(a.w, b.w)); + } + }; +}//namespace detail +}//namespace glm diff --git a/includes/glm/detail/compute_common.hpp b/includes/glm/detail/compute_common.hpp new file mode 100644 index 0000000..83362bc --- /dev/null +++ b/includes/glm/detail/compute_common.hpp @@ -0,0 +1,50 @@ +#pragma once + +#include "setup.hpp" +#include + +namespace glm{ +namespace detail +{ + template + struct compute_abs + {}; + + template + struct compute_abs + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genFIType call(genFIType x) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits::is_signed, + "'abs' only accept floating-point and integer scalar or vector inputs"); + + return x >= genFIType(0) ? x : -x; + // TODO, perf comp with: *(((int *) &x) + 1) &= 0x7fffffff; + } + }; + +#if (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP) + template<> + struct compute_abs + { + GLM_FUNC_QUALIFIER static float call(float x) + { + return fabsf(x); + } + }; +#endif + + template + struct compute_abs + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genFIType call(genFIType x) + { + GLM_STATIC_ASSERT( + (!std::numeric_limits::is_signed && std::numeric_limits::is_integer), + "'abs' only accept floating-point and integer scalar or vector inputs"); + return x; + } + }; +}//namespace detail +}//namespace glm diff --git a/includes/glm/detail/compute_vector_decl.hpp b/includes/glm/detail/compute_vector_decl.hpp new file mode 100644 index 0000000..709f8e4 --- /dev/null +++ b/includes/glm/detail/compute_vector_decl.hpp @@ -0,0 +1,190 @@ + +#pragma once +#include +#include "_vectorize.hpp" + +namespace glm { + namespace detail + { + template + struct compute_vec_add {}; + + template + struct compute_vec_sub {}; + + template + struct compute_vec_mul {}; + + template + struct compute_vec_div {}; + + template + struct compute_vec_mod {}; + + template + struct compute_splat {}; + + template + struct compute_vec_and {}; + + template + struct compute_vec_or {}; + + template + struct compute_vec_xor {}; + + template + struct compute_vec_shift_left {}; + + template + struct compute_vec_shift_right {}; + + template + struct compute_vec_equal {}; + + template + struct compute_vec_nequal {}; + + template + struct compute_vec_bitwise_not {}; + + template + struct compute_vec_add + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a, vec const& b) + { + return detail::functor2::call(std::plus(), a, b); + } + }; + + template + struct compute_vec_sub + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a, vec const& b) + { + return detail::functor2::call(std::minus(), a, b); + } + }; + + template + struct compute_vec_mul + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a, vec const& b) + { + 
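compute_common.hpp dispatches abs on whether the type is signed: the signed (and floating-point) specialization returns x >= 0 ? x : -x, while the unsigned one returns x unchanged. A minimal standalone sketch of that dispatch, using a hypothetical abs_impl and omitting GLM's qualifier and static-assert machinery:

#include <iostream>
#include <limits>

template <typename T, bool IsSigned = std::numeric_limits<T>::is_signed>
struct abs_impl
{
    static T call(T x) { return x >= T(0) ? x : -x; }  // signed or floating-point path
};

template <typename T>
struct abs_impl<T, false>
{
    static T call(T x) { return x; }  // unsigned values are already non-negative
};

int main()
{
    std::cout << abs_impl<int>::call(-7) << " "         // 7
              << abs_impl<unsigned>::call(7u) << " "    // 7
              << abs_impl<float>::call(-2.5f) << "\n";  // 2.5
}
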
return detail::functor2::call(std::multiplies(), a, b); + } + }; + + template + struct compute_vec_div + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a, vec const& b) + { + return detail::functor2::call(std::divides(), a, b); + } + }; + + template + struct compute_vec_mod + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a, vec const& b) + { + return detail::functor2::call(std::modulus(), a, b); + } + }; + + template + struct compute_vec_and + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a, vec const& b) + { + vec v(a); + for (length_t i = 0; i < L; ++i) + v[i] &= static_cast(b[i]); + return v; + } + }; + + template + struct compute_vec_or + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a, vec const& b) + { + vec v(a); + for (length_t i = 0; i < L; ++i) + v[i] |= static_cast(b[i]); + return v; + } + }; + + template + struct compute_vec_xor + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a, vec const& b) + { + vec v(a); + for (length_t i = 0; i < L; ++i) + v[i] ^= static_cast(b[i]); + return v; + } + }; + + template + struct compute_vec_shift_left + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a, vec const& b) + { + vec v(a); + for (length_t i = 0; i < L; ++i) + v[i] <<= static_cast(b[i]); + return v; + } + }; + + template + struct compute_vec_shift_right + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a, vec const& b) + { + vec v(a); + for (length_t i = 0; i < L; ++i) + v[i] >>= static_cast(b[i]); + return v; + } + }; + + template + struct compute_vec_equal + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(vec const& v1, vec const& v2) + { + bool b = true; + for (length_t i = 0; b && i < L; ++i) + b = detail::compute_equal::is_iec559>::call(v1[i], v2[i]); + return b; + } + }; + + template + struct compute_vec_nequal + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return !compute_vec_equal::value, sizeof(T) * 8, detail::is_aligned::value>::call(v1, v2); + } + }; + + template + struct compute_vec_bitwise_not + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a) + { + vec v(a); + for (length_t i = 0; i < L; ++i) + v[i] = ~v[i]; + return v; + } + }; + + } +} diff --git a/includes/glm/detail/compute_vector_relational.hpp b/includes/glm/detail/compute_vector_relational.hpp new file mode 100644 index 0000000..167b634 --- /dev/null +++ b/includes/glm/detail/compute_vector_relational.hpp @@ -0,0 +1,30 @@ +#pragma once + +//#include "compute_common.hpp" +#include "setup.hpp" +#include + +namespace glm{ +namespace detail +{ + template + struct compute_equal + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(T a, T b) + { + return a == b; + } + }; +/* + template + struct compute_equal + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(T a, T b) + { + return detail::compute_abs::is_signed>::call(b - a) <= static_cast(0); + //return std::memcmp(&a, &b, sizeof(T)) == 0; + } + }; +*/ +}//namespace detail +}//namespace glm diff --git a/includes/glm/detail/func_common.inl b/includes/glm/detail/func_common.inl new file mode 100644 index 0000000..f0397d4 --- /dev/null +++ b/includes/glm/detail/func_common.inl @@ -0,0 +1,936 @@ +/// @ref core +/// @file glm/detail/func_common.inl + +#include "../vector_relational.hpp" +#include "compute_common.hpp" +#include "type_vec1.hpp" +#include "type_vec2.hpp" +#include "type_vec3.hpp" +#include "type_vec4.hpp" +#include "_vectorize.hpp" +#include + 
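The compute_vec_* backends above supply the componentwise arithmetic (via functor2 and the std:: function objects), the bitwise loops, and the equality test behind the vector operators defined elsewhere in this patch. A usage-level sketch, assuming the vendored <glm/glm.hpp> is on the include path:

#include <glm/glm.hpp>  // assumed to resolve to the vendored copy added by this patch
#include <iostream>

int main()
{
    glm::ivec3 a(1, 2, 3), b(4, 5, 6);

    glm::ivec3 sum    = a + b;    // compute_vec_add -> functor2 with std::plus
    glm::ivec3 masked = a & b;    // compute_vec_and's componentwise loop
    bool       equal  = (a == b); // compute_vec_equal / compute_vec_nequal

    std::cout << sum.x << "," << sum.y << "," << sum.z
              << " equal=" << std::boolalpha << equal << "\n"; // 5,7,9 equal=false
}
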
+namespace glm +{ + // min + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType min(genType x, genType y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits::is_integer, "'min' only accept floating-point or integer inputs"); + return (y < x) ? y : x; + } + + template + struct TMin { + T operator()(const T& a, const T& b) { return min(a, b); } + }; + + // max + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType max(genType x, genType y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits::is_integer, "'max' only accept floating-point or integer inputs"); + + return (x < y) ? y : x; + } + + template + struct TMax { + T operator()(const T& a, const T& b) { return max(a, b); } + }; + + // abs + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR int abs(int x) + { + int const y = x >> (sizeof(int) * 8 - 1); + return (x ^ y) - y; + } + + template + struct TAbs { + T operator()(const T& a) { return abs(a); } + }; + + // round +# if GLM_HAS_CXX11_STL + using ::std::round; +# else + template + GLM_FUNC_QUALIFIER genType round(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'round' only accept floating-point inputs"); + + return x < static_cast(0) ? static_cast(int(x - static_cast(0.5))) : static_cast(int(x + static_cast(0.5))); + } +# endif + + template + struct TRound { + T operator()(const T& a) { return round(a); } + }; + + // trunc +# if GLM_HAS_CXX11_STL + using ::std::trunc; +# else + template + GLM_FUNC_QUALIFIER genType trunc(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'trunc' only accept floating-point inputs"); + + return x < static_cast(0) ? -std::floor(-x) : std::floor(x); + } +# endif + + template + struct TTrunc { + T operator()(const T& a) { return trunc(a); } + }; + + template + struct TFmod { + T operator()(const T& a, const T& b) { return std::fmod(a, b); } + }; + +}//namespace glm + +namespace glm{ +namespace detail +{ + template + struct compute_abs_vector + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& x) + { + return detail::functor1::call(abs, x); + } + }; + + template + struct compute_mix_vector + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y, vec const& a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a"); + + return vec(vec(x) * (static_cast(1) - a) + vec(y) * a); + } + }; + + template + struct compute_mix_vector + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y, vec const& a) + { + vec Result(0); + for(length_t i = 0; i < x.length(); ++i) + Result[i] = a[i] ? y[i] : x[i]; + return Result; + } + }; + + template + struct compute_mix_scalar + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y, U const& a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a"); + + return vec(vec(x) * (static_cast(1) - a) + vec(y) * a); + } + }; + + template + struct compute_mix_scalar + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y, bool const& a) + { + return a ? 
y : x; + } + }; + + template + struct compute_mix + { + GLM_FUNC_QUALIFIER static T call(T const& x, T const& y, U const& a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a"); + + return static_cast(static_cast(x) * (static_cast(1) - a) + static_cast(y) * a); + } + }; + + template + struct compute_mix + { + GLM_FUNC_QUALIFIER static T call(T const& x, T const& y, bool const& a) + { + return a ? y : x; + } + }; + + template + struct compute_sign + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return vec(glm::lessThan(vec(0), x)) - vec(glm::lessThan(x, vec(0))); + } + }; + +# if GLM_ARCH == GLM_ARCH_X86 + template + struct compute_sign + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + T const Shift(static_cast(sizeof(T) * 8 - 1)); + vec const y(vec::type, Q>(-x) >> typename detail::make_unsigned::type(Shift)); + + return (x >> Shift) | y; + } + }; +# endif + + template + struct compute_floor + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(std::floor, x); + } + }; + + template + struct compute_ceil + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(std::ceil, x); + } + }; + + template + struct compute_fract + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return x - floor(x); + } + }; + + template + struct compute_trunc + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(trunc, x); + } + }; + + template + struct compute_round + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(round, x); + } + }; + + template + struct compute_mod + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'mod' only accept floating-point inputs. 
Include for integer inputs."); + return a - b * floor(a / b); + } + }; + + template + struct compute_fma + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b, vec const& c) + { + return a * b + c; + } + }; + + template + struct compute_min_vector + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y) + { + return detail::functor2::call(TMin(), x, y); + } + }; + + template + struct compute_max_vector + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y) + { + return detail::functor2::call(TMax(), x, y); + } + }; + + template + struct compute_clamp_vector + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& minVal, vec const& maxVal) + { + return min(max(x, minVal), maxVal); + } + }; + + template + struct compute_step_vector + { + GLM_FUNC_QUALIFIER static vec call(vec const& edge, vec const& x) + { + return mix(vec(1), vec(0), glm::lessThan(x, edge)); + } + }; + + template + struct compute_smoothstep_vector + { + GLM_FUNC_QUALIFIER static vec call(vec const& edge0, vec const& edge1, vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'smoothstep' only accept floating-point inputs"); + vec const tmp(clamp((x - edge0) / (edge1 - edge0), static_cast(0), static_cast(1))); + return tmp * tmp * (static_cast(3) - static_cast(2) * tmp); + } + }; + + template + struct convert_vec3_to_vec4W0 + { + GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<3, T, Q> const& a) + { + return vec<4, T, Q>(a.x, a.y, a.z, 0.0f); + } + }; + + template + struct convert_vec3_to_vec4WZ + { + GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<3, T, Q> const& a) + { + return vec<4, T, Q>(a.x, a.y, a.z, a.z); + } + }; + + template + struct convert_vec3_to_vec4W1 + { + GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<3, T, Q> const& a) + { + return vec<4, T, Q>(a.x, a.y, a.z, 1.0f); + } + }; + + template + struct convert_vec4_to_vec3 + { + GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<3, T, Q> const& a) + { + return vec<4, T, Q>(a.x, a.y, a.z, 0.0f); + } + }; + + template + struct convert_splat { + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a) + { + vec v; + for (int i = 0; i < L; ++i) + v[i] = a[c]; + return v; + } + }; + + +}//namespace detail + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genFIType abs(genFIType x) + { + return detail::compute_abs::is_signed>::call(x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec abs(vec const& x) + { + return detail::compute_abs_vector::value>::call(x); + } + + // sign + // fast and works for any type + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genFIType sign(genFIType x) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || (std::numeric_limits::is_signed && std::numeric_limits::is_integer), + "'sign' only accept signed inputs"); + + return detail::compute_sign<1, genFIType, defaultp, + std::numeric_limits::is_iec559, detail::is_aligned::value>::call(vec<1, genFIType>(x)).x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec sign(vec const& x) + { + GLM_STATIC_ASSERT( + std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || (std::numeric_limits::is_signed && std::numeric_limits::is_integer), + "'sign' only accept signed inputs"); + + return detail::compute_sign::is_iec559, detail::is_aligned::value>::call(x); + } + + // floor + using ::std::floor; + template + GLM_FUNC_QUALIFIER vec floor(vec const& x) + { + 
GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'floor' only accept floating-point inputs."); + return detail::compute_floor::value>::call(x); + } + + template + GLM_FUNC_QUALIFIER vec trunc(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'trunc' only accept floating-point inputs"); + return detail::compute_trunc::value>::call(x); + } + + template + GLM_FUNC_QUALIFIER vec round(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'round' only accept floating-point inputs"); + return detail::compute_round::value>::call(x); + } + +/* + // roundEven + template + GLM_FUNC_QUALIFIER genType roundEven(genType const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'roundEven' only accept floating-point inputs"); + + return genType(int(x + genType(int(x) % 2))); + } +*/ + + // roundEven + template + GLM_FUNC_QUALIFIER genType roundEven(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'roundEven' only accept floating-point inputs"); + + int Integer = static_cast(x); + genType IntegerPart = static_cast(Integer); + genType FractionalPart = fract(x); + + if(FractionalPart > static_cast(0.5) || FractionalPart < static_cast(0.5)) + { + return round(x); + } + else if((Integer % 2) == 0) + { + return IntegerPart; + } + else if(x <= static_cast(0)) // Work around... + { + return IntegerPart - static_cast(1); + } + else + { + return IntegerPart + static_cast(1); + } + //else // Bug on MinGW 4.5.2 + //{ + // return mix(IntegerPart + genType(-1), IntegerPart + genType(1), x <= genType(0)); + //} + } + + template + GLM_FUNC_QUALIFIER vec roundEven(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'roundEven' only accept floating-point inputs"); + return detail::functor1::call(roundEven, x); + } + + // ceil + using ::std::ceil; + template + GLM_FUNC_QUALIFIER vec ceil(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'ceil' only accept floating-point inputs"); + return detail::compute_ceil::value>::call(x); + } + + // fract + template + GLM_FUNC_QUALIFIER genType fract(genType x) + { + return fract(vec<1, genType>(x)).x; + } + + template + GLM_FUNC_QUALIFIER vec fract(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fract' only accept floating-point inputs"); + return detail::compute_fract::value>::call(x); + } + + // mod + template + GLM_FUNC_QUALIFIER genType mod(genType x, genType y) + { +# if (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP) + // Another Cuda compiler bug https://github.com/g-truc/glm/issues/530 + vec<1, genType, defaultp> Result(mod(vec<1, genType, defaultp>(x), y)); + return Result.x; +# else + return mod(vec<1, genType, defaultp>(x), y).x; +# endif + } + + template + GLM_FUNC_QUALIFIER vec mod(vec const& x, T y) + { + return detail::compute_mod::value>::call(x, vec(y)); + } + + template + GLM_FUNC_QUALIFIER vec mod(vec const& x, vec const& y) + { + return detail::compute_mod::value>::call(x, y); + } + + template + GLM_FUNC_QUALIFIER vec fma(vec const& a, vec const& b, vec const& c) + { + return detail::compute_fma::value>::call(a, b, c); + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> xyz0(vec<3, T, Q> const& a) + { + return 
detail::convert_vec3_to_vec4W0::value>::call(a); + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> xyz1(vec<3, T, Q> const& a) + { + return detail::convert_vec3_to_vec4W1::value>::call(a); + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> xyzz(vec<3, T, Q> const& a) + { + return detail::convert_vec3_to_vec4WZ::value>::call(a); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> xyz(vec<4, T, Q> const& a) + { + return detail::convert_vec4_to_vec3::value>::call(a); + } + + template + GLM_FUNC_QUALIFIER vec splatX(vec const& a) + { + return detail::convert_splat::value>::template call<0>(a); + } + + template + GLM_FUNC_QUALIFIER vec splatY(vec const& a) + { + return detail::convert_splat::value>::template call<1>(a); + } + + template + GLM_FUNC_QUALIFIER vec splatZ(vec const& a) + { + return detail::convert_splat::value>::template call<2>(a); + } + + template + GLM_FUNC_QUALIFIER vec splatW(vec const& a) + { + return detail::convert_splat::value>::template call<3>(a); + } + + + // modf + template + GLM_FUNC_QUALIFIER genType modf(genType x, genType & i) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'modf' only accept floating-point inputs"); + return std::modf(x, &i); + } + + template + GLM_FUNC_QUALIFIER vec<1, T, Q> modf(vec<1, T, Q> const& x, vec<1, T, Q> & i) + { + return vec<1, T, Q>( + modf(x.x, i.x)); + } + + template + GLM_FUNC_QUALIFIER vec<2, T, Q> modf(vec<2, T, Q> const& x, vec<2, T, Q> & i) + { + return vec<2, T, Q>( + modf(x.x, i.x), + modf(x.y, i.y)); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> modf(vec<3, T, Q> const& x, vec<3, T, Q> & i) + { + return vec<3, T, Q>( + modf(x.x, i.x), + modf(x.y, i.y), + modf(x.z, i.z)); + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> modf(vec<4, T, Q> const& x, vec<4, T, Q> & i) + { + return vec<4, T, Q>( + modf(x.x, i.x), + modf(x.y, i.y), + modf(x.z, i.z), + modf(x.w, i.w)); + } + + //// Only valid if (INT_MIN <= x-y <= INT_MAX) + //// min(x,y) + //r = y + ((x - y) & ((x - y) >> (sizeof(int) * + //CHAR_BIT - 1))); + //// max(x,y) + //r = x - ((x - y) & ((x - y) >> (sizeof(int) * + //CHAR_BIT - 1))); + + // min + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& a, T b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits::is_integer, "'min' only accept floating-point or integer inputs"); + return detail::compute_min_vector::value>::call(a, vec(b)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& a, vec const& b) + { + return detail::compute_min_vector::value>::call(a, b); + } + + // max + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& a, T b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits::is_integer, "'max' only accept floating-point or integer inputs"); + return detail::compute_max_vector::value>::call(a, vec(b)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& a, vec const& b) + { + return detail::compute_max_vector::value>::call(a, b); + } + + // clamp + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType clamp(genType x, genType minVal, genType maxVal) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits::is_integer, "'clamp' only accept floating-point or integer inputs"); + return min(max(x, minVal), maxVal); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec clamp(vec const& x, T minVal, T maxVal) + { + 
GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits::is_integer, "'clamp' only accept floating-point or integer inputs"); + return detail::compute_clamp_vector::value>::call(x, vec(minVal), vec(maxVal)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec clamp(vec const& x, vec const& minVal, vec const& maxVal) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || std::numeric_limits::is_integer, "'clamp' only accept floating-point or integer inputs"); + return detail::compute_clamp_vector::value>::call(x, minVal, maxVal); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genTypeT mix(genTypeT x, genTypeT y, genTypeU a) + { + return detail::compute_mix::call(x, y, a); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec mix(vec const& x, vec const& y, U a) + { + return detail::compute_mix_scalar::value>::call(x, y, a); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec mix(vec const& x, vec const& y, vec const& a) + { + return detail::compute_mix_vector::value>::call(x, y, a); + } + + // step + template + GLM_FUNC_QUALIFIER genType step(genType edge, genType x) + { + return mix(static_cast(1), static_cast(0), x < edge); + } + + template + GLM_FUNC_QUALIFIER vec step(T edge, vec const& x) + { + return detail::compute_step_vector::value>::call(vec(edge), x); + } + + template + GLM_FUNC_QUALIFIER vec step(vec const& edge, vec const& x) + { + return detail::compute_step_vector::value>::call(edge, x); + } + + // smoothstep + template + GLM_FUNC_QUALIFIER genType smoothstep(genType edge0, genType edge1, genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'smoothstep' only accept floating-point inputs"); + + genType const tmp(clamp((x - edge0) / (edge1 - edge0), genType(0), genType(1))); + return tmp * tmp * (genType(3) - genType(2) * tmp); + } + + template + GLM_FUNC_QUALIFIER vec smoothstep(T edge0, T edge1, vec const& x) + { + return detail::compute_smoothstep_vector::value>::call(vec(edge0), vec(edge1), x); + } + + template + GLM_FUNC_QUALIFIER vec smoothstep(vec const& edge0, vec const& edge1, vec const& x) + { + return detail::compute_smoothstep_vector::value>::call(edge0, edge1, x); + } + +# if GLM_HAS_CXX11_STL + using std::isnan; +# else + template + GLM_FUNC_QUALIFIER bool isnan(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isnan' only accept floating-point inputs"); + +# if GLM_HAS_CXX11_STL + return std::isnan(x); +# elif GLM_COMPILER & GLM_COMPILER_VC + return _isnan(x) != 0; +# elif GLM_COMPILER & GLM_COMPILER_INTEL +# if GLM_PLATFORM & GLM_PLATFORM_WINDOWS + return _isnan(x) != 0; +# else + return ::isnan(x) != 0; +# endif +# elif (GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG)) && (GLM_PLATFORM & GLM_PLATFORM_ANDROID) && __cplusplus < 201103L + return _isnan(x) != 0; +# elif (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP) + return ::isnan(x) != 0; +# else + return std::isnan(x); +# endif + } +# endif + + template + GLM_FUNC_QUALIFIER vec isnan(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isnan' only accept floating-point inputs"); + + vec Result(0); + for (length_t l = 0; l < v.length(); ++l) + Result[l] = glm::isnan(v[l]); + return Result; + } + +# if GLM_HAS_CXX11_STL + using std::isinf; +# else + template + GLM_FUNC_QUALIFIER bool isinf(genType x) 
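The step and smoothstep wrappers above follow the GLSL definitions: step selects 0 or 1 around an edge, and smoothstep clamps (x - edge0) / (edge1 - edge0) to [0, 1] and applies the Hermite polynomial t*t*(3 - 2*t). A scalar sketch of both, with plain floats standing in for the templated GLM code:

#include <algorithm>
#include <cstdio>

// Scalar illustrations of the GLSL-style helpers defined above (not GLM's API).
float step_scalar(float edge, float x)
{
    return x < edge ? 0.0f : 1.0f;
}

float smoothstep_scalar(float edge0, float edge1, float x)
{
    // clamp via min(max(...)), mirroring compute_clamp_vector above
    float t = std::min(std::max((x - edge0) / (edge1 - edge0), 0.0f), 1.0f);
    return t * t * (3.0f - 2.0f * t); // Hermite interpolation, as in compute_smoothstep_vector
}

int main()
{
    std::printf("step(0.5, 0.3)         = %g\n", step_scalar(0.5f, 0.3f));               // 0
    std::printf("smoothstep(0, 1, 0.25) = %g\n", smoothstep_scalar(0.0f, 1.0f, 0.25f));  // 0.15625
}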
+ { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isinf' only accept floating-point inputs"); + +# if GLM_HAS_CXX11_STL + return std::isinf(x); +# elif GLM_COMPILER & (GLM_COMPILER_INTEL | GLM_COMPILER_VC) +# if(GLM_PLATFORM & GLM_PLATFORM_WINDOWS) + return _fpclass(x) == _FPCLASS_NINF || _fpclass(x) == _FPCLASS_PINF; +# else + return ::isinf(x); +# endif +# elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG) +# if(GLM_PLATFORM & GLM_PLATFORM_ANDROID && __cplusplus < 201103L) + return _isinf(x) != 0; +# else + return std::isinf(x); +# endif +# elif (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP) + // http://developer.download.nvidia.com/compute/cuda/4_2/rel/toolkit/docs/online/group__CUDA__MATH__DOUBLE_g13431dd2b40b51f9139cbb7f50c18fab.html#g13431dd2b40b51f9139cbb7f50c18fab + return ::isinf(double(x)) != 0; +# else + return std::isinf(x); +# endif + } +# endif + + template + GLM_FUNC_QUALIFIER vec isinf(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isinf' only accept floating-point inputs"); + + vec Result(0); + for (length_t l = 0; l < v.length(); ++l) + Result[l] = glm::isinf(v[l]); + return Result; + } + + GLM_FUNC_QUALIFIER int floatBitsToInt(float v) + { + union + { + float in; + int out; + } u; + + u.in = v; + + return u.out; + } + + template + GLM_FUNC_QUALIFIER vec floatBitsToInt(vec const& v) + { + return detail::functor1::call(floatBitsToInt, v); + } + + GLM_FUNC_QUALIFIER uint floatBitsToUint(float v) + { + union + { + float in; + uint out; + } u; + + u.in = v; + + return u.out; + } + + template + GLM_FUNC_QUALIFIER vec floatBitsToUint(vec const& v) + { + return detail::functor1::call(floatBitsToUint, v); + } + + GLM_FUNC_QUALIFIER float intBitsToFloat(int v) + { + union + { + int in; + float out; + } u; + + u.in = v; + + return u.out; + } + + template + GLM_FUNC_QUALIFIER vec intBitsToFloat(vec const& v) + { + return detail::functor1::call(intBitsToFloat, v); + } + + GLM_FUNC_QUALIFIER float uintBitsToFloat(uint v) + { + union + { + uint in; + float out; + } u; + + u.in = v; + + return u.out; + } + + template + GLM_FUNC_QUALIFIER vec uintBitsToFloat(vec const& v) + { + return reinterpret_cast&>(const_cast&>(v)); + } + +# if GLM_HAS_CXX11_STL + using std::fma; +# else + template + GLM_FUNC_QUALIFIER genType fma(genType const& a, genType const& b, genType const& c) + { + return a * b + c; + } +# endif + + template + GLM_FUNC_QUALIFIER genType frexp(genType x, int& exp) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'frexp' only accept floating-point inputs"); + + return std::frexp(x, &exp); + } + + template + GLM_FUNC_QUALIFIER vec frexp(vec const& v, vec& exp) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'frexp' only accept floating-point inputs"); + + vec Result(0); + for (length_t l = 0; l < v.length(); ++l) + Result[l] = std::frexp(v[l], &exp[l]); + return Result; + } + + template + GLM_FUNC_QUALIFIER genType ldexp(genType const& x, int const& exp) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'ldexp' only accept floating-point inputs"); + + return std::ldexp(x, exp); + } + + template + GLM_FUNC_QUALIFIER vec ldexp(vec const& v, vec const& exp) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'ldexp' only accept floating-point inputs"); + + vec Result(0); + for (length_t l = 
0; l < v.length(); ++l) + Result[l] = std::ldexp(v[l], exp[l]); + return Result; + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_common_simd.inl" +#endif diff --git a/includes/glm/detail/func_common_simd.inl b/includes/glm/detail/func_common_simd.inl new file mode 100644 index 0000000..55c7228 --- /dev/null +++ b/includes/glm/detail/func_common_simd.inl @@ -0,0 +1,613 @@ +/// @ref core +/// @file glm/detail/func_common_simd.inl + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +#include "../simd/common.h" + +#include + +namespace glm{ +namespace detail +{ + template + struct compute_abs_vector<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> result; + result.data = glm_vec4_abs(v.data); + return result; + } + }; + + template + struct compute_abs_vector<4, int, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v) + { + vec<4, int, Q> result; + result.data = glm_ivec4_abs(v.data); + return result; + } + }; + + template + struct compute_floor<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> result; + result.data = glm_vec4_floor(v.data); + return result; + } + }; + + template + struct compute_ceil<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> result; + result.data = glm_vec4_ceil(v.data); + return result; + } + }; + + template + struct compute_fract<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> result; + result.data = glm_vec4_fract(v.data); + return result; + } + }; + + template + struct compute_round<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> result; + result.data = glm_vec4_round(v.data); + return result; + } + }; + + template + struct compute_mod<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& y) + { + vec<4, float, Q> result; + result.data = glm_vec4_mod(x.data, y.data); + return result; + } + }; + + template + struct compute_min_vector<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) + { + vec<4, float, Q> result; + result.data = _mm_min_ps(v1.data, v2.data); + return result; + } + }; + + template + struct compute_min_vector<4, int, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) + { + vec<4, int, Q> result; + result.data = _mm_min_epi32(v1.data, v2.data); + return result; + } + }; + + template + struct compute_min_vector<4, uint, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2) + { + vec<4, uint, Q> result; + result.data = _mm_min_epu32(v1.data, v2.data); + return result; + } + }; + + template + struct compute_max_vector<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) + { + vec<4, float, Q> result; + result.data = _mm_max_ps(v1.data, v2.data); + return result; + } + }; + + template + struct compute_max_vector<4, int, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) + { + vec<4, int, Q> result; + result.data = _mm_max_epi32(v1.data, 
v2.data); + return result; + } + }; + + template + struct compute_max_vector<4, uint, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2) + { + vec<4, uint, Q> result; + result.data = _mm_max_epu32(v1.data, v2.data); + return result; + } + }; + + template + struct compute_clamp_vector<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& minVal, vec<4, float, Q> const& maxVal) + { + vec<4, float, Q> result; + result.data = _mm_min_ps(_mm_max_ps(x.data, minVal.data), maxVal.data); + return result; + } + }; + + template + struct compute_clamp_vector<4, int, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& x, vec<4, int, Q> const& minVal, vec<4, int, Q> const& maxVal) + { + vec<4, int, Q> result; + result.data = _mm_min_epi32(_mm_max_epi32(x.data, minVal.data), maxVal.data); + return result; + } + }; + + template + struct compute_clamp_vector<4, uint, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& x, vec<4, uint, Q> const& minVal, vec<4, uint, Q> const& maxVal) + { + vec<4, uint, Q> result; + result.data = _mm_min_epu32(_mm_max_epu32(x.data, minVal.data), maxVal.data); + return result; + } + }; + + template + struct compute_mix_vector<4, float, bool, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& y, vec<4, bool, Q> const& a) + { + __m128i const Load = _mm_set_epi32(-static_cast(a.w), -static_cast(a.z), -static_cast(a.y), -static_cast(a.x)); + __m128 const Mask = _mm_castsi128_ps(Load); + + vec<4, float, Q> Result; +# if 0 && GLM_ARCH & GLM_ARCH_AVX + Result.data = _mm_blendv_ps(x.data, y.data, Mask); +# else + Result.data = _mm_or_ps(_mm_and_ps(Mask, y.data), _mm_andnot_ps(Mask, x.data)); +# endif + return Result; + } + }; +/* FIXME + template + struct compute_step_vector + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& edge, vec<4, float, Q> const& x) + { + vec<4, float, Q> Result; + result.data = glm_vec4_step(edge.data, x.data); + return result; + } + }; +*/ + template + struct compute_smoothstep_vector<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& edge0, vec<4, float, Q> const& edge1, vec<4, float, Q> const& x) + { + vec<4, float, Q> Result; + Result.data = glm_vec4_smoothstep(edge0.data, edge1.data, x.data); + return Result; + } + }; + + template + struct compute_fma<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b, vec<4, float, Q> const& c) + { + vec<4, float, Q> Result; + Result.data = glm_vec4_fma(a.data, b.data, c.data); + return Result; + } + }; + + template + struct compute_fma<3, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<3, float, Q> call(vec<3, float, Q> const& a, vec<3, float, Q> const& b, vec<3, float, Q> const& c) + { + vec<3, float, Q> Result; + Result.data = glm_vec4_fma(a.data, b.data, c.data); + return Result; + } + }; + + + template + struct compute_fma<4, double, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b, vec<4, double, Q> const& c) + { + vec<4, double, Q> Result; +# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && !(GLM_COMPILER & GLM_COMPILER_CLANG) + Result.data = _mm256_fmadd_pd(a.data, b.data, c.data); +# elif (GLM_ARCH & GLM_ARCH_AVX_BIT) + Result.data = 
_mm256_add_pd(_mm256_mul_pd(a.data, b.data), c.data); +# else + Result.data.setv(0, _mm_add_pd(_mm_mul_pd(a.data.getv(0), b.data.getv(0)), c.data.getv(0))); + Result.data.setv(1, _mm_add_pd(_mm_mul_pd(a.data.getv(1), b.data.getv(1)), c.data.getv(1))); +# endif + return Result; + } + }; + + // copy vec3 to vec4 and set w to 0 + template + struct convert_vec3_to_vec4W0 + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<3, float, Q> const& a) + { + vec<4, float, Q> v; +#if (GLM_ARCH & GLM_ARCH_SSE41_BIT) + v.data = _mm_blend_ps(a.data, _mm_setzero_ps(), 8); +#else + __m128i mask = _mm_set_epi32(0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); + __m128 v0 = _mm_castsi128_ps(_mm_and_si128(_mm_castps_si128(a.data), mask)); + v.data = v0; +#endif + return v; + } + }; + + // copy vec3 to vec4 and set w to 1 + template + struct convert_vec3_to_vec4W1 + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<3, float, Q> const& a) + { + vec<4, float, Q> v; +#if (GLM_ARCH & GLM_ARCH_SSE41_BIT) + v.data = _mm_blend_ps(a.data, _mm_set1_ps(1.0f), 8); +#else + __m128 t1 = _mm_shuffle_ps(a.data, a.data, _MM_SHUFFLE(0, 2, 1, 3)); //permute x, w + __m128 t2 = _mm_move_ss(t1, _mm_set_ss(1.0f)); // set x to 1.0f + v.data = _mm_shuffle_ps(t2, t2, _MM_SHUFFLE(0, 2, 1, 3)); //permute x, w +#endif + return v; + } + }; + + // copy vec3 to vec4 and set w to vec3.z + template + struct convert_vec3_to_vec4WZ + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<3, float, Q> const& a) + { + vec<4, float, Q> v; + v.data = _mm_shuffle_ps(a.data, a.data, _MM_SHUFFLE(2, 2, 1, 0)); + return v; + } + }; + + // copy vec3 to vec4 and set w to 0 + template + struct convert_vec3_to_vec4W0 + { + GLM_FUNC_QUALIFIER static vec<4, double, Q> call(vec<3, double, Q> const& a) + { + vec<4, double, Q> v; +#if (GLM_ARCH & GLM_ARCH_AVX_BIT) + v.data = _mm256_blend_pd(a.data, _mm256_setzero_pd(), 8); +#else + v.data.setv(0, a.data.getv(0)); + glm_dvec2 av2 = a.data.getv(1); + av2 = _mm_shuffle_pd(av2, _mm_setzero_pd(), 2); + v.data.setv(1, av2); +#endif + return v; + } + }; + + // copy vec3 to vec4 and set w to vec3.z + template + struct convert_vec3_to_vec4WZ + { + GLM_FUNC_QUALIFIER static vec<4, double, Q> call(vec<3, double, Q> const& a) + { + vec<4, double, Q> v; +#if (GLM_ARCH & GLM_ARCH_AVX_BIT) + v.data = _mm256_permute_pd(a.data, 2); +#else + v.data.setv(0, a.data.getv(0)); + glm_dvec2 av2 = a.data.getv(1); + __m128d t1 = _mm_shuffle_pd(av2, av2, 0); + v.data.setv(1, t1); +#endif + return v; + } + }; + + // copy vec3 to vec4 and set w to 1 + template + struct convert_vec3_to_vec4W1 + { + GLM_FUNC_QUALIFIER static vec<4, double, Q> call(vec<3, double, Q> const& a) + { + vec<4, double, Q> v; +#if (GLM_ARCH & GLM_ARCH_AVX_BIT) + v.data = _mm256_blend_pd(a.data, _mm256_set1_pd(1.0), 8); +#else + v.data.setv(0, a.data.getv(0)); + glm_dvec2 av2 = a.data.getv(1); + av2 = _mm_shuffle_pd(av2, _mm_set1_pd(1.), 2); + v.data.setv(1, av2); +#endif + return v; + } + }; + + template + struct convert_vec4_to_vec3 { + GLM_FUNC_QUALIFIER static vec<3, float, Q> call(vec<4, float, Q> const& a) + { + vec<3, float, Q> v; + v.data = a.data; + return v; + } + }; + + template + struct convert_vec4_to_vec3 { + GLM_FUNC_QUALIFIER static vec<3, double, Q> call(vec<4, double, Q> const& a) + { + vec<3, double, Q> v; +#if GLM_ARCH & GLM_ARCH_AVX_BIT + v.data = a.data; +#else + v.data.setv(0, a.data.getv(0)); + v.data.setv(1, a.data.getv(1)); +#endif + return v; + } + }; + + + // set all coordinates to same value vec[c] + template + struct convert_splat { + 
template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a) + { + vec Result; + const int s = _MM_SHUFFLE(c, c, c, c); + glm_f32vec4 va = static_cast(a.data); +# if GLM_ARCH & GLM_ARCH_AVX_BIT + Result.data = _mm_permute_ps(va, s); +# else + Result.data = _mm_shuffle_ps(va, va, s); +# endif + return Result; + } + }; + + // set all coordinates to same value vec[c] + template + struct convert_splat { + + template + struct detailSSE + {}; + + template + struct detailSSE + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a) + { + vec Result; + glm_f64vec2 r0 = _mm_shuffle_pd(a.data.getv(0), a.data.getv(0), c | c << 1); + Result.data.setv(0, r0); + Result.data.setv(1, r0); + return Result; + } + }; + + template + struct detailSSE + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a) + { + vec Result; + const unsigned int d = static_cast(c - 2); + glm_f64vec2 r0 = _mm_shuffle_pd(a.data.getv(1), a.data.getv(1), d | d << 1); + Result.data.setv(0, r0); + Result.data.setv(1, r0); + return Result; + } + }; + +#if GLM_ARCH & GLM_ARCH_AVX_BIT + template //note: bool is useless but needed to compil on linux (gcc) + struct detailAVX + {}; + + template + struct detailAVX + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a) + { + vec Result; + __m256d t1 = _mm256_permute2f128_pd(a.data, a.data, 0x0); + Result.data = _mm256_permute_pd(t1, 0); + return Result; + } + }; + + template + struct detailAVX + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a) + { + vec Result; + __m256d t1 = _mm256_permute2f128_pd(a.data, a.data, 0x0); + Result.data = _mm256_permute_pd(t1, 0xf); + return Result; + } + }; + + template + struct detailAVX + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a) + { + vec Result; + __m256d t2 = _mm256_permute2f128_pd(a.data, a.data, 0x11); + Result.data = _mm256_permute_pd(t2, 0x0); + return Result; + } + }; + + template + struct detailAVX + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a) + { + vec Result; + __m256d t2 = _mm256_permute2f128_pd(a.data, a.data, 0x11); + Result.data = _mm256_permute_pd(t2, 0xf); + return Result; + } + }; +#endif //GLM_ARCH & GLM_ARCH_AVX_BIT + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a) + { + //return compute_splat::call(a); + vec Result; +# if GLM_ARCH & GLM_ARCH_AVX2_BIT + Result.data = _mm256_permute4x64_pd(a.data, _MM_SHUFFLE(c, c, c, c)); +# elif GLM_ARCH & GLM_ARCH_AVX_BIT + Result = detailAVX::call(a); +# else +#if 1 //detail<(c <= 1), c>::call2(a) is equivalent to following code but without if constexpr usage + Result = detailSSE<(c <= 1), c>::call(a); +#else + if constexpr (c <= 1) + { + glm_f64vec2 r0 = _mm_shuffle_pd(a.data.getv(0), a.data.getv(0), c | c << 1); + Result.data.setv(0, r0); + Result.data.setv(1, r0); + } + else + { + const unsigned int d = (unsigned int)(c - 2); + glm_f64vec2 r0 = _mm_shuffle_pd(a.data.getv(1), a.data.getv(1), d | d << 1); + Result.data.setv(0, r0); + Result.data.setv(1, r0); + } +#endif +# endif + return Result; + } + }; + + +}//namespace detail +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT + +#if GLM_ARCH & GLM_ARCH_NEON_BIT +namespace glm { + namespace detail { + + template + struct convert_vec3_to_vec4W0 + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<3, float, Q> const& a) + { + vec<4, float, Q> v; + static const uint32x4_t mask = { 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }; + v.data = vbslq_f32(mask, a.data, vdupq_n_f32(0)); + return v; + } + }; + + 
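The convert_splat specializations above broadcast a single lane to every component, using _mm_shuffle_ps / _mm_permute_ps on SSE/AVX and vdupq_lane_f32 on NEON. A minimal SSE sketch of the same broadcast (illustrative helper, not GLM's API):

#include <cstdio>
#include <xmmintrin.h> // SSE

// Broadcast lane c of an SSE register to all four lanes, as in convert_splat above.
template <int c>
__m128 splat(__m128 v)
{
    // _MM_SHUFFLE(c, c, c, c) selects lane c for every output position.
    return _mm_shuffle_ps(v, v, _MM_SHUFFLE(c, c, c, c));
}

int main()
{
    __m128 v = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); // lanes: x=1, y=2, z=3, w=4
    float out[4];
    _mm_storeu_ps(out, splat<2>(v));               // equivalent of splatZ
    std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); // 3 3 3 3
}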
template + struct convert_vec4_to_vec3 { + GLM_FUNC_QUALIFIER static vec<3, float, Q> call(vec<4, float, Q> const& a) + { + vec<3, float, Q> v; + v.data = a.data; + return v; + } + }; + + template + struct compute_splat { + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& a) + {} + + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call<0>(vec const& a) + { + vec Result; + Result.data = vdupq_lane_f32(vget_low_f32(a.data), 0); + return Result; + } + + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call<1>(vec const& a) + { + vec Result; + Result.data = vdupq_lane_f32(vget_low_f32(a.data), 1); + return Result; + } + + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call<2>(vec const& a) + { + vec Result; + Result.data = vdupq_lane_f32(vget_high_f32(a.data), 0); + return Result; + } + + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call<3>(vec const& a) + { + vec Result; + Result.data = vdupq_lane_f32(vget_high_f32(a.data), 1); + return Result; + } + }; + +}//namespace detail +}//namespace glm +#endif //GLM_ARCH & GLM_ARCH_NEON_BIT diff --git a/includes/glm/detail/func_exponential.inl b/includes/glm/detail/func_exponential.inl new file mode 100644 index 0000000..7b91f14 --- /dev/null +++ b/includes/glm/detail/func_exponential.inl @@ -0,0 +1,152 @@ +/// @ref core +/// @file glm/detail/func_exponential.inl + +#include "../vector_relational.hpp" +#include "_vectorize.hpp" +#include +#include +#include + +namespace glm{ +namespace detail +{ +# if GLM_HAS_CXX11_STL + using std::log2; +# else + template + GLM_FUNC_QUALIFIER genType log2(genType Value) + { + return std::log(Value) * static_cast(1.4426950408889634073599246810019); + } +# endif + + template + struct compute_log2 + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'log2' only accept floating-point inputs. 
Include for integer inputs."); + + return detail::functor1::call(log2, v); + } + }; + + template + struct compute_sqrt + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(std::sqrt, x); + } + }; + + template + struct compute_inversesqrt + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return static_cast(1) / sqrt(x); + } + }; + + template + struct compute_inversesqrt + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + vec tmp(x); + vec xhalf(tmp * 0.5f); + vec* p = reinterpret_cast*>(const_cast*>(&x)); + vec i = vec(0x5f375a86) - (*p >> vec(1)); + vec* ptmp = reinterpret_cast*>(&i); + tmp = *ptmp; + tmp = tmp * (1.5f - xhalf * tmp * tmp); + return tmp; + } + }; +}//namespace detail + + // pow + using std::pow; + template + GLM_FUNC_QUALIFIER vec pow(vec const& base, vec const& exponent) + { + return detail::functor2::call(pow, base, exponent); + } + + // exp + using std::exp; + template + GLM_FUNC_QUALIFIER vec exp(vec const& x) + { + return detail::functor1::call(exp, x); + } + + // log + using std::log; + template + GLM_FUNC_QUALIFIER vec log(vec const& x) + { + return detail::functor1::call(log, x); + } + +# if GLM_HAS_CXX11_STL + using std::exp2; +# else + //exp2, ln2 = 0.69314718055994530941723212145818f + template + GLM_FUNC_QUALIFIER genType exp2(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'exp2' only accept floating-point inputs"); + + return std::exp(static_cast(0.69314718055994530941723212145818) * x); + } +# endif + + template + GLM_FUNC_QUALIFIER vec exp2(vec const& x) + { + return detail::functor1::call(exp2, x); + } + + // log2, ln2 = 0.69314718055994530941723212145818f + template + GLM_FUNC_QUALIFIER genType log2(genType x) + { + return log2(vec<1, genType>(x)).x; + } + + template + GLM_FUNC_QUALIFIER vec log2(vec const& x) + { + return detail::compute_log2::is_iec559, detail::is_aligned::value>::call(x); + } + + // sqrt + using std::sqrt; + template + GLM_FUNC_QUALIFIER vec sqrt(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'sqrt' only accept floating-point inputs"); + return detail::compute_sqrt::value>::call(x); + } + + // inversesqrt + template + GLM_FUNC_QUALIFIER genType inversesqrt(genType x) + { + return static_cast(1) / sqrt(x); + } + + template + GLM_FUNC_QUALIFIER vec inversesqrt(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'inversesqrt' only accept floating-point inputs"); + return detail::compute_inversesqrt::value>::call(x); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_exponential_simd.inl" +#endif + diff --git a/includes/glm/detail/func_exponential_simd.inl b/includes/glm/detail/func_exponential_simd.inl new file mode 100644 index 0000000..fb78951 --- /dev/null +++ b/includes/glm/detail/func_exponential_simd.inl @@ -0,0 +1,37 @@ +/// @ref core +/// @file glm/detail/func_exponential_simd.inl + +#include "../simd/exponential.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +namespace glm{ +namespace detail +{ + template + struct compute_sqrt<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> Result; + Result.data = _mm_sqrt_ps(v.data); + return Result; + } + }; + +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE + template<> + struct compute_sqrt<4, float, aligned_lowp, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, aligned_lowp> 
call(vec<4, float, aligned_lowp> const& v) + { + vec<4, float, aligned_lowp> Result; + Result.data = glm_vec4_sqrt_lowp(v.data); + return Result; + } + }; +# endif +}//namespace detail +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/includes/glm/detail/func_geometric.inl b/includes/glm/detail/func_geometric.inl new file mode 100644 index 0000000..8f3cc77 --- /dev/null +++ b/includes/glm/detail/func_geometric.inl @@ -0,0 +1,259 @@ +#include "../exponential.hpp" +#include "../common.hpp" + +namespace glm{ +namespace detail +{ + template + struct compute_length + { + GLM_FUNC_QUALIFIER static T call(vec const& v) + { + return sqrt(dot(v, v)); + } + }; + + template + struct compute_distance + { + GLM_FUNC_QUALIFIER static T call(vec const& p0, vec const& p1) + { + return length(p1 - p0); + } + }; + + template + struct compute_dot{}; + + template + struct compute_dot, T, Aligned> + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<1, T, Q> const& a, vec<1, T, Q> const& b) + { + return a.x * b.x; + } + }; + + template + struct compute_dot, T, Aligned> + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<2, T, Q> const& a, vec<2, T, Q> const& b) + { + vec<2, T, Q> tmp(a * b); + return tmp.x + tmp.y; + } + }; + + template + struct compute_dot, T, Aligned> + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<3, T, Q> const& a, vec<3, T, Q> const& b) + { + vec<3, T, Q> tmp(a * b); + return tmp.x + tmp.y + tmp.z; + } + }; + + template + struct compute_dot, T, Aligned> + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) + { + // VS 17.7.4 generates longer assembly (~20 instructions vs 11 instructions) + #if defined(_MSC_VER) + return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w; + #else + vec<4, T, Q> tmp(a * b); + return (tmp.x + tmp.y) + (tmp.z + tmp.w); + #endif + } + }; + + template + struct compute_cross + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<3, T, Q> call(vec<3, T, Q> const& x, vec<3, T, Q> const& y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'cross' accepts only floating-point inputs"); + + return vec<3, T, Q>( + x.y * y.z - y.y * x.z, + x.z * y.x - y.z * x.x, + x.x * y.y - y.x * x.y); + } + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& x, vec<4, T, Q> const& y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'cross' accepts only floating-point inputs"); + + return vec<4, T, Q>( + x.y * y.z - y.y * x.z, + x.z * y.x - y.z * x.x, + x.x * y.y - y.x * x.y, + 0.0f); + } + }; + + template + struct compute_normalize + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); + + return v * inversesqrt(dot(v, v)); + } + }; + + template + struct compute_faceforward + { + GLM_FUNC_QUALIFIER static vec call(vec const& N, vec const& I, vec const& Nref) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); + + return dot(Nref, I) < static_cast(0) ? N : -N; + } + }; + + template + struct compute_reflect + { + GLM_FUNC_QUALIFIER static vec call(vec const& I, vec const& N) + { + return I - N * dot(N, I) * static_cast(2); + } + }; + + template + struct compute_refract + { + GLM_FUNC_QUALIFIER static vec call(vec const& I, vec const& N, T eta) + { + T const dotValue(dot(N, I)); + T const k(static_cast(1) - eta * eta * (static_cast(1) - dotValue * dotValue)); + vec const Result = + (k >= static_cast(0)) ? 
(eta * I - (eta * dotValue + std::sqrt(k)) * N) : vec(0); + return Result; + } + }; +}//namespace detail + + // length + template + GLM_FUNC_QUALIFIER genType length(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'length' accepts only floating-point inputs"); + + return abs(x); + } + + template + GLM_FUNC_QUALIFIER T length(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'length' accepts only floating-point inputs"); + + return detail::compute_length::value>::call(v); + } + + // distance + template + GLM_FUNC_QUALIFIER genType distance(genType const& p0, genType const& p1) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'distance' accepts only floating-point inputs"); + + return length(p1 - p0); + } + + template + GLM_FUNC_QUALIFIER T distance(vec const& p0, vec const& p1) + { + return detail::compute_distance::value>::call(p0, p1); + } + + // dot + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T dot(T x, T y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'dot' accepts only floating-point inputs"); + return x * y; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T dot(vec const& x, vec const& y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'dot' accepts only floating-point inputs"); + return detail::compute_dot, T, detail::is_aligned::value>::call(x, y); + } + + // cross + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> cross(vec<3, T, Q> const& x, vec<3, T, Q> const& y) + { + return detail::compute_cross::value>::call(x, y); + } +/* + // normalize + template + GLM_FUNC_QUALIFIER genType normalize(genType const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); + + return x < genType(0) ? genType(-1) : genType(1); + } +*/ + template + GLM_FUNC_QUALIFIER vec normalize(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); + + return detail::compute_normalize::value>::call(x); + } + + // faceforward + template + GLM_FUNC_QUALIFIER genType faceforward(genType const& N, genType const& I, genType const& Nref) + { + return dot(Nref, I) < static_cast(0) ? 
N : -N; + } + + template + GLM_FUNC_QUALIFIER vec faceforward(vec const& N, vec const& I, vec const& Nref) + { + return detail::compute_faceforward::value>::call(N, I, Nref); + } + + // reflect + template + GLM_FUNC_QUALIFIER genType reflect(genType const& I, genType const& N) + { + return I - N * dot(N, I) * genType(2); + } + + template + GLM_FUNC_QUALIFIER vec reflect(vec const& I, vec const& N) + { + return detail::compute_reflect::value>::call(I, N); + } + + // refract + template + GLM_FUNC_QUALIFIER genType refract(genType const& I, genType const& N, genType eta) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'refract' accepts only floating-point inputs"); + genType const dotValue(dot(N, I)); + genType const k(static_cast(1) - eta * eta * (static_cast(1) - dotValue * dotValue)); + return (eta * I - (eta * dotValue + sqrt(k)) * N) * static_cast(k >= static_cast(0)); + } + + template + GLM_FUNC_QUALIFIER vec refract(vec const& I, vec const& N, T eta) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'refract' accepts only floating-point inputs"); + return detail::compute_refract::value>::call(I, N, eta); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_geometric_simd.inl" +#endif diff --git a/includes/glm/detail/func_geometric_simd.inl b/includes/glm/detail/func_geometric_simd.inl new file mode 100644 index 0000000..4c7f56b --- /dev/null +++ b/includes/glm/detail/func_geometric_simd.inl @@ -0,0 +1,181 @@ +/// @ref core +/// @file glm/detail/func_geometric_simd.inl + +#include "../simd/geometric.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +namespace glm{ +namespace detail +{ + template + struct compute_length<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& v) + { + return _mm_cvtss_f32(glm_vec4_length(v.data)); + } + }; + + template + struct compute_distance<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& p0, vec<4, float, Q> const& p1) + { + return _mm_cvtss_f32(glm_vec4_distance(p0.data, p1.data)); + } + }; + + template + struct compute_dot, float, true> + { + GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& x, vec<4, float, Q> const& y) + { + return _mm_cvtss_f32(glm_vec1_dot(x.data, y.data)); + } + }; + + template + struct compute_dot, float, true> + { + GLM_FUNC_QUALIFIER static float call(vec<3, float, Q> const& a, vec<3, float, Q> const& b) + { + vec<4, float, Q> aa = xyz0(a); + vec<4, float, Q> bb = xyz0(b); + return _mm_cvtss_f32(glm_vec1_dot(aa.data, bb.data)); + } + }; + + template + struct compute_cross + { + GLM_FUNC_QUALIFIER static vec<3, float, Q> call(vec<3, float, Q> const& a, vec<3, float, Q> const& b) + { + vec<4, float, Q> aa = xyzz(a); + vec<4, float, Q> bb = xyzz(b); + __m128 const xpd0 = glm_vec4_cross(aa.data, bb.data); + + vec<3, float, Q> Result; + Result.data = xpd0; + return Result; + } + + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) + { + vec<4, float, Q> Result; + Result.data = glm_vec4_cross(a.data, b.data); + return Result; + } + }; + + template + struct compute_normalize<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + vec<4, float, Q> Result; + Result.data = glm_vec4_normalize(v.data); + return Result; + } + }; + + template + struct compute_faceforward<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& N, vec<4, float, Q> const& I, vec<4, float, Q> const& Nref) + { + 
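The reflect and refract implementations above use the standard GLSL formulas: reflect(I, N) = I - 2*dot(N, I)*N, and refract returns zero on total internal reflection (k < 0) and eta*I - (eta*dot(N, I) + sqrt(k))*N otherwise. A small self-contained sketch with an illustrative 3-component struct in place of GLM's vec:

#include <cmath>
#include <cstdio>

// Plain 3-component stand-in for glm::vec3, for illustration only.
struct v3 { float x, y, z; };

float dot3(v3 a, v3 b)   { return a.x * b.x + a.y * b.y + a.z * b.z; }
v3 scale(v3 a, float s)  { return { a.x * s, a.y * s, a.z * s }; }
v3 sub(v3 a, v3 b)       { return { a.x - b.x, a.y - b.y, a.z - b.z }; }

// reflect: I - 2 * dot(N, I) * N
v3 reflect3(v3 I, v3 N)  { return sub(I, scale(N, 2.0f * dot3(N, I))); }

// refract: zero on total internal reflection, otherwise eta*I - (eta*dot + sqrt(k))*N
v3 refract3(v3 I, v3 N, float eta)
{
    float d = dot3(N, I);
    float k = 1.0f - eta * eta * (1.0f - d * d);
    if (k < 0.0f)
        return { 0.0f, 0.0f, 0.0f };
    return sub(scale(I, eta), scale(N, eta * d + std::sqrt(k)));
}

int main()
{
    v3 I{ 0.707107f, -0.707107f, 0.0f }; // incoming ray at 45 degrees
    v3 N{ 0.0f, 1.0f, 0.0f };            // surface normal
    v3 R = reflect3(I, N);
    v3 T = refract3(I, N, 1.0f / 1.5f);  // air -> glass
    std::printf("reflect: (%g, %g, %g)\n", R.x, R.y, R.z);
    std::printf("refract: (%g, %g, %g)\n", T.x, T.y, T.z);
}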
vec<4, float, Q> Result; + Result.data = glm_vec4_faceforward(N.data, I.data, Nref.data); + return Result; + } + }; + + template + struct compute_reflect<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& I, vec<4, float, Q> const& N) + { + vec<4, float, Q> Result; + Result.data = glm_vec4_reflect(I.data, N.data); + return Result; + } + }; + + template + struct compute_refract<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& I, vec<4, float, Q> const& N, float eta) + { + vec<4, float, Q> Result; + Result.data = glm_vec4_refract(I.data, N.data, _mm_set1_ps(eta)); + return Result; + } + }; +}//namespace detail +}//namespace glm + +#elif GLM_ARCH & GLM_ARCH_NEON_BIT +namespace glm{ +namespace detail +{ + template + struct compute_length<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& v) + { + return sqrt(compute_dot, float, true>::call(v, v)); + } + }; + + template + struct compute_distance<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& p0, vec<4, float, Q> const& p1) + { + return compute_length<4, float, Q, true>::call(p1 - p0); + } + }; + + + template + struct compute_dot, float, true> + { + GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& x, vec<4, float, Q> const& y) + { +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + float32x4_t v = vmulq_f32(x.data, y.data); + return vaddvq_f32(v); +#else // Armv7a with Neon + float32x4_t p = vmulq_f32(x.data, y.data); + float32x2_t v = vpadd_f32(vget_low_f32(p), vget_high_f32(p)); + v = vpadd_f32(v, v); + return vget_lane_f32(v, 0); +#endif + } + }; + + template + struct compute_normalize<4, float, Q, true> + { + GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) + { + float32x4_t p = vmulq_f32(v.data, v.data); +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + p = vpaddq_f32(p, p); + p = vpaddq_f32(p, p); +#else + float32x2_t t = vpadd_f32(vget_low_f32(p), vget_high_f32(p)); + t = vpadd_f32(t, t); + p = vcombine_f32(t, t); +#endif + + float32x4_t vd = vrsqrteq_f32(p); + vec<4, float, Q> Result; + Result.data = vmulq_f32(v.data, vd); + return Result; + } + }; +}//namespace detail +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/includes/glm/detail/func_integer.inl b/includes/glm/detail/func_integer.inl new file mode 100644 index 0000000..67177a0 --- /dev/null +++ b/includes/glm/detail/func_integer.inl @@ -0,0 +1,392 @@ +/// @ref core + +#include "_vectorize.hpp" +#if(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC) +# include +# pragma intrinsic(_BitScanReverse) +#endif//(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC) +#include + +#if !GLM_HAS_EXTENDED_INTEGER_TYPE +# if GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wlong-long" +# endif +# if (GLM_COMPILER & GLM_COMPILER_CLANG) +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wc++11-long-long" +# endif +#endif + +namespace glm{ +namespace detail +{ + template + GLM_FUNC_QUALIFIER T mask(T Bits) + { + return Bits >= static_cast(sizeof(T) * 8) ? 
~static_cast(0) : (static_cast(1) << Bits) - static_cast(1); + } + + template + struct compute_bitfieldReverseStep + { + GLM_FUNC_QUALIFIER static vec call(vec const& v, T, T) + { + return v; + } + }; + + template + struct compute_bitfieldReverseStep + { + GLM_FUNC_QUALIFIER static vec call(vec const& v, T Mask, T Shift) + { + return (v & Mask) << Shift | (v & (~Mask)) >> Shift; + } + }; + + template + struct compute_bitfieldBitCountStep + { + GLM_FUNC_QUALIFIER static vec call(vec const& v, T, T) + { + return v; + } + }; + + template + struct compute_bitfieldBitCountStep + { + GLM_FUNC_QUALIFIER static vec call(vec const& v, T Mask, T Shift) + { + return (v & Mask) + ((v >> Shift) & Mask); + } + }; + + template + struct compute_findLSB + { + GLM_FUNC_QUALIFIER static int call(genIUType Value) + { + if(Value == 0) + return -1; + + return glm::bitCount(~Value & (Value - static_cast(1))); + } + }; + +# if GLM_HAS_BITSCAN_WINDOWS + template + struct compute_findLSB + { + GLM_FUNC_QUALIFIER static int call(genIUType Value) + { + unsigned long Result(0); + unsigned char IsNotNull = _BitScanForward(&Result, *reinterpret_cast(&Value)); + return IsNotNull ? int(Result) : -1; + } + }; + +# if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32)) + template + struct compute_findLSB + { + GLM_FUNC_QUALIFIER static int call(genIUType Value) + { + unsigned long Result(0); + unsigned char IsNotNull = _BitScanForward64(&Result, *reinterpret_cast(&Value)); + return IsNotNull ? int(Result) : -1; + } + }; +# endif +# endif//GLM_HAS_BITSCAN_WINDOWS + + template + struct compute_findMSB_step_vec + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, T Shift) + { + return x | (x >> Shift); + } + }; + + template + struct compute_findMSB_step_vec + { + GLM_FUNC_QUALIFIER static vec call(vec const& x, T) + { + return x; + } + }; + + template + struct compute_findMSB_vec + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + vec x(v); + x = compute_findMSB_step_vec= 8>::call(x, static_cast( 1)); + x = compute_findMSB_step_vec= 8>::call(x, static_cast( 2)); + x = compute_findMSB_step_vec= 8>::call(x, static_cast( 4)); + x = compute_findMSB_step_vec= 16>::call(x, static_cast( 8)); + x = compute_findMSB_step_vec= 32>::call(x, static_cast(16)); + x = compute_findMSB_step_vec= 64>::call(x, static_cast(32)); + return vec(sizeof(T) * 8 - 1) - glm::bitCount(~x); + } + }; + +# if GLM_HAS_BITSCAN_WINDOWS + template + GLM_FUNC_QUALIFIER int compute_findMSB_32(genIUType Value) + { + unsigned long Result(0); + unsigned char IsNotNull = _BitScanReverse(&Result, *reinterpret_cast(&Value)); + return IsNotNull ? int(Result) : -1; + } + + template + struct compute_findMSB_vec + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(compute_findMSB_32, x); + } + }; + +# if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32)) + template + GLM_FUNC_QUALIFIER int compute_findMSB_64(genIUType Value) + { + unsigned long Result(0); + unsigned char IsNotNull = _BitScanReverse64(&Result, *reinterpret_cast(&Value)); + return IsNotNull ? 
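The portable compute_findLSB fallback above relies on a bit identity: for non-zero v, ~v & (v - 1) keeps exactly the bits below the lowest set bit, so its population count is that bit's index; zero maps to -1, and MSVC builds use _BitScanForward instead. A scalar sketch of the fallback path (illustrative names, not GLM's API):

#include <cstdint>
#include <cstdio>

int popcount32(std::uint32_t v)
{
    int n = 0;
    for (; v; v &= v - 1) // clear the lowest set bit each iteration
        ++n;
    return n;
}

int find_lsb(std::uint32_t v)
{
    if (v == 0)
        return -1;                       // same convention as compute_findLSB
    return popcount32(~v & (v - 1));     // count the zeros below the lowest set bit
}

int main()
{
    std::printf("findLSB(0b101000) = %d\n", find_lsb(0x28u)); // 3
    std::printf("findLSB(1)        = %d\n", find_lsb(1u));    // 0
    std::printf("findLSB(0)        = %d\n", find_lsb(0u));    // -1
}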
int(Result) : -1; + } + + template + struct compute_findMSB_vec + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + return detail::functor1::call(compute_findMSB_64, x); + } + }; +# endif +# endif//GLM_HAS_BITSCAN_WINDOWS +}//namespace detail + + // uaddCarry + GLM_FUNC_QUALIFIER uint uaddCarry(uint const& x, uint const& y, uint & Carry) + { + detail::uint64 const Value64(static_cast(x) + static_cast(y)); + detail::uint64 const Max32((static_cast(1) << static_cast(32)) - static_cast(1)); + Carry = Value64 > Max32 ? 1u : 0u; + return static_cast(Value64 % (Max32 + static_cast(1))); + } + + template + GLM_FUNC_QUALIFIER vec uaddCarry(vec const& x, vec const& y, vec& Carry) + { + vec Value64(vec(x) + vec(y)); + vec Max32((static_cast(1) << static_cast(32)) - static_cast(1)); + Carry = mix(vec(0), vec(1), greaterThan(Value64, Max32)); + return vec(Value64 % (Max32 + static_cast(1))); + } + + // usubBorrow + GLM_FUNC_QUALIFIER uint usubBorrow(uint const& x, uint const& y, uint & Borrow) + { + Borrow = x >= y ? static_cast(0) : static_cast(1); + if(y >= x) + return y - x; + else + return static_cast((static_cast(1) << static_cast(32)) + (static_cast(y) - static_cast(x))); + } + + template + GLM_FUNC_QUALIFIER vec usubBorrow(vec const& x, vec const& y, vec& Borrow) + { + Borrow = mix(vec(1), vec(0), greaterThanEqual(x, y)); + vec const YgeX(y - x); + vec const XgeY(vec((static_cast(1) << static_cast(32)) + (vec(y) - vec(x)))); + return mix(XgeY, YgeX, greaterThanEqual(y, x)); + } + + // umulExtended + GLM_FUNC_QUALIFIER void umulExtended(uint const& x, uint const& y, uint & msb, uint & lsb) + { + detail::uint64 Value64 = static_cast(x) * static_cast(y); + msb = static_cast(Value64 >> static_cast(32)); + lsb = static_cast(Value64); + } + + template + GLM_FUNC_QUALIFIER void umulExtended(vec const& x, vec const& y, vec& msb, vec& lsb) + { + vec Value64(vec(x) * vec(y)); + msb = vec(Value64 >> static_cast(32)); + lsb = vec(Value64); + } + + // imulExtended + GLM_FUNC_QUALIFIER void imulExtended(int x, int y, int& msb, int& lsb) + { + detail::int64 Value64 = static_cast(x) * static_cast(y); + msb = static_cast(Value64 >> static_cast(32)); + lsb = static_cast(Value64); + } + + template + GLM_FUNC_QUALIFIER void imulExtended(vec const& x, vec const& y, vec& msb, vec& lsb) + { + vec Value64(vec(x) * vec(y)); + lsb = vec(Value64 & static_cast(0xFFFFFFFF)); + msb = vec((Value64 >> static_cast(32)) & static_cast(0xFFFFFFFF)); + } + + // bitfieldExtract + template + GLM_FUNC_QUALIFIER genIUType bitfieldExtract(genIUType Value, int Offset, int Bits) + { + return bitfieldExtract(vec<1, genIUType>(Value), Offset, Bits).x; + } + + template + GLM_FUNC_QUALIFIER vec bitfieldExtract(vec const& Value, int Offset, int Bits) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldExtract' only accept integer inputs"); + + return (Value >> static_cast(Offset)) & static_cast(detail::mask(Bits)); + } + + // bitfieldInsert + template + GLM_FUNC_QUALIFIER genIUType bitfieldInsert(genIUType const& Base, genIUType const& Insert, int Offset, int Bits) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldInsert' only accept integer values"); + + return bitfieldInsert(vec<1, genIUType>(Base), vec<1, genIUType>(Insert), Offset, Bits).x; + } + + template + GLM_FUNC_QUALIFIER vec bitfieldInsert(vec const& Base, vec const& Insert, int Offset, int Bits) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldInsert' only accept integer values"); + + T const Mask = 
detail::mask(static_cast(Bits)) << Offset; + return (Base & ~Mask) | ((Insert << static_cast(Offset)) & Mask); + } + +#if GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable : 4309) +#endif + + // bitfieldReverse + template + GLM_FUNC_QUALIFIER genIUType bitfieldReverse(genIUType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldReverse' only accept integer values"); + + return bitfieldReverse(glm::vec<1, genIUType, glm::defaultp>(x)).x; + } + + template + GLM_FUNC_QUALIFIER vec bitfieldReverse(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldReverse' only accept integer values"); + + vec x(v); + x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 2>::call(x, static_cast(0x5555555555555555ull), static_cast( 1)); + x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 4>::call(x, static_cast(0x3333333333333333ull), static_cast( 2)); + x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 8>::call(x, static_cast(0x0F0F0F0F0F0F0F0Full), static_cast( 4)); + x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 16>::call(x, static_cast(0x00FF00FF00FF00FFull), static_cast( 8)); + x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 32>::call(x, static_cast(0x0000FFFF0000FFFFull), static_cast(16)); + x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 64>::call(x, static_cast(0x00000000FFFFFFFFull), static_cast(32)); + return x; + } + +# if GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif + + // bitCount + template + GLM_FUNC_QUALIFIER int bitCount(genIUType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitCount' only accept integer values"); + + return bitCount(glm::vec<1, genIUType, glm::defaultp>(x)).x; + } + + template + GLM_FUNC_QUALIFIER vec bitCount(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitCount' only accept integer values"); + +# if GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable : 4310) //cast truncates constant value +# endif + + vec::type, Q> x(v); + x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 2>::call(x, typename detail::make_unsigned::type(0x5555555555555555ull), typename detail::make_unsigned::type( 1)); + x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 4>::call(x, typename detail::make_unsigned::type(0x3333333333333333ull), typename detail::make_unsigned::type( 2)); + x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 8>::call(x, typename detail::make_unsigned::type(0x0F0F0F0F0F0F0F0Full), typename detail::make_unsigned::type( 4)); + x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 16>::call(x, typename detail::make_unsigned::type(0x00FF00FF00FF00FFull), typename detail::make_unsigned::type( 8)); + x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 32>::call(x, typename detail::make_unsigned::type(0x0000FFFF0000FFFFull), typename detail::make_unsigned::type(16)); + x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 64>::call(x, typename detail::make_unsigned::type(0x00000000FFFFFFFFull), typename detail::make_unsigned::type(32)); + return vec(x); + +# if GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif + } + + // findLSB + template + GLM_FUNC_QUALIFIER int 
findLSB(genIUType Value) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findLSB' only accept integer values"); + + return detail::compute_findLSB::call(Value); + } + + template + GLM_FUNC_QUALIFIER vec findLSB(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findLSB' only accept integer values"); + + return detail::functor1::call(findLSB, x); + } + + // findMSB + template + GLM_FUNC_QUALIFIER int findMSB(genIUType v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findMSB' only accept integer values"); + + return findMSB(vec<1, genIUType>(v)).x; + } + + template + GLM_FUNC_QUALIFIER vec findMSB(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findMSB' only accept integer values"); + + return detail::compute_findMSB_vec(sizeof(T) * 8)>::call(v); + } +}//namespace glm + +#if !GLM_HAS_EXTENDED_INTEGER_TYPE +# if GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic pop +# endif +# if (GLM_COMPILER & GLM_COMPILER_CLANG) +# pragma clang diagnostic pop +# endif +#endif + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_integer_simd.inl" +#endif + diff --git a/includes/glm/detail/func_integer_simd.inl b/includes/glm/detail/func_integer_simd.inl new file mode 100644 index 0000000..5600c84 --- /dev/null +++ b/includes/glm/detail/func_integer_simd.inl @@ -0,0 +1,65 @@ +#include "../simd/integer.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +namespace glm{ +namespace detail +{ + template + struct compute_bitfieldReverseStep<4, uint, Q, true, true> + { + GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v, uint Mask, uint Shift) + { + __m128i const set0 = v.data; + + __m128i const set1 = _mm_set1_epi32(static_cast(Mask)); + __m128i const and1 = _mm_and_si128(set0, set1); + __m128i const sft1 = _mm_slli_epi32(and1, static_cast(Shift)); + + __m128i const set2 = _mm_andnot_si128(set0, _mm_set1_epi32(-1)); + __m128i const and2 = _mm_and_si128(set0, set2); + __m128i const sft2 = _mm_srai_epi32(and2, static_cast(Shift)); + + __m128i const or0 = _mm_or_si128(sft1, sft2); + + return or0; + } + }; + + template + struct compute_bitfieldBitCountStep<4, uint, Q, true, true> + { + GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v, uint Mask, uint Shift) + { + __m128i const set0 = v.data; + + __m128i const set1 = _mm_set1_epi32(static_cast(Mask)); + __m128i const and0 = _mm_and_si128(set0, set1); + __m128i const sft0 = _mm_slli_epi32(set0, static_cast(Shift)); + __m128i const and1 = _mm_and_si128(sft0, set1); + __m128i const add0 = _mm_add_epi32(and0, and1); + + return add0; + } + }; +}//namespace detail + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template<> + GLM_FUNC_QUALIFIER int bitCount(uint x) + { + return _mm_popcnt_u32(x); + } + +# if(GLM_MODEL == GLM_MODEL_64) + template<> + GLM_FUNC_QUALIFIER int bitCount(detail::uint64 x) + { + return static_cast(_mm_popcnt_u64(x)); + } +# endif//GLM_MODEL +# endif//GLM_ARCH + +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/includes/glm/detail/func_matrix.inl b/includes/glm/detail/func_matrix.inl new file mode 100644 index 0000000..9c7f7eb --- /dev/null +++ b/includes/glm/detail/func_matrix.inl @@ -0,0 +1,484 @@ +#include "../geometric.hpp" +#include + +namespace glm{ +namespace detail +{ + template + struct compute_matrixCompMult + { + GLM_FUNC_QUALIFIER static mat call(mat const& x, mat const& y) + { + mat Result(1); + for(length_t i = 0; i < Result.length(); ++i) + Result[i] = x[i] * y[i]; + return Result; + } + }; + + template + struct 
compute_matrixCompMult_type { + GLM_FUNC_QUALIFIER static mat call(mat const& x, mat const& y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, + "'matrixCompMult' only accept floating-point inputs, include to discard this restriction."); + return detail::compute_matrixCompMult::value>::call(x, y); + } + }; + + template + struct compute_outerProduct { + GLM_FUNC_QUALIFIER static typename detail::outerProduct_trait::type call(vec const& c, vec const& r) + { + typename detail::outerProduct_trait::type m(0); + for(length_t i = 0; i < m.length(); ++i) + m[i] = c * r[i]; + return m; + } + }; + + template + struct compute_outerProduct_type { + GLM_FUNC_QUALIFIER static typename detail::outerProduct_trait::type call(vec const& c, vec const& r) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, + "'outerProduct' only accept floating-point inputs, include to discard this restriction."); + + return detail::compute_outerProduct::call(c, r); + } + }; + + template + struct compute_transpose{}; + + template + struct compute_transpose<2, 2, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<2, 2, T, Q> call(mat<2, 2, T, Q> const& m) + { + mat<2, 2, T, Q> Result(1); + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + return Result; + } + }; + + template + struct compute_transpose<2, 3, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<3, 2, T, Q> call(mat<2, 3, T, Q> const& m) + { + mat<3,2, T, Q> Result(1); + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + return Result; + } + }; + + template + struct compute_transpose<2, 4, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<4, 2, T, Q> call(mat<2, 4, T, Q> const& m) + { + mat<4, 2, T, Q> Result(1); + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[3][0] = m[0][3]; + Result[3][1] = m[1][3]; + return Result; + } + }; + + template + struct compute_transpose<3, 2, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<2, 3, T, Q> call(mat<3, 2, T, Q> const& m) + { + mat<2, 3, T, Q> Result(1); + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + return Result; + } + }; + + template + struct compute_transpose<3, 3, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 3, T, Q> const& m) + { + mat<3, 3, T, Q> Result(1); + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[2][2] = m[2][2]; + return Result; + } + }; + + template + struct compute_transpose<3, 4, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<4, 3, T, Q> call(mat<3, 4, T, Q> const& m) + { + mat<4, 3, T, Q> Result(1); + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[2][2] = m[2][2]; + Result[3][0] = m[0][3]; + Result[3][1] = m[1][3]; + Result[3][2] = m[2][3]; + return Result; + } + }; + + template + struct compute_transpose<4, 2, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<2, 4, T, Q> 
call(mat<4, 2, T, Q> const& m) + { + mat<2, 4, T, Q> Result(1); + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[0][3] = m[3][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + Result[1][3] = m[3][1]; + return Result; + } + }; + + template + struct compute_transpose<4, 3, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<3, 4, T, Q> call(mat<4, 3, T, Q> const& m) + { + mat<3, 4, T, Q> Result(1); + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[0][3] = m[3][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + Result[1][3] = m[3][1]; + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[2][2] = m[2][2]; + Result[2][3] = m[3][2]; + return Result; + } + }; + + template + struct compute_transpose<4, 4, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<4, 4, T, Q> call(mat<4, 4, T, Q> const& m) + { + mat<4, 4, T, Q> Result(1); + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[0][3] = m[3][0]; + + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + Result[1][3] = m[3][1]; + + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[2][2] = m[2][2]; + Result[2][3] = m[3][2]; + + Result[3][0] = m[0][3]; + Result[3][1] = m[1][3]; + Result[3][2] = m[2][3]; + Result[3][3] = m[3][3]; + return Result; + } + }; + + template + struct compute_transpose_type { + GLM_FUNC_QUALIFIER static mat call(mat const& m) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, + "'transpose' only accept floating-point inputs, include to discard this restriction."); + return detail::compute_transpose::value>::call(m); + } + }; + + template + struct compute_determinant{}; + + template + struct compute_determinant<2, 2, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static T call(mat<2, 2, T, Q> const& m) + { + return m[0][0] * m[1][1] - m[1][0] * m[0][1]; + } + }; + + template + struct compute_determinant<3, 3, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static T call(mat<3, 3, T, Q> const& m) + { + return + + m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2]) + - m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2]) + + m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2]); + } + }; + + template + struct compute_determinant<4, 4, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static T call(mat<4, 4, T, Q> const& m) + { + T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + + vec<4, T, Q> DetCof( + + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), + - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), + + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), + - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); + + return + m[0][0] * DetCof[0] + m[0][1] * DetCof[1] + + m[0][2] * DetCof[2] + m[0][3] * DetCof[3]; + } + }; + + template + struct compute_determinant_type{ + + GLM_FUNC_QUALIFIER static T call(mat const& m) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, + "'determinant' only accept floating-point inputs, include to discard this restriction."); + return 
detail::compute_determinant::value>::call(m); + } + }; + + template + struct compute_inverse{}; + + template + struct compute_inverse<2, 2, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<2, 2, T, Q> call(mat<2, 2, T, Q> const& m) + { + T OneOverDeterminant = static_cast(1) / ( + + m[0][0] * m[1][1] + - m[1][0] * m[0][1]); + + mat<2, 2, T, Q> Inverse( + + m[1][1] * OneOverDeterminant, + - m[0][1] * OneOverDeterminant, + - m[1][0] * OneOverDeterminant, + + m[0][0] * OneOverDeterminant); + + return Inverse; + } + }; + + template + struct inv3x3 {}; + + template + struct inv3x3 + { + GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 3, T, Q> const& m) + { + // see: https://www.onlinemathstutor.org/post/3x3_inverses + + vec<4, T, Q> a = xyz0(m[0]); + vec<4, T, Q> b = xyz0(m[1]); + vec<4, T, Q> c = xyz0(m[2]); + + vec<4, T, Q> i0 = compute_cross::call(b, c); + vec<4, T, Q> i1 = compute_cross::call(c, a); + vec<4, T, Q> i2 = compute_cross::call(a, b); + + mat<3, 3, T, Q> Inverse; + Inverse[0] = xyz(i0); + Inverse[1] = xyz(i1); + Inverse[2] = xyz(i2); + Inverse = transpose(Inverse); + + T Determinant = compute_dot, T, true>::call(a, compute_cross::call(b, c)); + vec<3, T, Q> OneOverDeterminant(static_cast(1) / Determinant); + Inverse *= OneOverDeterminant; + return Inverse; + } + }; + + template + struct inv3x3 + { + GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 3, T, Q> const& m) + { + T OneOverDeterminant = static_cast(1) / ( + +m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2]) + - m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2]) + + m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2])); + + mat<3, 3, T, Q> Inverse; + Inverse[0][0] = +(m[1][1] * m[2][2] - m[2][1] * m[1][2]); + Inverse[1][0] = -(m[1][0] * m[2][2] - m[2][0] * m[1][2]); + Inverse[2][0] = +(m[1][0] * m[2][1] - m[2][0] * m[1][1]); + Inverse[0][1] = -(m[0][1] * m[2][2] - m[2][1] * m[0][2]); + Inverse[1][1] = +(m[0][0] * m[2][2] - m[2][0] * m[0][2]); + Inverse[2][1] = -(m[0][0] * m[2][1] - m[2][0] * m[0][1]); + Inverse[0][2] = +(m[0][1] * m[1][2] - m[1][1] * m[0][2]); + Inverse[1][2] = -(m[0][0] * m[1][2] - m[1][0] * m[0][2]); + Inverse[2][2] = +(m[0][0] * m[1][1] - m[1][0] * m[0][1]); + + Inverse *= OneOverDeterminant; + return Inverse; + } + }; + + template + struct compute_inverse<3, 3, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 3, T, Q> const& m) + { + return detail::inv3x3::value>::call(m); + } + }; + + template + struct compute_inverse<4, 4, T, Q, Aligned> + { + GLM_FUNC_QUALIFIER static mat<4, 4, T, Q> call(mat<4, 4, T, Q> const& m) + { + T Coef00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + T Coef02 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; + T Coef03 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; + + T Coef04 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + T Coef06 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; + T Coef07 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; + + T Coef08 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + T Coef10 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; + T Coef11 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; + + T Coef12 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + T Coef14 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; + T Coef15 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; + + T Coef16 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + T Coef18 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; + T Coef19 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; + + T Coef20 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + T Coef22 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; + T Coef23 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + vec<4, T, Q> 
Fac0(Coef00, Coef00, Coef02, Coef03); + vec<4, T, Q> Fac1(Coef04, Coef04, Coef06, Coef07); + vec<4, T, Q> Fac2(Coef08, Coef08, Coef10, Coef11); + vec<4, T, Q> Fac3(Coef12, Coef12, Coef14, Coef15); + vec<4, T, Q> Fac4(Coef16, Coef16, Coef18, Coef19); + vec<4, T, Q> Fac5(Coef20, Coef20, Coef22, Coef23); + + vec<4, T, Q> Vec0(m[1][0], m[0][0], m[0][0], m[0][0]); + vec<4, T, Q> Vec1(m[1][1], m[0][1], m[0][1], m[0][1]); + vec<4, T, Q> Vec2(m[1][2], m[0][2], m[0][2], m[0][2]); + vec<4, T, Q> Vec3(m[1][3], m[0][3], m[0][3], m[0][3]); + + vec<4, T, Q> Inv0(Vec1 * Fac0 - Vec2 * Fac1 + Vec3 * Fac2); + vec<4, T, Q> Inv1(Vec0 * Fac0 - Vec2 * Fac3 + Vec3 * Fac4); + vec<4, T, Q> Inv2(Vec0 * Fac1 - Vec1 * Fac3 + Vec3 * Fac5); + vec<4, T, Q> Inv3(Vec0 * Fac2 - Vec1 * Fac4 + Vec2 * Fac5); + + vec<4, T, Q> SignA(+1, -1, +1, -1); + vec<4, T, Q> SignB(-1, +1, -1, +1); + mat<4, 4, T, Q> Inverse(Inv0 * SignA, Inv1 * SignB, Inv2 * SignA, Inv3 * SignB); + + vec<4, T, Q> Row0(Inverse[0][0], Inverse[1][0], Inverse[2][0], Inverse[3][0]); + + vec<4, T, Q> Dot0(m[0] * Row0); + T Dot1 = (Dot0.x + Dot0.y) + (Dot0.z + Dot0.w); + + T OneOverDeterminant = static_cast(1) / Dot1; + + return Inverse * OneOverDeterminant; + } + }; +}//namespace detail + + template + GLM_FUNC_QUALIFIER mat matrixCompMult(mat const& x, mat const& y) + { + return detail::compute_matrixCompMult_type::is_iec559, detail::is_aligned::value>::call(x, y); + } + + template + GLM_FUNC_QUALIFIER typename detail::outerProduct_trait::type outerProduct(vec const& c, vec const& r) + { + return detail::compute_outerProduct_type::is_iec559>::call(c, r); + } + + template + GLM_FUNC_QUALIFIER typename mat::transpose_type transpose(mat const& m) + { + return detail::compute_transpose_type::is_iec559, detail::is_aligned::value>::call(m); + } + + template + GLM_FUNC_QUALIFIER T determinant(mat const& m) + { + return detail::compute_determinant_type::is_iec559, detail::is_aligned::value>::call(m); + } + + template + GLM_FUNC_QUALIFIER mat inverse(mat const& m) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'inverse' only accept floating-point inputs"); + return detail::compute_inverse::value>::call(m); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_matrix_simd.inl" +#endif + diff --git a/includes/glm/detail/func_matrix_simd.inl b/includes/glm/detail/func_matrix_simd.inl new file mode 100644 index 0000000..9e47bf7 --- /dev/null +++ b/includes/glm/detail/func_matrix_simd.inl @@ -0,0 +1,263 @@ +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +#include "type_mat4x4.hpp" +#include "../geometric.hpp" +#include "../simd/matrix.h" +#include + +namespace glm{ +namespace detail +{ +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE + template + struct compute_matrixCompMult<4, 4, float, Q, true> + { + GLM_STATIC_ASSERT(detail::is_aligned::value, "Specialization requires aligned"); + + GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& x, mat<4, 4, float, Q> const& y) + { + mat<4, 4, float, Q> Result; + glm_mat4_matrixCompMult( + &x[0].data, + &y[0].data, + &Result[0].data); + return Result; + } + }; +# endif + + template + struct compute_transpose<4, 4, float, Q, true> + { + GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m) + { + mat<4, 4, float, Q> Result; + glm_mat4_transpose(&m[0].data, &Result[0].data); + return Result; + } + }; + + template + struct compute_transpose<3, 3, float, Q, true> + { + GLM_FUNC_QUALIFIER static mat<3, 3, float, Q> call(mat<3, 3, 
float, Q> const& m) + { + mat<3, 3, float, Q> Result; + glm_mat3_transpose(&m[0].data, &Result[0].data); + return Result; + } + }; + + template + struct compute_determinant<4, 4, float, Q, true> + { + GLM_FUNC_QUALIFIER static float call(mat<4, 4, float, Q> const& m) + { + return _mm_cvtss_f32(glm_mat4_determinant(&m[0].data)); + } + }; + + template + struct compute_inverse<4, 4, float, Q, true> + { + GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m) + { + mat<4, 4, float, Q> Result; + glm_mat4_inverse(&m[0].data, &Result[0].data); + return Result; + } + }; +}//namespace detail + +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE + template<> + GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_lowp> outerProduct<4, 4, float, aligned_lowp>(vec<4, float, aligned_lowp> const& c, vec<4, float, aligned_lowp> const& r) + { + __m128 NativeResult[4]; + glm_mat4_outerProduct(c.data, r.data, NativeResult); + mat<4, 4, float, aligned_lowp> Result; + std::memcpy(&Result[0], &NativeResult[0], sizeof(Result)); + return Result; + } + + template<> + GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_mediump> outerProduct<4, 4, float, aligned_mediump>(vec<4, float, aligned_mediump> const& c, vec<4, float, aligned_mediump> const& r) + { + __m128 NativeResult[4]; + glm_mat4_outerProduct(c.data, r.data, NativeResult); + mat<4, 4, float, aligned_mediump> Result; + std::memcpy(&Result[0], &NativeResult[0], sizeof(Result)); + return Result; + } + + template<> + GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_highp> outerProduct<4, 4, float, aligned_highp>(vec<4, float, aligned_highp> const& c, vec<4, float, aligned_highp> const& r) + { + __m128 NativeResult[4]; + glm_mat4_outerProduct(c.data, r.data, NativeResult); + mat<4, 4, float, aligned_highp> Result; + std::memcpy(&Result[0], &NativeResult[0], sizeof(Result)); + return Result; + } +# endif +}//namespace glm + +#elif GLM_ARCH & GLM_ARCH_NEON_BIT + +namespace glm { +#if GLM_LANG & GLM_LANG_CXX11_FLAG + template + GLM_FUNC_QUALIFIER + typename std::enable_if::value, mat<4, 4, float, Q>>::type + operator*(mat<4, 4, float, Q> const & m1, mat<4, 4, float, Q> const & m2) + { + auto MulRow = [&](int l) { + float32x4_t const SrcA = m2[l].data; + + float32x4_t r = neon::mul_lane(m1[0].data, SrcA, 0); + r = neon::madd_lane(r, m1[1].data, SrcA, 1); + r = neon::madd_lane(r, m1[2].data, SrcA, 2); + r = neon::madd_lane(r, m1[3].data, SrcA, 3); + + return r; + }; + + mat<4, 4, float, aligned_highp> Result; + Result[0].data = MulRow(0); + Result[1].data = MulRow(1); + Result[2].data = MulRow(2); + Result[3].data = MulRow(3); + + return Result; + } +#endif // CXX11 + +namespace detail +{ + template + struct compute_inverse<4, 4, float, Q, true> + { + GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m) + { + float32x4_t const& m0 = m[0].data; + float32x4_t const& m1 = m[1].data; + float32x4_t const& m2 = m[2].data; + float32x4_t const& m3 = m[3].data; + + // m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // m[1][2] * m[3][3] - m[3][2] * m[1][3]; + // m[1][2] * m[2][3] - m[2][2] * m[1][3]; + + float32x4_t Fac0; + { + float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2)); + float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3); + float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2); + float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3)); + Fac0 = w0 * w1 - w2 * w3; + } + + // m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // m[2][1] * 
m[3][3] - m[3][1] * m[2][3]; + // m[1][1] * m[3][3] - m[3][1] * m[1][3]; + // m[1][1] * m[2][3] - m[2][1] * m[1][3]; + + float32x4_t Fac1; + { + float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1)); + float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3); + float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1); + float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3)); + Fac1 = w0 * w1 - w2 * w3; + } + + // m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // m[1][1] * m[3][2] - m[3][1] * m[1][2]; + // m[1][1] * m[2][2] - m[2][1] * m[1][2]; + + float32x4_t Fac2; + { + float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1)); + float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2); + float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1); + float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2)); + Fac2 = w0 * w1 - w2 * w3; + } + + // m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // m[1][0] * m[3][3] - m[3][0] * m[1][3]; + // m[1][0] * m[2][3] - m[2][0] * m[1][3]; + + float32x4_t Fac3; + { + float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0)); + float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3); + float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0); + float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3)); + Fac3 = w0 * w1 - w2 * w3; + } + + // m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // m[1][0] * m[3][2] - m[3][0] * m[1][2]; + // m[1][0] * m[2][2] - m[2][0] * m[1][2]; + + float32x4_t Fac4; + { + float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0)); + float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2); + float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0); + float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2)); + Fac4 = w0 * w1 - w2 * w3; + } + + // m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // m[1][0] * m[3][1] - m[3][0] * m[1][1]; + // m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + float32x4_t Fac5; + { + float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0)); + float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1); + float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0); + float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1)); + Fac5 = w0 * w1 - w2 * w3; + } + + float32x4_t Vec0 = neon::copy_lane(neon::dupq_lane(m0, 0), 0, m1, 0); // (m[1][0], m[0][0], m[0][0], m[0][0]); + float32x4_t Vec1 = neon::copy_lane(neon::dupq_lane(m0, 1), 0, m1, 1); // (m[1][1], m[0][1], m[0][1], m[0][1]); + float32x4_t Vec2 = neon::copy_lane(neon::dupq_lane(m0, 2), 0, m1, 2); // (m[1][2], m[0][2], m[0][2], m[0][2]); + float32x4_t Vec3 = neon::copy_lane(neon::dupq_lane(m0, 3), 0, m1, 3); // (m[1][3], m[0][3], m[0][3], m[0][3]); + + float32x4_t Inv0 = Vec1 * Fac0 - Vec2 * Fac1 + Vec3 * Fac2; + float32x4_t Inv1 = Vec0 * Fac0 - Vec2 * Fac3 + Vec3 * Fac4; + float32x4_t Inv2 = Vec0 * Fac1 - Vec1 * Fac3 + Vec3 * Fac5; + float32x4_t Inv3 = Vec0 * Fac2 - Vec1 * Fac4 + Vec2 * Fac5; + + float32x4_t r0 = float32x4_t{-1, +1, -1, +1} * Inv0; + float32x4_t r1 = float32x4_t{+1, -1, +1, -1} * Inv1; + float32x4_t r2 = float32x4_t{-1, +1, -1, +1} * Inv2; + float32x4_t r3 = float32x4_t{+1, -1, +1, -1} * Inv3; + + float32x4_t det 
= neon::mul_lane(r0, m0, 0); + det = neon::madd_lane(det, r1, m0, 1); + det = neon::madd_lane(det, r2, m0, 2); + det = neon::madd_lane(det, r3, m0, 3); + + float32x4_t rdet = vdupq_n_f32(1 / vgetq_lane_f32(det, 0)); + + mat<4, 4, float, Q> r; + r[0].data = vmulq_f32(r0, rdet); + r[1].data = vmulq_f32(r1, rdet); + r[2].data = vmulq_f32(r2, rdet); + r[3].data = vmulq_f32(r3, rdet); + return r; + } + }; +}//namespace detail +}//namespace glm +#endif diff --git a/includes/glm/detail/func_packing.inl b/includes/glm/detail/func_packing.inl new file mode 100644 index 0000000..234b093 --- /dev/null +++ b/includes/glm/detail/func_packing.inl @@ -0,0 +1,189 @@ +/// @ref core +/// @file glm/detail/func_packing.inl + +#include "../common.hpp" +#include "type_half.hpp" + +namespace glm +{ + GLM_FUNC_QUALIFIER uint packUnorm2x16(vec2 const& v) + { + union + { + unsigned short in[2]; + uint out; + } u; + + vec<2, unsigned short, defaultp> result(round(clamp(v, 0.0f, 1.0f) * 65535.0f)); + + u.in[0] = result[0]; + u.in[1] = result[1]; + + return u.out; + } + + GLM_FUNC_QUALIFIER vec2 unpackUnorm2x16(uint p) + { + union + { + uint in; + unsigned short out[2]; + } u; + + u.in = p; + + return vec2(u.out[0], u.out[1]) * 1.5259021896696421759365224689097e-5f; + } + + GLM_FUNC_QUALIFIER uint packSnorm2x16(vec2 const& v) + { + union + { + signed short in[2]; + uint out; + } u; + + vec<2, short, defaultp> result(round(clamp(v, -1.0f, 1.0f) * 32767.0f)); + + u.in[0] = result[0]; + u.in[1] = result[1]; + + return u.out; + } + + GLM_FUNC_QUALIFIER vec2 unpackSnorm2x16(uint p) + { + union + { + uint in; + signed short out[2]; + } u; + + u.in = p; + + return clamp(vec2(u.out[0], u.out[1]) * 3.0518509475997192297128208258309e-5f, -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER uint packUnorm4x8(vec4 const& v) + { + union + { + unsigned char in[4]; + uint out; + } u; + + vec<4, unsigned char, defaultp> result(round(clamp(v, 0.0f, 1.0f) * 255.0f)); + + u.in[0] = result[0]; + u.in[1] = result[1]; + u.in[2] = result[2]; + u.in[3] = result[3]; + + return u.out; + } + + GLM_FUNC_QUALIFIER vec4 unpackUnorm4x8(uint p) + { + union + { + uint in; + unsigned char out[4]; + } u; + + u.in = p; + + return vec4(u.out[0], u.out[1], u.out[2], u.out[3]) * 0.0039215686274509803921568627451f; + } + + GLM_FUNC_QUALIFIER uint packSnorm4x8(vec4 const& v) + { + union + { + signed char in[4]; + uint out; + } u; + + vec<4, signed char, defaultp> result(round(clamp(v, -1.0f, 1.0f) * 127.0f)); + + u.in[0] = result[0]; + u.in[1] = result[1]; + u.in[2] = result[2]; + u.in[3] = result[3]; + + return u.out; + } + + GLM_FUNC_QUALIFIER glm::vec4 unpackSnorm4x8(uint p) + { + union + { + uint in; + signed char out[4]; + } u; + + u.in = p; + + return clamp(vec4(u.out[0], u.out[1], u.out[2], u.out[3]) * 0.0078740157480315f, -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER double packDouble2x32(uvec2 const& v) + { + union + { + uint in[2]; + double out; + } u; + + u.in[0] = v[0]; + u.in[1] = v[1]; + + return u.out; + } + + GLM_FUNC_QUALIFIER uvec2 unpackDouble2x32(double v) + { + union + { + double in; + uint out[2]; + } u; + + u.in = v; + + return uvec2(u.out[0], u.out[1]); + } + + GLM_FUNC_QUALIFIER uint packHalf2x16(vec2 const& v) + { + union + { + signed short in[2]; + uint out; + } u; + + u.in[0] = detail::toFloat16(v.x); + u.in[1] = detail::toFloat16(v.y); + + return u.out; + } + + GLM_FUNC_QUALIFIER vec2 unpackHalf2x16(uint v) + { + union + { + uint in; + signed short out[2]; + } u; + + u.in = v; + + return vec2( + detail::toFloat32(u.out[0]), + 
detail::toFloat32(u.out[1])); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_packing_simd.inl" +#endif + diff --git a/includes/glm/detail/func_packing_simd.inl b/includes/glm/detail/func_packing_simd.inl new file mode 100644 index 0000000..fd0fe8b --- /dev/null +++ b/includes/glm/detail/func_packing_simd.inl @@ -0,0 +1,6 @@ +namespace glm{ +namespace detail +{ + +}//namespace detail +}//namespace glm diff --git a/includes/glm/detail/func_trigonometric.inl b/includes/glm/detail/func_trigonometric.inl new file mode 100644 index 0000000..9e6d9cf --- /dev/null +++ b/includes/glm/detail/func_trigonometric.inl @@ -0,0 +1,197 @@ +#include "_vectorize.hpp" +#include +#include + +namespace glm +{ + // radians + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType radians(genType degrees) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'radians' only accept floating-point input"); + + return degrees * static_cast(0.01745329251994329576923690768489); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec radians(vec const& v) + { + return detail::functor1::call(radians, v); + } + + // degrees + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType degrees(genType radians) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'degrees' only accept floating-point input"); + + return radians * static_cast(57.295779513082320876798154814105); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec degrees(vec const& v) + { + return detail::functor1::call(degrees, v); + } + + // sin + using ::std::sin; + + template + GLM_FUNC_QUALIFIER vec sin(vec const& v) + { + return detail::functor1::call(sin, v); + } + + // cos + using std::cos; + + template + GLM_FUNC_QUALIFIER vec cos(vec const& v) + { + return detail::functor1::call(cos, v); + } + + // tan + using std::tan; + + template + GLM_FUNC_QUALIFIER vec tan(vec const& v) + { + return detail::functor1::call(tan, v); + } + + // asin + using std::asin; + + template + GLM_FUNC_QUALIFIER vec asin(vec const& v) + { + return detail::functor1::call(asin, v); + } + + // acos + using std::acos; + + template + GLM_FUNC_QUALIFIER vec acos(vec const& v) + { + return detail::functor1::call(acos, v); + } + + // atan + template + GLM_FUNC_QUALIFIER genType atan(genType y, genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'atan' only accept floating-point input"); + + return ::std::atan2(y, x); + } + + template + GLM_FUNC_QUALIFIER vec atan(vec const& y, vec const& x) + { + return detail::functor2::call(::std::atan2, y, x); + } + + using std::atan; + + template + GLM_FUNC_QUALIFIER vec atan(vec const& v) + { + return detail::functor1::call(atan, v); + } + + // sinh + using std::sinh; + + template + GLM_FUNC_QUALIFIER vec sinh(vec const& v) + { + return detail::functor1::call(sinh, v); + } + + // cosh + using std::cosh; + + template + GLM_FUNC_QUALIFIER vec cosh(vec const& v) + { + return detail::functor1::call(cosh, v); + } + + // tanh + using std::tanh; + + template + GLM_FUNC_QUALIFIER vec tanh(vec const& v) + { + return detail::functor1::call(tanh, v); + } + + // asinh +# if GLM_HAS_CXX11_STL + using std::asinh; +# else + template + GLM_FUNC_QUALIFIER genType asinh(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'asinh' only accept floating-point input"); + + return (x < static_cast(0) ? static_cast(-1) : (x > static_cast(0) ? 
static_cast(1) : static_cast(0))) * log(std::abs(x) + sqrt(static_cast(1) + x * x)); + } +# endif + + template + GLM_FUNC_QUALIFIER vec asinh(vec const& v) + { + return detail::functor1::call(asinh, v); + } + + // acosh +# if GLM_HAS_CXX11_STL + using std::acosh; +# else + template + GLM_FUNC_QUALIFIER genType acosh(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acosh' only accept floating-point input"); + + if(x < static_cast(1)) + return static_cast(0); + return log(x + sqrt(x * x - static_cast(1))); + } +# endif + + template + GLM_FUNC_QUALIFIER vec acosh(vec const& v) + { + return detail::functor1::call(acosh, v); + } + + // atanh +# if GLM_HAS_CXX11_STL + using std::atanh; +# else + template + GLM_FUNC_QUALIFIER genType atanh(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'atanh' only accept floating-point input"); + + if(std::abs(x) >= static_cast(1)) + return 0; + return static_cast(0.5) * log((static_cast(1) + x) / (static_cast(1) - x)); + } +# endif + + template + GLM_FUNC_QUALIFIER vec atanh(vec const& v) + { + return detail::functor1::call(atanh, v); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_trigonometric_simd.inl" +#endif + diff --git a/includes/glm/detail/func_trigonometric_simd.inl b/includes/glm/detail/func_trigonometric_simd.inl new file mode 100644 index 0000000..e69de29 diff --git a/includes/glm/detail/func_vector_relational.inl b/includes/glm/detail/func_vector_relational.inl new file mode 100644 index 0000000..80c9e87 --- /dev/null +++ b/includes/glm/detail/func_vector_relational.inl @@ -0,0 +1,87 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec lessThan(vec const& x, vec const& y) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = x[i] < y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec lessThanEqual(vec const& x, vec const& y) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = x[i] <= y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec greaterThan(vec const& x, vec const& y) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = x[i] > y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec greaterThanEqual(vec const& x, vec const& y) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = x[i] >= y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = x[i] == y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = x[i] != y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool any(vec const& v) + { + bool Result = false; + for(length_t i = 0; i < L; ++i) + Result = Result || v[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool all(vec const& v) + { + bool Result = true; + for(length_t i = 0; i < L; ++i) + Result = Result && v[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec not_(vec const& v) + { + vec Result(true); + for(length_t i = 0; i < L; ++i) + Result[i] = !v[i]; + return Result; + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "func_vector_relational_simd.inl" +#endif diff --git 
a/includes/glm/detail/func_vector_relational_simd.inl b/includes/glm/detail/func_vector_relational_simd.inl new file mode 100644 index 0000000..fd0fe8b --- /dev/null +++ b/includes/glm/detail/func_vector_relational_simd.inl @@ -0,0 +1,6 @@ +namespace glm{ +namespace detail +{ + +}//namespace detail +}//namespace glm diff --git a/includes/glm/detail/glm.cpp b/includes/glm/detail/glm.cpp new file mode 100644 index 0000000..e0755bd --- /dev/null +++ b/includes/glm/detail/glm.cpp @@ -0,0 +1,263 @@ +/// @ref core +/// @file glm/glm.cpp + +#ifndef GLM_ENABLE_EXPERIMENTAL +#define GLM_ENABLE_EXPERIMENTAL +#endif +#include +#include +#include +#include +#include +#include + +namespace glm +{ +// tvec1 type explicit instantiation +template struct vec<1, uint8, lowp>; +template struct vec<1, uint16, lowp>; +template struct vec<1, uint32, lowp>; +template struct vec<1, uint64, lowp>; +template struct vec<1, int8, lowp>; +template struct vec<1, int16, lowp>; +template struct vec<1, int32, lowp>; +template struct vec<1, int64, lowp>; +template struct vec<1, float32, lowp>; +template struct vec<1, float64, lowp>; + +template struct vec<1, uint8, mediump>; +template struct vec<1, uint16, mediump>; +template struct vec<1, uint32, mediump>; +template struct vec<1, uint64, mediump>; +template struct vec<1, int8, mediump>; +template struct vec<1, int16, mediump>; +template struct vec<1, int32, mediump>; +template struct vec<1, int64, mediump>; +template struct vec<1, float32, mediump>; +template struct vec<1, float64, mediump>; + +template struct vec<1, uint8, highp>; +template struct vec<1, uint16, highp>; +template struct vec<1, uint32, highp>; +template struct vec<1, uint64, highp>; +template struct vec<1, int8, highp>; +template struct vec<1, int16, highp>; +template struct vec<1, int32, highp>; +template struct vec<1, int64, highp>; +template struct vec<1, float32, highp>; +template struct vec<1, float64, highp>; + +// tvec2 type explicit instantiation +template struct vec<2, uint8, lowp>; +template struct vec<2, uint16, lowp>; +template struct vec<2, uint32, lowp>; +template struct vec<2, uint64, lowp>; +template struct vec<2, int8, lowp>; +template struct vec<2, int16, lowp>; +template struct vec<2, int32, lowp>; +template struct vec<2, int64, lowp>; +template struct vec<2, float32, lowp>; +template struct vec<2, float64, lowp>; + +template struct vec<2, uint8, mediump>; +template struct vec<2, uint16, mediump>; +template struct vec<2, uint32, mediump>; +template struct vec<2, uint64, mediump>; +template struct vec<2, int8, mediump>; +template struct vec<2, int16, mediump>; +template struct vec<2, int32, mediump>; +template struct vec<2, int64, mediump>; +template struct vec<2, float32, mediump>; +template struct vec<2, float64, mediump>; + +template struct vec<2, uint8, highp>; +template struct vec<2, uint16, highp>; +template struct vec<2, uint32, highp>; +template struct vec<2, uint64, highp>; +template struct vec<2, int8, highp>; +template struct vec<2, int16, highp>; +template struct vec<2, int32, highp>; +template struct vec<2, int64, highp>; +template struct vec<2, float32, highp>; +template struct vec<2, float64, highp>; + +// tvec3 type explicit instantiation +template struct vec<3, uint8, lowp>; +template struct vec<3, uint16, lowp>; +template struct vec<3, uint32, lowp>; +template struct vec<3, uint64, lowp>; +template struct vec<3, int8, lowp>; +template struct vec<3, int16, lowp>; +template struct vec<3, int32, lowp>; +template struct vec<3, int64, lowp>; +template struct vec<3, float32, 
lowp>; +template struct vec<3, float64, lowp>; + +template struct vec<3, uint8, mediump>; +template struct vec<3, uint16, mediump>; +template struct vec<3, uint32, mediump>; +template struct vec<3, uint64, mediump>; +template struct vec<3, int8, mediump>; +template struct vec<3, int16, mediump>; +template struct vec<3, int32, mediump>; +template struct vec<3, int64, mediump>; +template struct vec<3, float32, mediump>; +template struct vec<3, float64, mediump>; + +template struct vec<3, uint8, highp>; +template struct vec<3, uint16, highp>; +template struct vec<3, uint32, highp>; +template struct vec<3, uint64, highp>; +template struct vec<3, int8, highp>; +template struct vec<3, int16, highp>; +template struct vec<3, int32, highp>; +template struct vec<3, int64, highp>; +template struct vec<3, float32, highp>; +template struct vec<3, float64, highp>; + +// tvec4 type explicit instantiation +template struct vec<4, uint8, lowp>; +template struct vec<4, uint16, lowp>; +template struct vec<4, uint32, lowp>; +template struct vec<4, uint64, lowp>; +template struct vec<4, int8, lowp>; +template struct vec<4, int16, lowp>; +template struct vec<4, int32, lowp>; +template struct vec<4, int64, lowp>; +template struct vec<4, float32, lowp>; +template struct vec<4, float64, lowp>; + +template struct vec<4, uint8, mediump>; +template struct vec<4, uint16, mediump>; +template struct vec<4, uint32, mediump>; +template struct vec<4, uint64, mediump>; +template struct vec<4, int8, mediump>; +template struct vec<4, int16, mediump>; +template struct vec<4, int32, mediump>; +template struct vec<4, int64, mediump>; +template struct vec<4, float32, mediump>; +template struct vec<4, float64, mediump>; + +template struct vec<4, uint8, highp>; +template struct vec<4, uint16, highp>; +template struct vec<4, uint32, highp>; +template struct vec<4, uint64, highp>; +template struct vec<4, int8, highp>; +template struct vec<4, int16, highp>; +template struct vec<4, int32, highp>; +template struct vec<4, int64, highp>; +template struct vec<4, float32, highp>; +template struct vec<4, float64, highp>; + +// tmat2x2 type explicit instantiation +template struct mat<2, 2, float32, lowp>; +template struct mat<2, 2, float64, lowp>; + +template struct mat<2, 2, float32, mediump>; +template struct mat<2, 2, float64, mediump>; + +template struct mat<2, 2, float32, highp>; +template struct mat<2, 2, float64, highp>; + +// tmat2x3 type explicit instantiation +template struct mat<2, 3, float32, lowp>; +template struct mat<2, 3, float64, lowp>; + +template struct mat<2, 3, float32, mediump>; +template struct mat<2, 3, float64, mediump>; + +template struct mat<2, 3, float32, highp>; +template struct mat<2, 3, float64, highp>; + +// tmat2x4 type explicit instantiation +template struct mat<2, 4, float32, lowp>; +template struct mat<2, 4, float64, lowp>; + +template struct mat<2, 4, float32, mediump>; +template struct mat<2, 4, float64, mediump>; + +template struct mat<2, 4, float32, highp>; +template struct mat<2, 4, float64, highp>; + +// tmat3x2 type explicit instantiation +template struct mat<3, 2, float32, lowp>; +template struct mat<3, 2, float64, lowp>; + +template struct mat<3, 2, float32, mediump>; +template struct mat<3, 2, float64, mediump>; + +template struct mat<3, 2, float32, highp>; +template struct mat<3, 2, float64, highp>; + +// tmat3x3 type explicit instantiation +template struct mat<3, 3, float32, lowp>; +template struct mat<3, 3, float64, lowp>; + +template struct mat<3, 3, float32, mediump>; +template struct mat<3, 
3, float64, mediump>; + +template struct mat<3, 3, float32, highp>; +template struct mat<3, 3, float64, highp>; + +// tmat3x4 type explicit instantiation +template struct mat<3, 4, float32, lowp>; +template struct mat<3, 4, float64, lowp>; + +template struct mat<3, 4, float32, mediump>; +template struct mat<3, 4, float64, mediump>; + +template struct mat<3, 4, float32, highp>; +template struct mat<3, 4, float64, highp>; + +// tmat4x2 type explicit instantiation +template struct mat<4, 2, float32, lowp>; +template struct mat<4, 2, float64, lowp>; + +template struct mat<4, 2, float32, mediump>; +template struct mat<4, 2, float64, mediump>; + +template struct mat<4, 2, float32, highp>; +template struct mat<4, 2, float64, highp>; + +// tmat4x3 type explicit instantiation +template struct mat<4, 3, float32, lowp>; +template struct mat<4, 3, float64, lowp>; + +template struct mat<4, 3, float32, mediump>; +template struct mat<4, 3, float64, mediump>; + +template struct mat<4, 3, float32, highp>; +template struct mat<4, 3, float64, highp>; + +// tmat4x4 type explicit instantiation +template struct mat<4, 4, float32, lowp>; +template struct mat<4, 4, float64, lowp>; + +template struct mat<4, 4, float32, mediump>; +template struct mat<4, 4, float64, mediump>; + +template struct mat<4, 4, float32, highp>; +template struct mat<4, 4, float64, highp>; + +// tquat type explicit instantiation +template struct qua; +template struct qua; + +template struct qua; +template struct qua; + +template struct qua; +template struct qua; + +//tdualquat type explicit instantiation +template struct tdualquat; +template struct tdualquat; + +template struct tdualquat; +template struct tdualquat; + +template struct tdualquat; +template struct tdualquat; + +}//namespace glm + diff --git a/includes/glm/detail/qualifier.hpp b/includes/glm/detail/qualifier.hpp new file mode 100644 index 0000000..cb4e108 --- /dev/null +++ b/includes/glm/detail/qualifier.hpp @@ -0,0 +1,293 @@ +#pragma once + +#include "setup.hpp" + +namespace glm +{ + /// Qualify GLM types in term of alignment (packed, aligned) and precision in term of ULPs (lowp, mediump, highp) + enum qualifier + { + packed_highp, ///< Typed data is tightly packed in memory and operations are executed with high precision in term of ULPs + packed_mediump, ///< Typed data is tightly packed in memory and operations are executed with medium precision in term of ULPs for higher performance + packed_lowp, ///< Typed data is tightly packed in memory and operations are executed with low precision in term of ULPs to maximize performance + +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE + aligned_highp, ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with high precision in term of ULPs + aligned_mediump, ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with high precision in term of ULPs for higher performance + aligned_lowp, // ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with high precision in term of ULPs to maximize performance + aligned = aligned_highp, ///< By default aligned qualifier is also high precision +# endif + + highp = packed_highp, ///< By default highp qualifier is also packed + mediump = packed_mediump, ///< By default mediump qualifier is also packed + lowp = packed_lowp, ///< By default lowp qualifier is also packed + packed = packed_highp, ///< By default packed qualifier is also high precision + +# if GLM_CONFIG_ALIGNED_GENTYPES 
== GLM_ENABLE && defined(GLM_FORCE_DEFAULT_ALIGNED_GENTYPES) + defaultp = aligned_highp +# else + defaultp = highp +# endif + }; + + typedef qualifier precision; + + template struct vec; + template struct mat; + template struct qua; + +# if GLM_HAS_TEMPLATE_ALIASES + template using tvec1 = vec<1, T, Q>; + template using tvec2 = vec<2, T, Q>; + template using tvec3 = vec<3, T, Q>; + template using tvec4 = vec<4, T, Q>; + template using tmat2x2 = mat<2, 2, T, Q>; + template using tmat2x3 = mat<2, 3, T, Q>; + template using tmat2x4 = mat<2, 4, T, Q>; + template using tmat3x2 = mat<3, 2, T, Q>; + template using tmat3x3 = mat<3, 3, T, Q>; + template using tmat3x4 = mat<3, 4, T, Q>; + template using tmat4x2 = mat<4, 2, T, Q>; + template using tmat4x3 = mat<4, 3, T, Q>; + template using tmat4x4 = mat<4, 4, T, Q>; + template using tquat = qua; +# endif + +namespace detail +{ + template + struct is_aligned + { + static const bool value = false; + }; + +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE + template<> + struct is_aligned + { + static const bool value = true; + }; + + template<> + struct is_aligned + { + static const bool value = true; + }; + + template<> + struct is_aligned + { + static const bool value = true; + }; +# endif + + template + struct storage + { + typedef struct type { + T data[L]; + } type; + }; + +# if GLM_HAS_ALIGNOF + template + struct storage + { + typedef struct alignas(L * sizeof(T)) type { + T data[L]; + } type; + }; + + template + struct storage<3, T, true> + { + typedef struct alignas(4 * sizeof(T)) type { + T data[4]; + } type; + }; +# endif + +# if GLM_ARCH & GLM_ARCH_SSE2_BIT + template<> + struct storage<4, float, true> + { + typedef glm_f32vec4 type; + }; + + template<> + struct storage<4, int, true> + { + typedef glm_i32vec4 type; + }; + + template<> + struct storage<4, unsigned int, true> + { + typedef glm_u32vec4 type; + }; + + template<> + struct storage<3, float, true> + { + typedef glm_f32vec4 type; + }; + + template<> + struct storage<3, int, true> + { + typedef glm_i32vec4 type; + }; + + template<> + struct storage<3, unsigned int, true> + { + typedef glm_i32vec4 type; + }; + + template<> + struct storage<2, double, true> + { + typedef glm_f64vec2 type; + }; + + template<> + struct storage<2, detail::int64, true> + { + typedef glm_i64vec2 type; + }; + + template<> + struct storage<2, detail::uint64, true> + { + typedef glm_u64vec2 type; + }; + + + template<> + struct storage<3, detail::uint64, true> + { + typedef glm_u64vec2 type; + }; + + template<> + struct storage<4, double, true> + { +# if (GLM_ARCH & GLM_ARCH_AVX_BIT) + typedef glm_f64vec4 type; +# else + struct type + { + glm_f64vec2 data[2]; + GLM_CONSTEXPR glm_f64vec2 getv(int i) const { + return data[i]; + } + GLM_CONSTEXPR void setv(int i, const glm_f64vec2& v) { + data[i] = v; + } + }; +# endif + }; + + + template<> + struct storage<3, double, true> : public storage<4, double, true> + {}; + +# endif + +# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) + template<> + struct storage<4, detail::int64, true> + { + typedef glm_i64vec4 type; + }; + + template<> + struct storage<4, detail::uint64, true> + { + typedef glm_u64vec4 type; + }; +# endif + +# if GLM_ARCH & GLM_ARCH_NEON_BIT + template<> + struct storage<4, float, true> + { + typedef glm_f32vec4 type; + }; + + template<> + struct storage<3, float, true> : public storage<4, float, true> + {}; + + template<> + struct storage<4, int, true> + { + typedef glm_i32vec4 type; + }; + + template<> + struct storage<3, int, true> : public storage<4, int, true> + 
{}; + + template<> + struct storage<4, unsigned int, true> + { + typedef glm_u32vec4 type; + }; + + template<> + struct storage<3, unsigned int, true> : public storage<4, unsigned int, true> + {}; + + template<> + struct storage<3, double, true> + { + typedef struct alignas(4 * sizeof(double)) type { + double data[4]; + } type; + }; + +# endif + + enum genTypeEnum + { + GENTYPE_VEC, + GENTYPE_MAT, + GENTYPE_QUAT + }; + + template + struct genTypeTrait + {}; + + template + struct genTypeTrait > + { + static const genTypeEnum GENTYPE = GENTYPE_MAT; + }; + + template + struct init_gentype + { + }; + + template + struct init_gentype + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genType identity() + { + return genType(1, 0, 0, 0); + } + }; + + template + struct init_gentype + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genType identity() + { + return genType(1); + } + }; +}//namespace detail +}//namespace glm diff --git a/includes/glm/detail/setup.hpp b/includes/glm/detail/setup.hpp new file mode 100644 index 0000000..5e94f2d --- /dev/null +++ b/includes/glm/detail/setup.hpp @@ -0,0 +1,1191 @@ +#ifndef GLM_SETUP_INCLUDED + +#include +#include + +#define GLM_VERSION_MAJOR 1 +#define GLM_VERSION_MINOR 0 +#define GLM_VERSION_PATCH 2 +#define GLM_VERSION_REVISION 0 // Deprecated +#define GLM_VERSION 1000 // Deprecated + +#define GLM_MAKE_API_VERSION(variant, major, minor, patch) \ + ((((uint32_t)(variant)) << 29U) | (((uint32_t)(major)) << 22U) | (((uint32_t)(minor)) << 12U) | ((uint32_t)(patch))) + +#define GLM_VERSION_COMPLETE GLM_MAKE_API_VERSION(0, GLM_VERSION_MAJOR, GLM_VERSION_MINOR, GLM_VERSION_PATCH) + +#define GLM_SETUP_INCLUDED GLM_VERSION + +#define GLM_GET_VERSION_VARIANT(version) ((uint32_t)(version) >> 29U) +#define GLM_GET_VERSION_MAJOR(version) (((uint32_t)(version) >> 22U) & 0x7FU) +#define GLM_GET_VERSION_MINOR(version) (((uint32_t)(version) >> 12U) & 0x3FFU) +#define GLM_GET_VERSION_PATCH(version) ((uint32_t)(version) & 0xFFFU) + +/////////////////////////////////////////////////////////////////////////////////// +// Active states + +#define GLM_DISABLE 0 +#define GLM_ENABLE 1 + +/////////////////////////////////////////////////////////////////////////////////// +// Messages + +#if defined(GLM_FORCE_MESSAGES) +# define GLM_MESSAGES GLM_ENABLE +#else +# define GLM_MESSAGES GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Detect the platform + +#include "../simd/platform.h" + +/////////////////////////////////////////////////////////////////////////////////// +// Build model + +#if defined(_M_ARM64) || defined(__LP64__) || defined(_M_X64) || defined(__ppc64__) || defined(__x86_64__) +# define GLM_MODEL GLM_MODEL_64 +#elif defined(__i386__) || defined(__ppc__) || defined(__ILP32__) || defined(_M_ARM) +# define GLM_MODEL GLM_MODEL_32 +#else +# define GLM_MODEL GLM_MODEL_32 +#endif// + +#if !defined(GLM_MODEL) && GLM_COMPILER != 0 +# error "GLM_MODEL undefined, your compiler may not be supported by GLM. Add #define GLM_MODEL 0 to ignore this message." 
+#endif//GLM_MODEL + +/////////////////////////////////////////////////////////////////////////////////// +// C++ Version + +// User defines: GLM_FORCE_CXX98, GLM_FORCE_CXX03, GLM_FORCE_CXX11, GLM_FORCE_CXX14, GLM_FORCE_CXX17, GLM_FORCE_CXX2A + +#define GLM_LANG_CXX98_FLAG (1 << 1) +#define GLM_LANG_CXX03_FLAG (1 << 2) +#define GLM_LANG_CXX0X_FLAG (1 << 3) +#define GLM_LANG_CXX11_FLAG (1 << 4) +#define GLM_LANG_CXX14_FLAG (1 << 5) +#define GLM_LANG_CXX17_FLAG (1 << 6) +#define GLM_LANG_CXX20_FLAG (1 << 7) +#define GLM_LANG_CXXMS_FLAG (1 << 8) +#define GLM_LANG_CXXGNU_FLAG (1 << 9) + +#define GLM_LANG_CXX98 GLM_LANG_CXX98_FLAG +#define GLM_LANG_CXX03 (GLM_LANG_CXX98 | GLM_LANG_CXX03_FLAG) +#define GLM_LANG_CXX0X (GLM_LANG_CXX03 | GLM_LANG_CXX0X_FLAG) +#define GLM_LANG_CXX11 (GLM_LANG_CXX0X | GLM_LANG_CXX11_FLAG) +#define GLM_LANG_CXX14 (GLM_LANG_CXX11 | GLM_LANG_CXX14_FLAG) +#define GLM_LANG_CXX17 (GLM_LANG_CXX14 | GLM_LANG_CXX17_FLAG) +#define GLM_LANG_CXX20 (GLM_LANG_CXX17 | GLM_LANG_CXX20_FLAG) +#define GLM_LANG_CXXMS GLM_LANG_CXXMS_FLAG +#define GLM_LANG_CXXGNU GLM_LANG_CXXGNU_FLAG + +#if (defined(_MSC_EXTENSIONS)) +# define GLM_LANG_EXT GLM_LANG_CXXMS_FLAG +#elif ((GLM_COMPILER & (GLM_COMPILER_CLANG | GLM_COMPILER_GCC)) && (GLM_ARCH & GLM_ARCH_SIMD_BIT)) +# define GLM_LANG_EXT GLM_LANG_CXXMS_FLAG +#else +# define GLM_LANG_EXT 0 +#endif + +#if (defined(GLM_FORCE_CXX_UNKNOWN)) +# define GLM_LANG 0 +#elif defined(GLM_FORCE_CXX20) +# define GLM_LANG (GLM_LANG_CXX20 | GLM_LANG_EXT) +# define GLM_LANG_STL11_FORCED +#elif defined(GLM_FORCE_CXX17) +# define GLM_LANG (GLM_LANG_CXX17 | GLM_LANG_EXT) +# define GLM_LANG_STL11_FORCED +#elif defined(GLM_FORCE_CXX14) +# define GLM_LANG (GLM_LANG_CXX14 | GLM_LANG_EXT) +# define GLM_LANG_STL11_FORCED +#elif defined(GLM_FORCE_CXX11) +# define GLM_LANG (GLM_LANG_CXX11 | GLM_LANG_EXT) +# define GLM_LANG_STL11_FORCED +#elif defined(GLM_FORCE_CXX03) +# define GLM_LANG (GLM_LANG_CXX03 | GLM_LANG_EXT) +#elif defined(GLM_FORCE_CXX98) +# define GLM_LANG (GLM_LANG_CXX98 | GLM_LANG_EXT) +#else +# if GLM_COMPILER & GLM_COMPILER_VC && defined(_MSVC_LANG) +# if GLM_COMPILER >= GLM_COMPILER_VC15_7 +# define GLM_LANG_PLATFORM _MSVC_LANG +# elif GLM_COMPILER >= GLM_COMPILER_VC15 +# if _MSVC_LANG > 201402L +# define GLM_LANG_PLATFORM 201402L +# else +# define GLM_LANG_PLATFORM _MSVC_LANG +# endif +# else +# define GLM_LANG_PLATFORM 0 +# endif +# else +# define GLM_LANG_PLATFORM 0 +# endif + +# if __cplusplus > 201703L || GLM_LANG_PLATFORM > 201703L +# define GLM_LANG (GLM_LANG_CXX20 | GLM_LANG_EXT) +# elif __cplusplus == 201703L || GLM_LANG_PLATFORM == 201703L +# define GLM_LANG (GLM_LANG_CXX17 | GLM_LANG_EXT) +# elif __cplusplus == 201402L || __cplusplus == 201406L || __cplusplus == 201500L || GLM_LANG_PLATFORM == 201402L +# define GLM_LANG (GLM_LANG_CXX14 | GLM_LANG_EXT) +# elif __cplusplus == 201103L || GLM_LANG_PLATFORM == 201103L +# define GLM_LANG (GLM_LANG_CXX11 | GLM_LANG_EXT) +# elif defined(__INTEL_CXX11_MODE__) || defined(_MSC_VER) || defined(__GXX_EXPERIMENTAL_CXX0X__) +# define GLM_LANG (GLM_LANG_CXX0X | GLM_LANG_EXT) +# elif __cplusplus == 199711L +# define GLM_LANG (GLM_LANG_CXX98 | GLM_LANG_EXT) +# else +# define GLM_LANG (0 | GLM_LANG_EXT) +# endif +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Has of C++ features + +// http://clang.llvm.org/cxx_status.html +// http://gcc.gnu.org/projects/cxx0x.html +// http://msdn.microsoft.com/en-us/library/vstudio/hh567368(v=vs.120).aspx + +// Android has multiple 
STLs but C++11 STL detection doesn't always work #284 #564 +#if GLM_PLATFORM == GLM_PLATFORM_ANDROID && !defined(GLM_LANG_STL11_FORCED) +# define GLM_HAS_CXX11_STL 0 +#elif (GLM_COMPILER & GLM_COMPILER_CUDA_RTC) == GLM_COMPILER_CUDA_RTC +# define GLM_HAS_CXX11_STL 0 +#elif (GLM_COMPILER & GLM_COMPILER_HIP) +# define GLM_HAS_CXX11_STL 0 +#elif GLM_COMPILER & GLM_COMPILER_CLANG +# if (defined(_LIBCPP_VERSION) || (GLM_LANG & GLM_LANG_CXX11_FLAG) || defined(GLM_LANG_STL11_FORCED)) +# define GLM_HAS_CXX11_STL 1 +# else +# define GLM_HAS_CXX11_STL 0 +# endif +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_CXX11_STL 1 +#else +# define GLM_HAS_CXX11_STL ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_GCC) && (GLM_COMPILER >= GLM_COMPILER_GCC48)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ + ((GLM_PLATFORM != GLM_PLATFORM_WINDOWS) && (GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL15)))) +#endif + +// N1720 +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_STATIC_ASSERT __has_feature(cxx_static_assert) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_STATIC_ASSERT 1 +#else +# define GLM_HAS_STATIC_ASSERT ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N1988 +#if GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_EXTENDED_INTEGER_TYPE 1 +#else +# define GLM_HAS_EXTENDED_INTEGER_TYPE (\ + ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_VC)) || \ + ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_CLANG)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP))) +#endif + +// N2672 Initializer lists http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2672.htm +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_INITIALIZER_LISTS __has_feature(cxx_generalized_initializers) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_INITIALIZER_LISTS 1 +#else +# define GLM_HAS_INITIALIZER_LISTS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)) || \ + ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL14)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N2544 Unrestricted unions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_UNRESTRICTED_UNIONS __has_feature(cxx_unrestricted_unions) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_UNRESTRICTED_UNIONS 1 +#else +# define GLM_HAS_UNRESTRICTED_UNIONS (GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + (GLM_COMPILER & GLM_COMPILER_VC) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP))) +#endif + +// N2346 +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_DEFAULTED_FUNCTIONS __has_feature(cxx_defaulted_functions) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_DEFAULTED_FUNCTIONS 1 +#else +# define GLM_HAS_DEFAULTED_FUNCTIONS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ + ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ + (GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP))) +#endif + +// N2118 +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define 
GLM_HAS_RVALUE_REFERENCES __has_feature(cxx_rvalue_references) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_RVALUE_REFERENCES 1 +#else +# define GLM_HAS_RVALUE_REFERENCES ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_VC)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N2437 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2437.pdf +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS __has_feature(cxx_explicit_conversions) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS 1 +#else +# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL14)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N2258 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2258.pdf +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_TEMPLATE_ALIASES __has_feature(cxx_alias_templates) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_TEMPLATE_ALIASES 1 +#else +# define GLM_HAS_TEMPLATE_ALIASES ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N2930 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2009/n2930.html +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_RANGE_FOR __has_feature(cxx_range_for) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_RANGE_FOR 1 +#else +# define GLM_HAS_RANGE_FOR ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N2341 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf +#if GLM_COMPILER & GLM_COMPILER_CLANG +# define GLM_HAS_ALIGNOF __has_feature(cxx_alignas) +#elif GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_ALIGNOF 1 +#else +# define GLM_HAS_ALIGNOF ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL15)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC14)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// N2235 Generalized Constant Expressions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2235.pdf +// N3652 Extended Constant Expressions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3652.html +#if (GLM_ARCH & GLM_ARCH_SIMD_BIT) // Compiler SIMD intrinsics don't support constexpr... 
+# define GLM_HAS_CONSTEXPR 0 +#elif (GLM_COMPILER & GLM_COMPILER_CLANG) +# define GLM_HAS_CONSTEXPR __has_feature(cxx_relaxed_constexpr) +#elif (GLM_LANG & GLM_LANG_CXX14_FLAG) +# define GLM_HAS_CONSTEXPR 1 +#else +# define GLM_HAS_CONSTEXPR ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && GLM_HAS_INITIALIZER_LISTS && (\ + ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL17)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)))) +#endif + +#if GLM_HAS_CONSTEXPR +# define GLM_CONSTEXPR constexpr +#else +# define GLM_CONSTEXPR +#endif + +// +#if GLM_HAS_CONSTEXPR +# if (GLM_COMPILER & GLM_COMPILER_CLANG) +# if __has_feature(cxx_if_constexpr) +# define GLM_HAS_IF_CONSTEXPR 1 +# else +# define GLM_HAS_IF_CONSTEXPR 0 +# endif +# elif (GLM_LANG & GLM_LANG_CXX17_FLAG) +# define GLM_HAS_IF_CONSTEXPR 1 +# else +# define GLM_HAS_IF_CONSTEXPR 0 +# endif +#else +# define GLM_HAS_IF_CONSTEXPR 0 +#endif + +#if GLM_HAS_IF_CONSTEXPR +# define GLM_IF_CONSTEXPR if constexpr +#else +# define GLM_IF_CONSTEXPR if +#endif + +// [nodiscard] +#if GLM_LANG & GLM_LANG_CXX17_FLAG +# define GLM_NODISCARD [[nodiscard]] +#else +# define GLM_NODISCARD +#endif + +// +#if GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_ASSIGNABLE 1 +#else +# define GLM_HAS_ASSIGNABLE ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)) || \ + ((GLM_COMPILER & GLM_COMPILER_GCC) && (GLM_COMPILER >= GLM_COMPILER_GCC49)))) +#endif + +// +#define GLM_HAS_TRIVIAL_QUERIES 0 + +// +#if GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_MAKE_SIGNED 1 +#else +# define GLM_HAS_MAKE_SIGNED ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ + ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ + ((GLM_COMPILER & GLM_COMPILER_HIP)))) +#endif + +// +#if defined(GLM_FORCE_INTRINSICS) +# define GLM_HAS_BITSCAN_WINDOWS ((GLM_PLATFORM & GLM_PLATFORM_WINDOWS) && (\ + ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ + ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC14) && (GLM_ARCH & GLM_ARCH_X86_BIT)))) +#else +# define GLM_HAS_BITSCAN_WINDOWS 0 +#endif + +#if GLM_LANG & GLM_LANG_CXX11_FLAG +# define GLM_HAS_NOEXCEPT 1 +#else +# define GLM_HAS_NOEXCEPT 0 +#endif + +#if GLM_HAS_NOEXCEPT +# define GLM_NOEXCEPT noexcept +#else +# define GLM_NOEXCEPT +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// OpenMP +#ifdef _OPENMP +# if GLM_COMPILER & GLM_COMPILER_GCC +# if GLM_COMPILER >= GLM_COMPILER_GCC61 +# define GLM_HAS_OPENMP 45 +# elif GLM_COMPILER >= GLM_COMPILER_GCC49 +# define GLM_HAS_OPENMP 40 +# elif GLM_COMPILER >= GLM_COMPILER_GCC47 +# define GLM_HAS_OPENMP 31 +# else +# define GLM_HAS_OPENMP 0 +# endif +# elif GLM_COMPILER & GLM_COMPILER_CLANG +# if GLM_COMPILER >= GLM_COMPILER_CLANG38 +# define GLM_HAS_OPENMP 31 +# else +# define GLM_HAS_OPENMP 0 +# endif +# elif GLM_COMPILER & GLM_COMPILER_VC +# define GLM_HAS_OPENMP 20 +# elif GLM_COMPILER & GLM_COMPILER_INTEL +# if GLM_COMPILER >= GLM_COMPILER_INTEL16 +# define GLM_HAS_OPENMP 40 +# else +# define GLM_HAS_OPENMP 0 +# endif +# else +# define GLM_HAS_OPENMP 0 +# endif +#else +# define GLM_HAS_OPENMP 0 +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// nullptr + +#if GLM_LANG & GLM_LANG_CXX0X_FLAG +# define GLM_CONFIG_NULLPTR GLM_ENABLE +#else +# define GLM_CONFIG_NULLPTR GLM_DISABLE +#endif + +#if GLM_CONFIG_NULLPTR == GLM_ENABLE 
+# define GLM_NULLPTR nullptr +#else +# define GLM_NULLPTR 0 +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Static assert + +#if GLM_HAS_STATIC_ASSERT +# define GLM_STATIC_ASSERT(x, message) static_assert(x, message) +#elif GLM_COMPILER & GLM_COMPILER_VC +# define GLM_STATIC_ASSERT(x, message) typedef char __CASSERT__##__LINE__[(x) ? 1 : -1] +#else +# define GLM_STATIC_ASSERT(x, message) assert(x) +#endif//GLM_LANG + +/////////////////////////////////////////////////////////////////////////////////// +// Qualifiers + +// User defines: GLM_CUDA_FORCE_DEVICE_FUNC, GLM_CUDA_FORCE_HOST_FUNC + +#if (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP) +# if defined(GLM_CUDA_FORCE_DEVICE_FUNC) && defined(GLM_CUDA_FORCE_HOST_FUNC) +# error "GLM error: GLM_CUDA_FORCE_DEVICE_FUNC and GLM_CUDA_FORCE_HOST_FUNC should not be defined at the same time, GLM by default generates both device and host code for CUDA compiler." +# endif//defined(GLM_CUDA_FORCE_DEVICE_FUNC) && defined(GLM_CUDA_FORCE_HOST_FUNC) + +# if defined(GLM_CUDA_FORCE_DEVICE_FUNC) +# define GLM_CUDA_FUNC_DEF __device__ +# define GLM_CUDA_FUNC_DECL __device__ +# elif defined(GLM_CUDA_FORCE_HOST_FUNC) +# define GLM_CUDA_FUNC_DEF __host__ +# define GLM_CUDA_FUNC_DECL __host__ +# else +# define GLM_CUDA_FUNC_DEF __device__ __host__ +# define GLM_CUDA_FUNC_DECL __device__ __host__ +# endif//defined(GLM_CUDA_FORCE_XXXX_FUNC) +#else +# define GLM_CUDA_FUNC_DEF +# define GLM_CUDA_FUNC_DECL +#endif + +#if defined(GLM_FORCE_INLINE) +# if GLM_COMPILER & GLM_COMPILER_VC +# define GLM_INLINE __forceinline +# define GLM_NEVER_INLINE __declspec(noinline) +# elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG) +# define GLM_INLINE inline __attribute__((__always_inline__)) +# define GLM_NEVER_INLINE __attribute__((__noinline__)) +# elif (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP) +# define GLM_INLINE __forceinline__ +# define GLM_NEVER_INLINE __noinline__ +# else +# define GLM_INLINE inline +# define GLM_NEVER_INLINE +# endif//GLM_COMPILER +#else +# define GLM_INLINE inline +# define GLM_NEVER_INLINE +#endif//defined(GLM_FORCE_INLINE) + +#define GLM_CTOR_DECL GLM_CUDA_FUNC_DECL GLM_CONSTEXPR +#define GLM_FUNC_DISCARD_DECL GLM_CUDA_FUNC_DECL +#define GLM_FUNC_DECL GLM_NODISCARD GLM_CUDA_FUNC_DECL +#define GLM_FUNC_QUALIFIER GLM_CUDA_FUNC_DEF GLM_INLINE + +// Do not use CUDA function qualifiers on CUDA compiler when functions are made default +#if GLM_HAS_DEFAULTED_FUNCTIONS +# define GLM_DEFAULTED_FUNC_DECL +# define GLM_DEFAULTED_FUNC_QUALIFIER GLM_INLINE +#else +# define GLM_DEFAULTED_FUNC_DECL GLM_FUNC_DISCARD_DECL +# define GLM_DEFAULTED_FUNC_QUALIFIER GLM_FUNC_QUALIFIER +#endif//GLM_HAS_DEFAULTED_FUNCTIONS +#if !defined(GLM_FORCE_CTOR_INIT) +# define GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CUDA_FUNC_DECL +# define GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_DEFAULTED_FUNC_QUALIFIER +#else +# define GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_FUNC_DISCARD_DECL +# define GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_FUNC_QUALIFIER +#endif//GLM_FORCE_CTOR_INIT + +/////////////////////////////////////////////////////////////////////////////////// +// Swizzle operators + +// User defines: GLM_FORCE_SWIZZLE + +#define GLM_SWIZZLE_DISABLED 0 +#define GLM_SWIZZLE_OPERATOR 1 +#define GLM_SWIZZLE_FUNCTION 2 + +#if defined(GLM_SWIZZLE) +# pragma message("GLM: GLM_SWIZZLE is deprecated, use GLM_FORCE_SWIZZLE instead.") +# define GLM_FORCE_SWIZZLE +#endif + +#if 
defined(GLM_FORCE_SWIZZLE) && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && !defined(GLM_FORCE_XYZW_ONLY) +# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_OPERATOR +#elif defined(GLM_FORCE_SWIZZLE) +# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_FUNCTION +#else +# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_DISABLED +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Allows using not basic types as genType + +// #define GLM_FORCE_UNRESTRICTED_GENTYPE + +#ifdef GLM_FORCE_UNRESTRICTED_GENTYPE +# define GLM_CONFIG_UNRESTRICTED_GENTYPE GLM_ENABLE +#else +# define GLM_CONFIG_UNRESTRICTED_GENTYPE GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Allows using any scaler as float + +// #define GLM_FORCE_UNRESTRICTED_FLOAT + +#ifdef GLM_FORCE_UNRESTRICTED_FLOAT +# define GLM_CONFIG_UNRESTRICTED_FLOAT GLM_ENABLE +#else +# define GLM_CONFIG_UNRESTRICTED_FLOAT GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Clip control, define GLM_FORCE_DEPTH_ZERO_TO_ONE before including GLM +// to use a clip space between 0 to 1. +// Coordinate system, define GLM_FORCE_LEFT_HANDED before including GLM +// to use left handed coordinate system by default. + +#define GLM_CLIP_CONTROL_ZO_BIT (1 << 0) // ZERO_TO_ONE +#define GLM_CLIP_CONTROL_NO_BIT (1 << 1) // NEGATIVE_ONE_TO_ONE +#define GLM_CLIP_CONTROL_LH_BIT (1 << 2) // LEFT_HANDED, For DirectX, Metal, Vulkan +#define GLM_CLIP_CONTROL_RH_BIT (1 << 3) // RIGHT_HANDED, For OpenGL, default in GLM + +#define GLM_CLIP_CONTROL_LH_ZO (GLM_CLIP_CONTROL_LH_BIT | GLM_CLIP_CONTROL_ZO_BIT) +#define GLM_CLIP_CONTROL_LH_NO (GLM_CLIP_CONTROL_LH_BIT | GLM_CLIP_CONTROL_NO_BIT) +#define GLM_CLIP_CONTROL_RH_ZO (GLM_CLIP_CONTROL_RH_BIT | GLM_CLIP_CONTROL_ZO_BIT) +#define GLM_CLIP_CONTROL_RH_NO (GLM_CLIP_CONTROL_RH_BIT | GLM_CLIP_CONTROL_NO_BIT) + +#ifdef GLM_FORCE_DEPTH_ZERO_TO_ONE +# ifdef GLM_FORCE_LEFT_HANDED +# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_LH_ZO +# else +# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_RH_ZO +# endif +#else +# ifdef GLM_FORCE_LEFT_HANDED +# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_LH_NO +# else +# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_RH_NO +# endif +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Qualifiers + +#if (GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)) +# define GLM_DEPRECATED __declspec(deprecated) +# define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef __declspec(align(alignment)) type name +#elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG | GLM_COMPILER_INTEL) +# if GLM_LANG & GLM_LANG_CXX14_FLAG +# define GLM_DEPRECATED [[deprecated]] +# else +# define GLM_DEPRECATED __attribute__((__deprecated__)) +# endif +# define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name __attribute__((aligned(alignment))) +#elif (GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP) +# define GLM_DEPRECATED +# define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name __align__(x) +#else +# define GLM_DEPRECATED +# define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name +#endif + +/////////////////////////////////////////////////////////////////////////////////// + +#ifdef GLM_FORCE_EXPLICIT_CTOR +# define GLM_EXPLICIT explicit +#else +# define GLM_EXPLICIT +#endif + 
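The clip-control block above is where depth range and handedness get decided: GLM_FORCE_DEPTH_ZERO_TO_ONE selects the *_ZO configurations (depth mapped to [0, 1], the convention Vulkan, Direct3D and Metal expect), GLM_FORCE_LEFT_HANDED selects the *_LH configurations, and the default stays right-handed with a [-1, 1] depth range as in OpenGL. A minimal sketch of how a renderer would consume these switches follows; it is not part of this patch, and it assumes GLM's stock glm::perspective / glm::radians helpers and the ext/matrix_clip_space.hpp header rather than anything defined in this hunk.

// Sketch only (not part of the patch): the force-defines must appear before any GLM include.
#define GLM_FORCE_DEPTH_ZERO_TO_ONE      // GLM_CONFIG_CLIP_CONTROL gains GLM_CLIP_CONTROL_ZO_BIT
// #define GLM_FORCE_LEFT_HANDED         // would add GLM_CLIP_CONTROL_LH_BIT (DirectX/Metal/Vulkan style basis)
#include <glm/glm.hpp>
#include <glm/ext/matrix_clip_space.hpp> // glm::perspective

glm::mat4 makeProjection(float aspect)
{
    // With the ZO bit set, perspective() maps the near..far range onto depth [0, 1]
    // instead of the default [-1, 1]; handedness follows the LH/RH bit chosen above.
    return glm::perspective(glm::radians(60.0f), aspect, 0.1f, 100.0f);
}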
+/////////////////////////////////////////////////////////////////////////////////// +// Length type: all length functions returns a length_t type. +// When GLM_FORCE_SIZE_T_LENGTH is defined, length_t is a typedef of size_t otherwise +// length_t is a typedef of int like GLSL defines it. + +#define GLM_LENGTH_INT 1 +#define GLM_LENGTH_SIZE_T 2 + +#ifdef GLM_FORCE_SIZE_T_LENGTH +# define GLM_CONFIG_LENGTH_TYPE GLM_LENGTH_SIZE_T +# define GLM_ASSERT_LENGTH(l, max) (assert ((l) < (max))) +#else +# define GLM_CONFIG_LENGTH_TYPE GLM_LENGTH_INT +# define GLM_ASSERT_LENGTH(l, max) (assert ((l) >= 0 && (l) < (max))) +#endif + +namespace glm +{ + using std::size_t; +# if GLM_CONFIG_LENGTH_TYPE == GLM_LENGTH_SIZE_T + typedef size_t length_t; +# else + typedef int length_t; +# endif +}//namespace glm + +/////////////////////////////////////////////////////////////////////////////////// +// constexpr + +#if GLM_HAS_CONSTEXPR +# define GLM_CONFIG_CONSTEXP GLM_ENABLE + + namespace glm + { + template + constexpr std::size_t countof(T const (&)[N]) + { + return N; + } + }//namespace glm +# define GLM_COUNTOF(arr) glm::countof(arr) +#elif defined(_MSC_VER) +# define GLM_CONFIG_CONSTEXP GLM_DISABLE + +# define GLM_COUNTOF(arr) _countof(arr) +#else +# define GLM_CONFIG_CONSTEXP GLM_DISABLE + +# define GLM_COUNTOF(arr) sizeof(arr) / sizeof(arr[0]) +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// uint + +namespace glm{ +namespace detail +{ + template + struct is_int + { + enum test {value = 0}; + }; + + template<> + struct is_int + { + enum test {value = ~0}; + }; + + template<> + struct is_int + { + enum test {value = ~0}; + }; +}//namespace detail + + typedef unsigned int uint; +}//namespace glm + +/////////////////////////////////////////////////////////////////////////////////// +// 64-bit int + +#if GLM_HAS_EXTENDED_INTEGER_TYPE +# include +#endif + +namespace glm{ +namespace detail +{ +# if GLM_HAS_EXTENDED_INTEGER_TYPE + typedef std::uint64_t uint64; + typedef std::int64_t int64; +# elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) // C99 detected, 64 bit types available + typedef uint64_t uint64; + typedef int64_t int64; +# elif GLM_COMPILER & GLM_COMPILER_VC + typedef unsigned __int64 uint64; + typedef signed __int64 int64; +# elif GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic ignored "-Wlong-long" + __extension__ typedef unsigned long long uint64; + __extension__ typedef signed long long int64; +# elif (GLM_COMPILER & GLM_COMPILER_CLANG) +# pragma clang diagnostic ignored "-Wc++11-long-long" + typedef unsigned long long uint64; + typedef signed long long int64; +# else//unknown compiler + typedef unsigned long long uint64; + typedef signed long long int64; +# endif +}//namespace detail +}//namespace glm + +/////////////////////////////////////////////////////////////////////////////////// +// make_unsigned + +#if GLM_HAS_MAKE_SIGNED +# include + +namespace glm{ +namespace detail +{ + using std::make_unsigned; +}//namespace detail +}//namespace glm + +#else + +namespace glm{ +namespace detail +{ + template + struct make_unsigned + {}; + + template<> + struct make_unsigned + { + typedef unsigned char type; + }; + + template<> + struct make_unsigned + { + typedef unsigned char type; + }; + + template<> + struct make_unsigned + { + typedef unsigned short type; + }; + + template<> + struct make_unsigned + { + typedef unsigned int type; + }; + + template<> + struct make_unsigned + { + typedef unsigned long type; + }; + + 
template<> + struct make_unsigned + { + typedef uint64 type; + }; + + template<> + struct make_unsigned + { + typedef unsigned char type; + }; + + template<> + struct make_unsigned + { + typedef unsigned short type; + }; + + template<> + struct make_unsigned + { + typedef unsigned int type; + }; + + template<> + struct make_unsigned + { + typedef unsigned long type; + }; + + template<> + struct make_unsigned + { + typedef uint64 type; + }; +}//namespace detail +}//namespace glm +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Only use x, y, z, w as vector type components + +#ifdef GLM_FORCE_XYZW_ONLY +# define GLM_CONFIG_XYZW_ONLY GLM_ENABLE +#else +# define GLM_CONFIG_XYZW_ONLY GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Configure the use of defaulted initialized types + +#define GLM_CTOR_INIT_DISABLE 0 +#define GLM_CTOR_INITIALIZER_LIST 1 +#define GLM_CTOR_INITIALISATION 2 + +#if defined(GLM_FORCE_CTOR_INIT) && GLM_HAS_INITIALIZER_LISTS +# define GLM_CONFIG_CTOR_INIT GLM_CTOR_INITIALIZER_LIST +#elif defined(GLM_FORCE_CTOR_INIT) && !GLM_HAS_INITIALIZER_LISTS +# define GLM_CONFIG_CTOR_INIT GLM_CTOR_INITIALISATION +#else +# define GLM_CONFIG_CTOR_INIT GLM_CTOR_INIT_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Use SIMD instruction sets + +#if GLM_HAS_ALIGNOF && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && (GLM_ARCH & GLM_ARCH_SIMD_BIT) +# define GLM_CONFIG_SIMD GLM_ENABLE +#else +# define GLM_CONFIG_SIMD GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Configure the use of defaulted function + +#if GLM_HAS_DEFAULTED_FUNCTIONS +# define GLM_CONFIG_DEFAULTED_FUNCTIONS GLM_ENABLE +# define GLM_DEFAULT = default +#else +# define GLM_CONFIG_DEFAULTED_FUNCTIONS GLM_DISABLE +# define GLM_DEFAULT +#endif + +#if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INIT_DISABLE && GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_ENABLE +# define GLM_CONFIG_DEFAULTED_DEFAULT_CTOR GLM_ENABLE +# define GLM_DEFAULT_CTOR GLM_DEFAULT +#else +# define GLM_CONFIG_DEFAULTED_DEFAULT_CTOR GLM_DISABLE +# define GLM_DEFAULT_CTOR +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Configure the use of aligned gentypes + +#ifdef GLM_FORCE_ALIGNED // Legacy define +# define GLM_FORCE_DEFAULT_ALIGNED_GENTYPES +#endif + +#ifdef GLM_FORCE_DEFAULT_ALIGNED_GENTYPES +# define GLM_FORCE_ALIGNED_GENTYPES +#endif + +#if GLM_HAS_ALIGNOF && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && (defined(GLM_FORCE_ALIGNED_GENTYPES) || (GLM_CONFIG_SIMD == GLM_ENABLE)) +# define GLM_CONFIG_ALIGNED_GENTYPES GLM_ENABLE +#else +# define GLM_CONFIG_ALIGNED_GENTYPES GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Configure the use of anonymous structure as implementation detail + +#if ((GLM_CONFIG_SIMD == GLM_ENABLE) || (GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR) || (GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE)) +# define GLM_CONFIG_ANONYMOUS_STRUCT GLM_ENABLE +#else +# define GLM_CONFIG_ANONYMOUS_STRUCT GLM_DISABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Silent warnings + +#ifdef GLM_FORCE_WARNINGS +# define GLM_SILENT_WARNINGS GLM_DISABLE +#else +# define GLM_SILENT_WARNINGS GLM_ENABLE +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// 
Precision + +#define GLM_HIGHP 1 +#define GLM_MEDIUMP 2 +#define GLM_LOWP 3 + +#if defined(GLM_FORCE_PRECISION_HIGHP_BOOL) || defined(GLM_PRECISION_HIGHP_BOOL) +# define GLM_CONFIG_PRECISION_BOOL GLM_HIGHP +#elif defined(GLM_FORCE_PRECISION_MEDIUMP_BOOL) || defined(GLM_PRECISION_MEDIUMP_BOOL) +# define GLM_CONFIG_PRECISION_BOOL GLM_MEDIUMP +#elif defined(GLM_FORCE_PRECISION_LOWP_BOOL) || defined(GLM_PRECISION_LOWP_BOOL) +# define GLM_CONFIG_PRECISION_BOOL GLM_LOWP +#else +# define GLM_CONFIG_PRECISION_BOOL GLM_HIGHP +#endif + +#if defined(GLM_FORCE_PRECISION_HIGHP_INT) || defined(GLM_PRECISION_HIGHP_INT) +# define GLM_CONFIG_PRECISION_INT GLM_HIGHP +#elif defined(GLM_FORCE_PRECISION_MEDIUMP_INT) || defined(GLM_PRECISION_MEDIUMP_INT) +# define GLM_CONFIG_PRECISION_INT GLM_MEDIUMP +#elif defined(GLM_FORCE_PRECISION_LOWP_INT) || defined(GLM_PRECISION_LOWP_INT) +# define GLM_CONFIG_PRECISION_INT GLM_LOWP +#else +# define GLM_CONFIG_PRECISION_INT GLM_HIGHP +#endif + +#if defined(GLM_FORCE_PRECISION_HIGHP_UINT) || defined(GLM_PRECISION_HIGHP_UINT) +# define GLM_CONFIG_PRECISION_UINT GLM_HIGHP +#elif defined(GLM_FORCE_PRECISION_MEDIUMP_UINT) || defined(GLM_PRECISION_MEDIUMP_UINT) +# define GLM_CONFIG_PRECISION_UINT GLM_MEDIUMP +#elif defined(GLM_FORCE_PRECISION_LOWP_UINT) || defined(GLM_PRECISION_LOWP_UINT) +# define GLM_CONFIG_PRECISION_UINT GLM_LOWP +#else +# define GLM_CONFIG_PRECISION_UINT GLM_HIGHP +#endif + +#if defined(GLM_FORCE_PRECISION_HIGHP_FLOAT) || defined(GLM_PRECISION_HIGHP_FLOAT) +# define GLM_CONFIG_PRECISION_FLOAT GLM_HIGHP +#elif defined(GLM_FORCE_PRECISION_MEDIUMP_FLOAT) || defined(GLM_PRECISION_MEDIUMP_FLOAT) +# define GLM_CONFIG_PRECISION_FLOAT GLM_MEDIUMP +#elif defined(GLM_FORCE_PRECISION_LOWP_FLOAT) || defined(GLM_PRECISION_LOWP_FLOAT) +# define GLM_CONFIG_PRECISION_FLOAT GLM_LOWP +#else +# define GLM_CONFIG_PRECISION_FLOAT GLM_HIGHP +#endif + +#if defined(GLM_FORCE_PRECISION_HIGHP_DOUBLE) || defined(GLM_PRECISION_HIGHP_DOUBLE) +# define GLM_CONFIG_PRECISION_DOUBLE GLM_HIGHP +#elif defined(GLM_FORCE_PRECISION_MEDIUMP_DOUBLE) || defined(GLM_PRECISION_MEDIUMP_DOUBLE) +# define GLM_CONFIG_PRECISION_DOUBLE GLM_MEDIUMP +#elif defined(GLM_FORCE_PRECISION_LOWP_DOUBLE) || defined(GLM_PRECISION_LOWP_DOUBLE) +# define GLM_CONFIG_PRECISION_DOUBLE GLM_LOWP +#else +# define GLM_CONFIG_PRECISION_DOUBLE GLM_HIGHP +#endif + +/////////////////////////////////////////////////////////////////////////////////// +// Check inclusions of different versions of GLM + +#elif ((GLM_SETUP_INCLUDED != GLM_VERSION) && !defined(GLM_FORCE_IGNORE_VERSION)) +# error "GLM error: A different version of GLM is already included. Define GLM_FORCE_IGNORE_VERSION before including GLM headers to ignore this error." +#elif GLM_SETUP_INCLUDED == GLM_VERSION + +/////////////////////////////////////////////////////////////////////////////////// +// Messages + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_MESSAGE_DISPLAYED) +# define GLM_MESSAGE_DISPLAYED +# define GLM_STR_HELPER(x) #x +# define GLM_STR(x) GLM_STR_HELPER(x) + + // Report GLM version +# pragma message ("GLM: version " GLM_STR(GLM_VERSION_MAJOR) "." GLM_STR(GLM_VERSION_MINOR) "." 
GLM_STR(GLM_VERSION_PATCH)) + + // Report C++ language +# if (GLM_LANG & GLM_LANG_CXX20_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 20 with extensions") +# elif (GLM_LANG & GLM_LANG_CXX20_FLAG) +# pragma message("GLM: C++ 2A") +# elif (GLM_LANG & GLM_LANG_CXX17_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 17 with extensions") +# elif (GLM_LANG & GLM_LANG_CXX17_FLAG) +# pragma message("GLM: C++ 17") +# elif (GLM_LANG & GLM_LANG_CXX14_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 14 with extensions") +# elif (GLM_LANG & GLM_LANG_CXX14_FLAG) +# pragma message("GLM: C++ 14") +# elif (GLM_LANG & GLM_LANG_CXX11_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 11 with extensions") +# elif (GLM_LANG & GLM_LANG_CXX11_FLAG) +# pragma message("GLM: C++ 11") +# elif (GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 0x with extensions") +# elif (GLM_LANG & GLM_LANG_CXX0X_FLAG) +# pragma message("GLM: C++ 0x") +# elif (GLM_LANG & GLM_LANG_CXX03_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 03 with extensions") +# elif (GLM_LANG & GLM_LANG_CXX03_FLAG) +# pragma message("GLM: C++ 03") +# elif (GLM_LANG & GLM_LANG_CXX98_FLAG) && (GLM_LANG & GLM_LANG_EXT) +# pragma message("GLM: C++ 98 with extensions") +# elif (GLM_LANG & GLM_LANG_CXX98_FLAG) +# pragma message("GLM: C++ 98") +# else +# pragma message("GLM: C++ language undetected") +# endif//GLM_LANG + + // Report compiler detection +# if GLM_COMPILER & GLM_COMPILER_CUDA +# pragma message("GLM: CUDA compiler detected") +# elif GLM_COMPILER & GLM_COMPILER_HIP +# pragma message("GLM: HIP compiler detected") +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma message("GLM: Visual C++ compiler detected") +# elif GLM_COMPILER & GLM_COMPILER_CLANG +# pragma message("GLM: Clang compiler detected") +# elif GLM_COMPILER & GLM_COMPILER_INTEL +# pragma message("GLM: Intel Compiler detected") +# elif GLM_COMPILER & GLM_COMPILER_GCC +# pragma message("GLM: GCC compiler detected") +# else +# pragma message("GLM: Compiler not detected") +# endif + + // Report build target +# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with AVX2 instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_AVX2_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with AVX2 instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_AVX_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with AVX instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_AVX_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with AVX instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_SSE42_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with SSE4.2 instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_SSE42_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with SSE4.2 instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_SSE41_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with SSE4.1 instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_SSE41_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with SSE4.1 instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_SSSE3_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with SSSE3 instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_SSSE3_BIT) && (GLM_MODEL == 
GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with SSSE3 instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_SSE3_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with SSE3 instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_SSE3_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with SSE3 instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_SSE2_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits with SSE2 instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_SSE2_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits with SSE2 instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_X86_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: x86 64 bits build target") +# elif (GLM_ARCH & GLM_ARCH_X86_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: x86 32 bits build target") + +# elif (GLM_ARCH & GLM_ARCH_NEON_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: ARM 64 bits with Neon instruction set build target") +# elif (GLM_ARCH & GLM_ARCH_NEON_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: ARM 32 bits with Neon instruction set build target") + +# elif (GLM_ARCH & GLM_ARCH_ARM_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: ARM 64 bits build target") +# elif (GLM_ARCH & GLM_ARCH_ARM_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: ARM 32 bits build target") + +# elif (GLM_ARCH & GLM_ARCH_MIPS_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: MIPS 64 bits build target") +# elif (GLM_ARCH & GLM_ARCH_MIPS_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: MIPS 32 bits build target") + +# elif (GLM_ARCH & GLM_ARCH_PPC_BIT) && (GLM_MODEL == GLM_MODEL_64) +# pragma message("GLM: PowerPC 64 bits build target") +# elif (GLM_ARCH & GLM_ARCH_PPC_BIT) && (GLM_MODEL == GLM_MODEL_32) +# pragma message("GLM: PowerPC 32 bits build target") +# else +# pragma message("GLM: Unknown build target") +# endif//GLM_ARCH + + // Report platform name +# if(GLM_PLATFORM & GLM_PLATFORM_QNXNTO) +# pragma message("GLM: QNX platform detected") +//# elif(GLM_PLATFORM & GLM_PLATFORM_IOS) +//# pragma message("GLM: iOS platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_APPLE) +# pragma message("GLM: Apple platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_WINCE) +# pragma message("GLM: WinCE platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_WINDOWS) +# pragma message("GLM: Windows platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_CHROME_NACL) +# pragma message("GLM: Native Client detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) +# pragma message("GLM: Android platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_LINUX) +# pragma message("GLM: Linux platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_UNIX) +# pragma message("GLM: UNIX platform detected") +# elif(GLM_PLATFORM & GLM_PLATFORM_UNKNOWN) +# pragma message("GLM: platform unknown") +# else +# pragma message("GLM: platform not detected") +# endif + + // Report whether only xyzw component are used +# if defined GLM_FORCE_XYZW_ONLY +# pragma message("GLM: GLM_FORCE_XYZW_ONLY is defined. Only x, y, z and w component are available in vector type. 
This define disables swizzle operators and SIMD instruction sets.") +# endif + + // Report swizzle operator support +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR +# pragma message("GLM: GLM_FORCE_SWIZZLE is defined, swizzling operators enabled.") +# elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION +# pragma message("GLM: GLM_FORCE_SWIZZLE is defined, swizzling functions enabled. Enable compiler C++ language extensions to enable swizzle operators.") +# else +# pragma message("GLM: GLM_FORCE_SWIZZLE is undefined. swizzling functions or operators are disabled.") +# endif + + // Report .length() type +# if GLM_CONFIG_LENGTH_TYPE == GLM_LENGTH_SIZE_T +# pragma message("GLM: GLM_FORCE_SIZE_T_LENGTH is defined. .length() returns a glm::length_t, a typedef of std::size_t.") +# else +# pragma message("GLM: GLM_FORCE_SIZE_T_LENGTH is undefined. .length() returns a glm::length_t, a typedef of int following GLSL.") +# endif + +# if GLM_CONFIG_UNRESTRICTED_GENTYPE == GLM_ENABLE +# pragma message("GLM: GLM_FORCE_UNRESTRICTED_GENTYPE is defined. Removes GLSL restrictions on valid function genTypes.") +# else +# pragma message("GLM: GLM_FORCE_UNRESTRICTED_GENTYPE is undefined. Follows strictly GLSL on valid function genTypes.") +# endif + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# pragma message("GLM: GLM_FORCE_SILENT_WARNINGS is defined. Ignores C++ warnings from using C++ language extensions.") +# else +# pragma message("GLM: GLM_FORCE_SILENT_WARNINGS is undefined. Shows C++ warnings from using C++ language extensions.") +# endif + +# ifdef GLM_FORCE_SINGLE_ONLY +# pragma message("GLM: GLM_FORCE_SINGLE_ONLY is defined. Using only single precision floating-point types.") +# endif + +# if defined(GLM_FORCE_ALIGNED_GENTYPES) && (GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE) +# undef GLM_FORCE_ALIGNED_GENTYPES +# pragma message("GLM: GLM_FORCE_ALIGNED_GENTYPES is defined, allowing aligned types. This prevents the use of C++ constexpr.") +# elif defined(GLM_FORCE_ALIGNED_GENTYPES) && (GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE) +# undef GLM_FORCE_ALIGNED_GENTYPES +# pragma message("GLM: GLM_FORCE_ALIGNED_GENTYPES is defined but is disabled. It requires C++11 and language extensions.") +# endif + +# if defined(GLM_FORCE_DEFAULT_ALIGNED_GENTYPES) +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE +# undef GLM_FORCE_DEFAULT_ALIGNED_GENTYPES +# pragma message("GLM: GLM_FORCE_DEFAULT_ALIGNED_GENTYPES is defined but is disabled. It requires C++11 and language extensions.") +# elif GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE +# pragma message("GLM: GLM_FORCE_DEFAULT_ALIGNED_GENTYPES is defined. All gentypes (e.g. vec3) will be aligned and padded by default.") +# endif +# endif + +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT +# pragma message("GLM: GLM_FORCE_DEPTH_ZERO_TO_ONE is defined. Using zero to one depth clip space.") +# else +# pragma message("GLM: GLM_FORCE_DEPTH_ZERO_TO_ONE is undefined. Using negative one to one depth clip space.") +# endif + +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT +# pragma message("GLM: GLM_FORCE_LEFT_HANDED is defined. Using left handed coordinate system.") +# else +# pragma message("GLM: GLM_FORCE_LEFT_HANDED is undefined. 
Using right handed coordinate system.") +# endif +#endif//GLM_MESSAGES + +#endif//GLM_SETUP_INCLUDED diff --git a/includes/glm/detail/type_float.hpp b/includes/glm/detail/type_float.hpp new file mode 100644 index 0000000..c8037eb --- /dev/null +++ b/includes/glm/detail/type_float.hpp @@ -0,0 +1,68 @@ +#pragma once + +#include "setup.hpp" + +#if GLM_COMPILER == GLM_COMPILER_VC12 +# pragma warning(push) +# pragma warning(disable: 4512) // assignment operator could not be generated +#endif + +namespace glm{ +namespace detail +{ + template + union float_t + {}; + + // https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ + template <> + union float_t + { + typedef int int_type; + typedef float float_type; + + GLM_CONSTEXPR float_t(float_type Num = 0.0f) : f(Num) {} + + GLM_CONSTEXPR float_t& operator=(float_t const& x) + { + f = x.f; + return *this; + } + + // Portable extraction of components. + GLM_CONSTEXPR bool negative() const { return i < 0; } + GLM_CONSTEXPR int_type mantissa() const { return i & ((1 << 23) - 1); } + GLM_CONSTEXPR int_type exponent() const { return (i >> 23) & ((1 << 8) - 1); } + + int_type i; + float_type f; + }; + + template <> + union float_t + { + typedef detail::int64 int_type; + typedef double float_type; + + GLM_CONSTEXPR float_t(float_type Num = static_cast(0)) : f(Num) {} + + GLM_CONSTEXPR float_t& operator=(float_t const& x) + { + f = x.f; + return *this; + } + + // Portable extraction of components. + GLM_CONSTEXPR bool negative() const { return i < 0; } + GLM_CONSTEXPR int_type mantissa() const { return i & ((int_type(1) << 52) - 1); } + GLM_CONSTEXPR int_type exponent() const { return (i >> 52) & ((int_type(1) << 11) - 1); } + + int_type i; + float_type f; + }; +}//namespace detail +}//namespace glm + +#if GLM_COMPILER == GLM_COMPILER_VC12 +# pragma warning(pop) +#endif diff --git a/includes/glm/detail/type_half.hpp b/includes/glm/detail/type_half.hpp new file mode 100644 index 0000000..40b8bec --- /dev/null +++ b/includes/glm/detail/type_half.hpp @@ -0,0 +1,16 @@ +#pragma once + +#include "setup.hpp" + +namespace glm{ +namespace detail +{ + typedef short hdata; + + GLM_FUNC_DECL float toFloat32(hdata value); + GLM_FUNC_DECL hdata toFloat16(float const& value); + +}//namespace detail +}//namespace glm + +#include "type_half.inl" diff --git a/includes/glm/detail/type_half.inl b/includes/glm/detail/type_half.inl new file mode 100644 index 0000000..5d239cf --- /dev/null +++ b/includes/glm/detail/type_half.inl @@ -0,0 +1,241 @@ +namespace glm{ +namespace detail +{ + GLM_FUNC_QUALIFIER float overflow() + { + volatile float f = 1e10; + + for(int i = 0; i < 10; ++i) + f = f * f; // this will overflow before the for loop terminates + return f; + } + + union uif32 + { + GLM_FUNC_QUALIFIER uif32() : + i(0) + {} + + GLM_FUNC_QUALIFIER uif32(float f_) : + f(f_) + {} + + GLM_FUNC_QUALIFIER uif32(unsigned int i_) : + i(i_) + {} + + float f; + unsigned int i; + }; + + GLM_FUNC_QUALIFIER float toFloat32(hdata value) + { + int s = (value >> 15) & 0x00000001; + int e = (value >> 10) & 0x0000001f; + int m = value & 0x000003ff; + + if(e == 0) + { + if(m == 0) + { + // + // Plus or minus zero + // + + detail::uif32 result; + result.i = static_cast(s << 31); + return result.f; + } + else + { + // + // Denormalized number -- renormalize it + // + + while(!(m & 0x00000400)) + { + m <<= 1; + e -= 1; + } + + e += 1; + m &= ~0x00000400; + } + } + else if(e == 31) + { + if(m == 0) + { + // + // Positive or negative infinity + // + + uif32 result; 
+ result.i = static_cast((s << 31) | 0x7f800000); + return result.f; + } + else + { + // + // Nan -- preserve sign and significand bits + // + + uif32 result; + result.i = static_cast((s << 31) | 0x7f800000 | (m << 13)); + return result.f; + } + } + + // + // Normalized number + // + + e = e + (127 - 15); + m = m << 13; + + // + // Assemble s, e and m. + // + + uif32 Result; + Result.i = static_cast((s << 31) | (e << 23) | m); + return Result.f; + } + + GLM_FUNC_QUALIFIER hdata toFloat16(float const& f) + { + uif32 Entry; + Entry.f = f; + int i = static_cast(Entry.i); + + // + // Our floating point number, f, is represented by the bit + // pattern in integer i. Disassemble that bit pattern into + // the sign, s, the exponent, e, and the significand, m. + // Shift s into the position where it will go in the + // resulting half number. + // Adjust e, accounting for the different exponent bias + // of float and half (127 versus 15). + // + + int s = (i >> 16) & 0x00008000; + int e = ((i >> 23) & 0x000000ff) - (127 - 15); + int m = i & 0x007fffff; + + // + // Now reassemble s, e and m into a half: + // + + if(e <= 0) + { + if(e < -10) + { + // + // E is less than -10. The absolute value of f is + // less than half_MIN (f may be a small normalized + // float, a denormalized float or a zero). + // + // We convert f to a half zero. + // + + return hdata(s); + } + + // + // E is between -10 and 0. F is a normalized float, + // whose magnitude is less than __half_NRM_MIN. + // + // We convert f to a denormalized half. + // + + m = (m | 0x00800000) >> (1 - e); + + // + // Round to nearest, round "0.5" up. + // + // Rounding may cause the significand to overflow and make + // our number normalized. Because of the way a half's bits + // are laid out, we don't have to treat this case separately; + // the code below will handle it correctly. + // + + if(m & 0x00001000) + m += 0x00002000; + + // + // Assemble the half from s, e (zero) and m. + // + + return hdata(s | (m >> 13)); + } + else if(e == 0xff - (127 - 15)) + { + if(m == 0) + { + // + // F is an infinity; convert f to a half + // infinity with the same sign as f. + // + + return hdata(s | 0x7c00); + } + else + { + // + // F is a NAN; we produce a half NAN that preserves + // the sign bit and the 10 leftmost bits of the + // significand of f, with one exception: If the 10 + // leftmost bits are all zero, the NAN would turn + // into an infinity, so we have to set at least one + // bit in the significand. + // + + m >>= 13; + + return hdata(s | 0x7c00 | m | (m == 0)); + } + } + else + { + // + // E is greater than zero. F is a normalized float. + // We try to convert f to a normalized half. + // + + // + // Round to nearest, round "0.5" up + // + + if(m & 0x00001000) + { + m += 0x00002000; + + if(m & 0x00800000) + { + m = 0; // overflow in significand, + e += 1; // adjust exponent + } + } + + // + // Handle exponent overflow + // + + if (e > 30) + { + overflow(); // Cause a hardware floating point overflow; + + return hdata(s | 0x7c00); + // if this returns, the half becomes an + } // infinity with the same sign as f. + + // + // Assemble the half from s, e and m. 
+ // + + return hdata(s | (e << 10) | (m >> 13)); + } + } + +}//namespace detail +}//namespace glm diff --git a/includes/glm/detail/type_mat2x2.hpp b/includes/glm/detail/type_mat2x2.hpp new file mode 100644 index 0000000..82e9f66 --- /dev/null +++ b/includes/glm/detail/type_mat2x2.hpp @@ -0,0 +1,177 @@ +/// @ref core +/// @file glm/detail/type_mat2x2.hpp + +#pragma once + +#include "type_vec2.hpp" +#include +#include + +namespace glm +{ + template + struct mat<2, 2, T, Q> + { + typedef vec<2, T, Q> col_type; + typedef vec<2, T, Q> row_type; + typedef mat<2, 2, T, Q> type; + typedef mat<2, 2, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[2]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_CTOR_DECL mat(mat<2, 2, T, P> const& m); + + GLM_CTOR_DECL GLM_EXPLICIT mat(T scalar); + GLM_CTOR_DECL mat( + T const& x1, T const& y1, + T const& x2, T const& y2); + GLM_CTOR_DECL mat( + col_type const& v1, + col_type const& v2); + + // -- Conversions -- + + template + GLM_CTOR_DECL mat( + U const& x1, V const& y1, + M const& x2, N const& y2); + + template + GLM_CTOR_DECL mat( + vec<2, U, Q> const& v1, + vec<2, V, Q> const& v2); + + // -- Matrix conversions -- + + template + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 2, U, P> const& m); + + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 3, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 3, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 3, T, Q> const& x); + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator=(mat<2, 2, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator+=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator+=(mat<2, 2, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator-=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator-=(mat<2, 2, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator*=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator*=(mat<2, 2, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator/=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator/=(mat<2, 2, U, Q> const& m); + + // -- Increment and decrement operators -- + + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator++ (); + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 2, T, Q> & operator-- (); + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator-(mat<2, 
2, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator+(T scalar, mat<2, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator-(T scalar, mat<2, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator*(T scalar, mat<2, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type operator*(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 2, T, Q>::row_type operator*(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator/(T scalar, mat<2, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type operator/(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 2, T, Q>::row_type operator/(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2); +} //namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat2x2.inl" +#endif diff --git a/includes/glm/detail/type_mat2x2.inl b/includes/glm/detail/type_mat2x2.inl new file mode 100644 index 0000000..afb2b31 --- /dev/null +++ b/includes/glm/detail/type_mat2x2.inl @@ -0,0 +1,536 @@ +#include "../matrix.hpp" + +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0), col_type(0, 1)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0); + this->value[1] = col_type(0, 1); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 2, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = 
m[0]; + this->value[1] = m[1]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(T scalar) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(scalar, 0), col_type(0, scalar)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(scalar, 0); + this->value[1] = col_type(0, scalar); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat + ( + T const& x0, T const& y0, + T const& x1, T const& y1 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0), col_type(x1, y1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0); + this->value[1] = col_type(x1, y1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(col_type const& v0, col_type const& v1) +# if GLM_HAS_INITIALIZER_LISTS + : value{v0, v1} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; +# endif + } + + // -- Conversion constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat + ( + X1 const& x1, Y1 const& y1, + X2 const& x2, Y2 const& y2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(static_cast(x1), value_type(y1)), col_type(static_cast(x2), value_type(y2)) } +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(static_cast(x1), value_type(y1)); + this->value[1] = col_type(static_cast(x2), value_type(y2)); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v1); + this->value[1] = col_type(v2); +# endif + } + + // -- mat2x2 matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 2, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = 
col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type& mat<2, 2, T, Q>::operator[](typename mat<2, 2, T, Q>::length_type i) GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type const& mat<2, 2, T, Q>::operator[](typename mat<2, 2, T, Q>::length_type i) const GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator=(mat<2, 2, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator+=(U scalar) + { + this->value[0] += scalar; + this->value[1] += scalar; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator+=(mat<2, 2, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator-=(U scalar) + { + this->value[0] -= scalar; + this->value[1] -= scalar; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator-=(mat<2, 2, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator*=(U scalar) + { + this->value[0] *= scalar; + this->value[1] *= scalar; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator*=(mat<2, 2, U, Q> const& m) + { + return (*this = *this * m); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator/=(U scalar) + { + this->value[0] /= scalar; + this->value[1] /= scalar; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator/=(mat<2, 2, U, Q> const& m) + { + return *this *= inverse(m); + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator--() + { + 
--this->value[0]; + --this->value[1]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> mat<2, 2, T, Q>::operator++(int) + { + mat<2, 2, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> mat<2, 2, T, Q>::operator--(int) + { + mat<2, 2, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m) + { + return mat<2, 2, T, Q>( + -m[0], + -m[1]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m, T scalar) + { + return mat<2, 2, T, Q>( + m[0] + scalar, + m[1] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator+(T scalar, mat<2, 2, T, Q> const& m) + { + return mat<2, 2, T, Q>( + m[0] + scalar, + m[1] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return mat<2, 2, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m, T scalar) + { + return mat<2, 2, T, Q>( + m[0] - scalar, + m[1] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator-(T scalar, mat<2, 2, T, Q> const& m) + { + return mat<2, 2, T, Q>( + scalar - m[0], + scalar - m[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return mat<2, 2, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m, T scalar) + { + return mat<2, 2, T, Q>( + m[0] * scalar, + m[1] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator*(T scalar, mat<2, 2, T, Q> const& m) + { + return mat<2, 2, T, Q>( + m[0] * scalar, + m[1] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type operator* + ( + mat<2, 2, T, Q> const& m, + typename mat<2, 2, T, Q>::row_type const& v + ) + { + return vec<2, T, Q>( + m[0][0] * v.x + m[1][0] * v.y, + m[0][1] * v.x + m[1][1] * v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::row_type operator* + ( + typename mat<2, 2, T, Q>::col_type const& v, + mat<2, 2, T, Q> const& m + ) + { + return vec<2, T, Q>( + v.x * m[0][0] + v.y * m[0][1], + v.x * m[1][0] + v.y * m[1][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return mat<2, 2, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + return mat<3, 2, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1]); + } + + template + 
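+ /// Usage sketch for the mat2 operators defined above: a minimal illustration with
+ /// hypothetical values, assuming the usual glm::mat2 / glm::vec2 float typedefs and
+ /// the column-major storage used throughout these headers.
+ /// @code
+ /// glm::mat2 m(1.0f, 2.0f,   // first column  (1, 2)
+ ///             3.0f, 4.0f);  // second column (3, 4)
+ /// glm::vec2 v(1.0f, 0.0f);
+ /// glm::vec2 a = m * v;      // (1, 2): v is treated as a column vector
+ /// glm::vec2 b = v * m;      // (1, 3): v is treated as a row vector
+ /// @endcode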
GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + return mat<4, 2, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1], + m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1], + m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m, T scalar) + { + return mat<2, 2, T, Q>( + m[0] / scalar, + m[1] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator/(T scalar, mat<2, 2, T, Q> const& m) + { + return mat<2, 2, T, Q>( + scalar / m[0], + scalar / m[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type operator/(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v) + { + return inverse(m) * v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::row_type operator/(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m) + { + return v * inverse(m); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + mat<2, 2, T, Q> m1_copy(m1); + return m1_copy /= m2; + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]); + } +} //namespace glm diff --git a/includes/glm/detail/type_mat2x3.hpp b/includes/glm/detail/type_mat2x3.hpp new file mode 100644 index 0000000..b65d1c5 --- /dev/null +++ b/includes/glm/detail/type_mat2x3.hpp @@ -0,0 +1,159 @@ +/// @ref core +/// @file glm/detail/type_mat2x3.hpp + +#pragma once + +#include "type_vec2.hpp" +#include "type_vec3.hpp" +#include +#include + +namespace glm +{ + template + struct mat<2, 3, T, Q> + { + typedef vec<3, T, Q> col_type; + typedef vec<2, T, Q> row_type; + typedef mat<2, 3, T, Q> type; + typedef mat<3, 2, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[2]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_CTOR_DECL mat(mat<2, 3, T, P> const& m); + + GLM_CTOR_DECL GLM_EXPLICIT mat(T scalar); + GLM_CTOR_DECL mat( + T x0, T y0, T z0, + T x1, T y1, T z1); + GLM_CTOR_DECL mat( + col_type const& v0, + col_type const& v1); + + // -- Conversions -- + + template + GLM_CTOR_DECL mat( + X1 x1, Y1 y1, Z1 z1, + X2 x2, Y2 y2, Z2 z2); + + template + GLM_CTOR_DECL mat( + vec<3, U, Q> const& v1, + vec<3, V, Q> const& v2); + + // -- Matrix conversions -- + + template + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 3, U, P> const& m); + + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 3, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT 
mat(mat<4, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 3, T, Q> const& x); + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator=(mat<2, 3, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator+=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator+=(mat<2, 3, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator-=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator-=(mat<2, 3, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator*=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator/=(U s); + + // -- Increment and decrement operators -- + + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator++ (); + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 3, T, Q> & operator-- (); + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator*(T scalar, mat<2, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 3, T, Q>::col_type operator*(mat<2, 3, T, Q> const& m, typename mat<2, 3, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 3, T, Q>::row_type operator*(typename mat<2, 3, T, Q>::col_type const& v, mat<2, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<2, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<3, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<4, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator/(mat<2, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator/(T scalar, mat<2, 3, T, Q> const& m); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat2x3.inl" +#endif diff --git 
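+ /// Usage sketch for the non-square mat2x3 type declared in this header: a minimal
+ /// illustration with hypothetical values, assuming the standard glm::mat2x3,
+ /// glm::mat3x2 and glm::vec float typedefs. A mat2x3 holds 2 columns of vec3,
+ /// so it maps a vec2 to a vec3, and multiplying it by a mat3x2 yields a mat3.
+ /// @code
+ /// glm::mat2x3 m(1.0f, 0.0f, 0.0f,   // first column  (1, 0, 0)
+ ///               0.0f, 1.0f, 1.0f);  // second column (0, 1, 1)
+ /// glm::vec3 p = m * glm::vec2(2.0f, 3.0f);   // (2, 3, 3)
+ /// glm::mat3 s = m * glm::mat3x2(1.0f);       // (2x3) * (3x2) -> 3x3
+ /// @endcode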
a/includes/glm/detail/type_mat2x3.inl b/includes/glm/detail/type_mat2x3.inl new file mode 100644 index 0000000..345ac8a --- /dev/null +++ b/includes/glm/detail/type_mat2x3.inl @@ -0,0 +1,510 @@ +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0, 0), col_type(0, 1, 0)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0, 0); + this->value[1] = col_type(0, 1, 0); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 3, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m.value[0]; + this->value[1] = m.value[1]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(T scalar) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(scalar, 0, 0), col_type(0, scalar, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(scalar, 0, 0); + this->value[1] = col_type(0, scalar, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat + ( + T x0, T y0, T z0, + T x1, T y1, T z1 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0, z0), col_type(x1, y1, z1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0); + this->value[1] = col_type(x1, y1, z1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(col_type const& v0, col_type const& v1) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v0); + this->value[1] = col_type(v1); +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X1, typename Y1, typename Z1, + typename X2, typename Y2, typename Z2> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat + ( + X1 x1, Y1 y1, Z1 z1, + X2 x2, Y2 y2, Z2 z2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x1, y1, z1), col_type(x2, y2, z2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x1, y1, z1); + this->value[1] = col_type(x2, y2, z2); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, Q> const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v1); + this->value[1] = col_type(v2); +# endif + } + + // -- Matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 3, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if 
!GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 3, T, Q>::col_type & mat<2, 3, T, Q>::operator[](typename mat<2, 3, T, Q>::length_type i) GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 3, T, Q>::col_type const& mat<2, 3, T, Q>::operator[](typename mat<2, 3, T, Q>::length_type i) const GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator=(mat<2, 3, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator+=(mat<2, 3, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator-=(mat<2, 3, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator*=(U 
s) + { + this->value[0] *= s; + this->value[1] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> mat<2, 3, T, Q>::operator++(int) + { + mat<2, 3, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> mat<2, 3, T, Q>::operator--(int) + { + mat<2, 3, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m) + { + return mat<2, 3, T, Q>( + -m[0], + -m[1]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m, T scalar) + { + return mat<2, 3, T, Q>( + m[0] + scalar, + m[1] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + return mat<2, 3, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m, T scalar) + { + return mat<2, 3, T, Q>( + m[0] - scalar, + m[1] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + return mat<2, 3, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m, T scalar) + { + return mat<2, 3, T, Q>( + m[0] * scalar, + m[1] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator*(T scalar, mat<2, 3, T, Q> const& m) + { + return mat<2, 3, T, Q>( + m[0] * scalar, + m[1] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 3, T, Q>::col_type operator* + ( + mat<2, 3, T, Q> const& m, + typename mat<2, 3, T, Q>::row_type const& v) + { + return typename mat<2, 3, T, Q>::col_type( + m[0][0] * v.x + m[1][0] * v.y, + m[0][1] * v.x + m[1][1] * v.y, + m[0][2] * v.x + m[1][2] * v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 3, T, Q>::row_type operator* + ( + typename mat<2, 3, T, Q>::col_type const& v, + mat<2, 3, T, Q> const& m) + { + return typename mat<2, 3, T, Q>::row_type( + v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2], + v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return mat<2, 3, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 
3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + T SrcA00 = m1[0][0]; + T SrcA01 = m1[0][1]; + T SrcA02 = m1[0][2]; + T SrcA10 = m1[1][0]; + T SrcA11 = m1[1][1]; + T SrcA12 = m1[1][2]; + + T SrcB00 = m2[0][0]; + T SrcB01 = m2[0][1]; + T SrcB10 = m2[1][0]; + T SrcB11 = m2[1][1]; + T SrcB20 = m2[2][0]; + T SrcB21 = m2[2][1]; + + mat<3, 3, T, Q> Result; + Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01; + Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01; + Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01; + Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11; + Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11; + Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11; + Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21; + Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21; + Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + return mat<4, 3, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1], + m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1], + m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1], + m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1], + m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator/(mat<2, 3, T, Q> const& m, T scalar) + { + return mat<2, 3, T, Q>( + m[0] / scalar, + m[1] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator/(T scalar, mat<2, 3, T, Q> const& m) + { + return mat<2, 3, T, Q>( + scalar / m[0], + scalar / m[1]); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]); + } +} //namespace glm diff --git a/includes/glm/detail/type_mat2x4.hpp b/includes/glm/detail/type_mat2x4.hpp new file mode 100644 index 0000000..7ca43e5 --- /dev/null +++ b/includes/glm/detail/type_mat2x4.hpp @@ -0,0 +1,161 @@ +/// @ref core +/// @file glm/detail/type_mat2x4.hpp + +#pragma once + +#include "type_vec2.hpp" +#include "type_vec4.hpp" +#include +#include + +namespace glm +{ + template + struct mat<2, 4, T, Q> + { + typedef vec<4, T, Q> col_type; + typedef vec<2, T, Q> row_type; + typedef mat<2, 4, T, Q> type; + typedef mat<4, 2, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[2]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_CTOR_DECL mat(mat<2, 4, T, P> const& m); + + GLM_CTOR_DECL GLM_EXPLICIT mat(T scalar); + GLM_CTOR_DECL mat( + T x0, T y0, T z0, T w0, + T x1, T y1, T z1, T w1); + GLM_CTOR_DECL mat( + 
col_type const& v0, + col_type const& v1); + + // -- Conversions -- + + template< + typename X1, typename Y1, typename Z1, typename W1, + typename X2, typename Y2, typename Z2, typename W2> + GLM_CTOR_DECL mat( + X1 x1, Y1 y1, Z1 z1, W1 w1, + X2 x2, Y2 y2, Z2 z2, W2 w2); + + template + GLM_CTOR_DECL mat( + vec<4, U, Q> const& v1, + vec<4, V, Q> const& v2); + + // -- Matrix conversions -- + + template + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 4, U, P> const& m); + + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 3, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 3, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 3, T, Q> const& x); + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator=(mat<2, 4, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator+=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator+=(mat<2, 4, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator-=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator-=(mat<2, 4, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator*=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator/=(U s); + + // -- Increment and decrement operators -- + + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator++ (); + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<2, 4, T, Q> & operator-- (); + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator*(T scalar, mat<2, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 4, T, Q>::col_type operator*(mat<2, 4, T, Q> const& m, typename mat<2, 4, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<2, 4, T, Q>::row_type operator*(typename mat<2, 4, T, Q>::col_type const& v, mat<2, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<4, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<2, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<3, 
2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator/(mat<2, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator/(T scalar, mat<2, 4, T, Q> const& m); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat2x4.inl" +#endif diff --git a/includes/glm/detail/type_mat2x4.inl b/includes/glm/detail/type_mat2x4.inl new file mode 100644 index 0000000..1c182c9 --- /dev/null +++ b/includes/glm/detail/type_mat2x4.inl @@ -0,0 +1,520 @@ +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0, 0, 0); + this->value[1] = col_type(0, 1, 0, 0); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 4, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0, 0, 0); + this->value[1] = col_type(0, s, 0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat + ( + T x0, T y0, T z0, T w0, + T x1, T y1, T z1, T w1 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0, z0, w0), col_type(x1, y1, z1, w1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0, w0); + this->value[1] = col_type(x1, y1, z1, w1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(col_type const& v0, col_type const& v1) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X1, typename Y1, typename Z1, typename W1, + typename X2, typename Y2, typename Z2, typename W2> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat + ( + X1 x1, Y1 y1, Z1 z1, W1 w1, + X2 x2, Y2 y2, Z2 z2, W2 w2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{ + col_type(x1, y1, z1, w1), + col_type(x2, y2, z2, w2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x1, y1, z1, w1); + this->value[1] = col_type(x2, y2, z2, w2); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(vec<4, V1, Q> const& v1, vec<4, V2, Q> const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v1); + this->value[1] = col_type(v2); +# endif + } + + // -- Matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 4, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : 
value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 4, T, Q>::col_type & mat<2, 4, T, Q>::operator[](typename mat<2, 4, T, Q>::length_type i) GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 4, T, Q>::col_type const& mat<2, 4, T, Q>::operator[](typename mat<2, 4, T, Q>::length_type i) const GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator=(mat<2, 4, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator+=(U s) + { + this->value[0] += s; + 
this->value[1] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator+=(mat<2, 4, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator-=(mat<2, 4, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> & mat<2, 4, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> mat<2, 4, T, Q>::operator++(int) + { + mat<2, 4, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> mat<2, 4, T, Q>::operator--(int) + { + mat<2, 4, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m) + { + return mat<2, 4, T, Q>( + -m[0], + -m[1]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m, T scalar) + { + return mat<2, 4, T, Q>( + m[0] + scalar, + m[1] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) + { + return mat<2, 4, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m, T scalar) + { + return mat<2, 4, T, Q>( + m[0] - scalar, + m[1] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) + { + return mat<2, 4, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m, T scalar) + { + return mat<2, 4, T, Q>( + m[0] * scalar, + m[1] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator*(T scalar, mat<2, 4, T, Q> const& m) + { + return mat<2, 4, T, Q>( + m[0] * scalar, + m[1] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 4, T, Q>::col_type operator*(mat<2, 4, T, Q> const& m, typename mat<2, 4, T, Q>::row_type const& v) + { + return typename mat<2, 4, T, Q>::col_type( + m[0][0] * v.x + m[1][0] * v.y, + m[0][1] * v.x + m[1][1] * v.y, + m[0][2] * v.x + m[1][2] * v.y, + m[0][3] * v.x + m[1][3] * v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR 
typename mat<2, 4, T, Q>::row_type operator*(typename mat<2, 4, T, Q>::col_type const& v, mat<2, 4, T, Q> const& m) + { + return typename mat<2, 4, T, Q>::row_type( + v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2] + v.w * m[0][3], + v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2] + v.w * m[1][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + T SrcA00 = m1[0][0]; + T SrcA01 = m1[0][1]; + T SrcA02 = m1[0][2]; + T SrcA03 = m1[0][3]; + T SrcA10 = m1[1][0]; + T SrcA11 = m1[1][1]; + T SrcA12 = m1[1][2]; + T SrcA13 = m1[1][3]; + + T SrcB00 = m2[0][0]; + T SrcB01 = m2[0][1]; + T SrcB10 = m2[1][0]; + T SrcB11 = m2[1][1]; + T SrcB20 = m2[2][0]; + T SrcB21 = m2[2][1]; + T SrcB30 = m2[3][0]; + T SrcB31 = m2[3][1]; + + mat<4, 4, T, Q> Result; + Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01; + Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01; + Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01; + Result[0][3] = SrcA03 * SrcB00 + SrcA13 * SrcB01; + Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11; + Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11; + Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11; + Result[1][3] = SrcA03 * SrcB10 + SrcA13 * SrcB11; + Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21; + Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21; + Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21; + Result[2][3] = SrcA03 * SrcB20 + SrcA13 * SrcB21; + Result[3][0] = SrcA00 * SrcB30 + SrcA10 * SrcB31; + Result[3][1] = SrcA01 * SrcB30 + SrcA11 * SrcB31; + Result[3][2] = SrcA02 * SrcB30 + SrcA12 * SrcB31; + Result[3][3] = SrcA03 * SrcB30 + SrcA13 * SrcB31; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<2, 2, T, Q> const& m2) + { + return mat<2, 4, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], + m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1], + m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + return mat<3, 4, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], + m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1], + m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1], + m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1], + m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator/(mat<2, 4, T, Q> const& m, T scalar) + { + return mat<2, 4, T, Q>( + m[0] / scalar, + m[1] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator/(T scalar, mat<2, 4, T, Q> const& m) + { + return mat<2, 4, T, Q>( + scalar / m[0], + scalar / m[1]); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<2, 4, T, Q> 
const& m1, mat<2, 4, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]); + } +} //namespace glm diff --git a/includes/glm/detail/type_mat3x2.hpp b/includes/glm/detail/type_mat3x2.hpp new file mode 100644 index 0000000..0249bef --- /dev/null +++ b/includes/glm/detail/type_mat3x2.hpp @@ -0,0 +1,167 @@ +/// @ref core +/// @file glm/detail/type_mat3x2.hpp + +#pragma once + +#include "type_vec2.hpp" +#include "type_vec3.hpp" +#include +#include + +namespace glm +{ + template + struct mat<3, 2, T, Q> + { + typedef vec<2, T, Q> col_type; + typedef vec<3, T, Q> row_type; + typedef mat<3, 2, T, Q> type; + typedef mat<2, 3, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[3]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_CTOR_DECL mat(mat<3, 2, T, P> const& m); + + GLM_CTOR_DECL GLM_EXPLICIT mat(T scalar); + GLM_CTOR_DECL mat( + T x0, T y0, + T x1, T y1, + T x2, T y2); + GLM_CTOR_DECL mat( + col_type const& v0, + col_type const& v1, + col_type const& v2); + + // -- Conversions -- + + template< + typename X1, typename Y1, + typename X2, typename Y2, + typename X3, typename Y3> + GLM_CTOR_DECL mat( + X1 x1, Y1 y1, + X2 x2, Y2 y2, + X3 x3, Y3 y3); + + template + GLM_CTOR_DECL mat( + vec<2, V1, Q> const& v1, + vec<2, V2, Q> const& v2, + vec<2, V3, Q> const& v3); + + // -- Matrix conversions -- + + template + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 2, U, P> const& m); + + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 3, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 3, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 3, T, Q> const& x); + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator=(mat<3, 2, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator+=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator+=(mat<3, 2, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator-=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator-=(mat<3, 2, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator*=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator/=(U s); + + // -- Increment and decrement operators -- + + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator++ (); + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 2, T, Q> & operator-- (); + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m); + + // -- Binary operators -- + + 
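+ /// Usage sketch for the mat3x2 operators declared below: a minimal illustration with
+ /// hypothetical values, assuming the standard glm::mat3x2, glm::mat2x3, glm::mat2 and
+ /// glm::vec float typedefs. A mat3x2 holds 3 columns of vec2, so it maps a vec3 down
+ /// to a vec2, and multiplying it by a mat2x3 collapses to a mat2.
+ /// @code
+ /// glm::mat3x2 m(1.0f, 0.0f,   // column 0 (1, 0)
+ ///               0.0f, 1.0f,   // column 1 (0, 1)
+ ///               2.0f, 3.0f);  // column 2 (2, 3)
+ /// glm::vec2 q = m * glm::vec3(1.0f, 1.0f, 1.0f);   // (3, 4)
+ /// glm::mat2 r = m * glm::mat2x3(1.0f);             // (3x2) * (2x3) -> 2x2
+ /// @endcode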
template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator*(T scalar, mat<3, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 2, T, Q>::col_type operator*(mat<3, 2, T, Q> const& m, typename mat<3, 2, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 2, T, Q>::row_type operator*(typename mat<3, 2, T, Q>::col_type const& v, mat<3, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<2, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<3, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<4, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator/(mat<3, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator/(T scalar, mat<3, 2, T, Q> const& m); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); + +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat3x2.inl" +#endif diff --git a/includes/glm/detail/type_mat3x2.inl b/includes/glm/detail/type_mat3x2.inl new file mode 100644 index 0000000..cd9f46c --- /dev/null +++ b/includes/glm/detail/type_mat3x2.inl @@ -0,0 +1,532 @@ +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0), col_type(0, 1), col_type(0, 0)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0); + this->value[1] = col_type(0, 1); + this->value[2] = col_type(0, 0); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 2, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0), col_type(0, s), col_type(0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0); + this->value[1] = col_type(0, s); + this->value[2] = col_type(0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat + ( + T x0, T y0, + T x1, T y1, + T x2, T y2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] 
= col_type(x0, y0); + this->value[1] = col_type(x1, y1); + this->value[2] = col_type(x2, y2); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; + this->value[2] = v2; +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X0, typename Y0, + typename X1, typename Y1, + typename X2, typename Y2> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat + ( + X0 x0, Y0 y0, + X1 x1, Y1 y1, + X2 x2, Y2 y2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0); + this->value[1] = col_type(x1, y1); + this->value[2] = col_type(x2, y2); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(vec<2, V0, Q> const& v0, vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v0); + this->value[1] = col_type(v1); + this->value[2] = col_type(v2); +# endif + } + + // -- Matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 2, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 4, T, Q> 
const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 2, T, Q>::col_type & mat<3, 2, T, Q>::operator[](typename mat<3, 2, T, Q>::length_type i) GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 2, T, Q>::col_type const& mat<3, 2, T, Q>::operator[](typename mat<3, 2, T, Q>::length_type i) const GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator=(mat<3, 2, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + this->value[2] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator+=(mat<3, 2, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + this->value[2] += m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + this->value[2] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator-=(mat<3, 2, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + this->value[2] -= m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + this->value[2] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> & mat<3, 2, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + this->value[2] /= s; + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + ++this->value[2]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + --this->value[2]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> mat<3, 2, T, Q>::operator++(int) + { + mat<3, 2, T, Q> Result(*this); + ++*this; + return Result; + 
} + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> mat<3, 2, T, Q>::operator--(int) + { + mat<3, 2, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m) + { + return mat<3, 2, T, Q>( + -m[0], + -m[1], + -m[2]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m, T scalar) + { + return mat<3, 2, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + return mat<3, 2, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1], + m1[2] + m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m, T scalar) + { + return mat<3, 2, T, Q>( + m[0] - scalar, + m[1] - scalar, + m[2] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + return mat<3, 2, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1], + m1[2] - m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m, T scalar) + { + return mat<3, 2, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator*(T scalar, mat<3, 2, T, Q> const& m) + { + return mat<3, 2, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 2, T, Q>::col_type operator*(mat<3, 2, T, Q> const& m, typename mat<3, 2, T, Q>::row_type const& v) + { + return typename mat<3, 2, T, Q>::col_type( + m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z, + m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 2, T, Q>::row_type operator*(typename mat<3, 2, T, Q>::col_type const& v, mat<3, 2, T, Q> const& m) + { + return typename mat<3, 2, T, Q>::row_type( + v.x * m[0][0] + v.y * m[0][1], + v.x * m[1][0] + v.y * m[1][1], + v.x * m[2][0] + v.y * m[2][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + const T SrcA00 = m1[0][0]; + const T SrcA01 = m1[0][1]; + const T SrcA10 = m1[1][0]; + const T SrcA11 = m1[1][1]; + const T SrcA20 = m1[2][0]; + const T SrcA21 = m1[2][1]; + + const T SrcB00 = m2[0][0]; + const T SrcB01 = m2[0][1]; + const T SrcB02 = m2[0][2]; + const T SrcB10 = m2[1][0]; + const T SrcB11 = m2[1][1]; + const T SrcB12 = m2[1][2]; + + mat<2, 2, T, Q> Result; + Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02; + Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02; + Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12; + Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + return mat<3, 2, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], + 
m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + return mat<4, 2, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2], + m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2], + m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator/(mat<3, 2, T, Q> const& m, T scalar) + { + return mat<3, 2, T, Q>( + m[0] / scalar, + m[1] / scalar, + m[2] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator/(T scalar, mat<3, 2, T, Q> const& m) + { + return mat<3, 2, T, Q>( + scalar / m[0], + scalar / m[1], + scalar / m[2]); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]); + } +} //namespace glm diff --git a/includes/glm/detail/type_mat3x3.hpp b/includes/glm/detail/type_mat3x3.hpp new file mode 100644 index 0000000..e4cbbdc --- /dev/null +++ b/includes/glm/detail/type_mat3x3.hpp @@ -0,0 +1,184 @@ +/// @ref core +/// @file glm/detail/type_mat3x3.hpp + +#pragma once + +#include "type_vec3.hpp" +#include +#include + +namespace glm +{ + template + struct mat<3, 3, T, Q> + { + typedef vec<3, T, Q> col_type; + typedef vec<3, T, Q> row_type; + typedef mat<3, 3, T, Q> type; + typedef mat<3, 3, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[3]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_CTOR_DECL mat(mat<3, 3, T, P> const& m); + + GLM_CTOR_DECL GLM_EXPLICIT mat(T scalar); + GLM_CTOR_DECL mat( + T x0, T y0, T z0, + T x1, T y1, T z1, + T x2, T y2, T z2); + GLM_CTOR_DECL mat( + col_type const& v0, + col_type const& v1, + col_type const& v2); + + // -- Conversions -- + + template< + typename X1, typename Y1, typename Z1, + typename X2, typename Y2, typename Z2, + typename X3, typename Y3, typename Z3> + GLM_CTOR_DECL mat( + X1 x1, Y1 y1, Z1 z1, + X2 x2, Y2 y2, Z2 z2, + X3 x3, Y3 y3, Z3 z3); + + template + GLM_CTOR_DECL mat( + vec<3, V1, Q> const& v1, + vec<3, V2, Q> const& v2, + vec<3, V3, Q> const& v3); + + // -- Matrix conversions -- + + template + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 3, U, P> const& m); + + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 2, T, Q> const& x); + GLM_CTOR_DECL 
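// Note on the storage declared above: mat<3, 3, T, Q> holds value[3], an array of three
// col_type (vec<3, T, Q>) columns, so the matrix is column-major and length() reports the
// number of columns. operator[](i) returns column i, and m[c][r] addresses column c, row r.
// Illustrative sketch only; it assumes <glm/glm.hpp> is included and the default float qualifier:
//
//   glm::mat3 m(glm::vec3(1, 2, 3),   // column 0
//               glm::vec3(4, 5, 6),   // column 1
//               glm::vec3(7, 8, 9));  // column 2
//   glm::vec3 c1 = m[1];              // second column: (4, 5, 6)
//   float e = m[1][2];                // column 1, row 2 -> 6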
GLM_EXPLICIT mat(mat<4, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 3, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 3, T, Q> const& x); + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator=(mat<3, 3, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator+=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator+=(mat<3, 3, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator-=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator-=(mat<3, 3, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator*=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator*=(mat<3, 3, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator/=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator/=(mat<3, 3, U, Q> const& m); + + // -- Increment and decrement operators -- + + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator++(); + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 3, T, Q> & operator--(); + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator+(T scalar, mat<3, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator-(T scalar, mat<3, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator*(T scalar, mat<3, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type operator*(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 3, T, Q>::row_type operator*(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m, T 
scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator/(T scalar, mat<3, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type operator/(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 3, T, Q>::row_type operator/(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat3x3.inl" +#endif diff --git a/includes/glm/detail/type_mat3x3.inl b/includes/glm/detail/type_mat3x3.inl new file mode 100644 index 0000000..5ccfb22 --- /dev/null +++ b/includes/glm/detail/type_mat3x3.inl @@ -0,0 +1,635 @@ +#include "../matrix.hpp" +#include "../common.hpp" + +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0, 0), col_type(0, 1, 0), col_type(0, 0, 1)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0, 0); + this->value[1] = col_type(0, 1, 0); + this->value[2] = col_type(0, 0, 1); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 3, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0, 0), col_type(0, s, 0), col_type(0, 0, s)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0, 0); + this->value[1] = col_type(0, s, 0); + this->value[2] = col_type(0, 0, s); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat + ( + T x0, T y0, T z0, + T x1, T y1, T z1, + T x2, T y2, T z2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0); + this->value[1] = col_type(x1, y1, z1); + this->value[2] = col_type(x2, y2, z2); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v0); + this->value[1] = col_type(v1); + this->value[2] = col_type(v2); +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X1, typename Y1, typename Z1, + typename X2, typename Y2, typename Z2, + typename X3, typename Y3, typename Z3> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat + ( + X1 x1, Y1 y1, Z1 z1, + X2 x2, Y2 y2, Z2 z2, + X3 x3, Y3 y3, Z3 z3 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x1, y1, 
z1), col_type(x2, y2, z2), col_type(x3, y3, z3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x1, y1, z1); + this->value[1] = col_type(x2, y2, z2); + this->value[2] = col_type(x3, y3, z3); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, Q> const& v2, vec<3, V3, Q> const& v3) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v1), col_type(v2), col_type(v3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v1); + this->value[1] = col_type(v2); + this->value[2] = col_type(v3); +# endif + } + + // -- Matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 3, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + 
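// Note on the matrix conversion constructors above: building a mat<3, 3> from a larger matrix
// (mat<4, 4>, mat<3, 4>, mat<4, 3>, ...) keeps the overlapping upper-left block and drops the
// extra rows and columns, while building it from a smaller matrix (mat<2, 2>, mat<2, 3>,
// mat<3, 2>, ...) fills the missing entries from the identity.
// Illustrative sketch only; assumes <glm/glm.hpp> is included:
//
//   glm::mat4 m4(2.0f);      // 2 on the diagonal
//   glm::mat3 upper(m4);     // upper-left 3x3 block of m4
//   glm::mat2 m2(5.0f);
//   glm::mat3 padded(m2);    // columns (5,0,0), (0,5,0), (0,0,1): identity-padded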
GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type & mat<3, 3, T, Q>::operator[](typename mat<3, 3, T, Q>::length_type i) GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type const& mat<3, 3, T, Q>::operator[](typename mat<3, 3, T, Q>::length_type i) const GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator=(mat<3, 3, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + this->value[2] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator+=(mat<3, 3, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + this->value[2] += m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + this->value[2] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator-=(mat<3, 3, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + this->value[2] -= m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator*=(U s) + { + col_type sv(s); + this->value[0] *= sv; + this->value[1] *= sv; + this->value[2] *= sv; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator*=(mat<3, 3, U, Q> const& m) + { + return (*this = *this * m); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + this->value[2] /= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator/=(mat<3, 3, U, Q> const& m) + { + return *this *= inverse(m); + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + ++this->value[2]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + --this->value[2]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> mat<3, 3, T, Q>::operator++(int) + { + mat<3, 3, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> mat<3, 3, T, Q>::operator--(int) + { + mat<3, 3, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + 
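// Note on the compound operators above: operator*= with another mat<3, 3> performs a full
// matrix product (*this = *this * m), and operator/= with a matrix multiplies by its inverse
// (*this *= inverse(m)), so dividing by a singular matrix is undefined. The scalar overloads
// apply the scalar to every column.
// Illustrative sketch only; assumes <glm/glm.hpp> is included and n is invertible:
//
//   glm::mat3 a(1.0f), n(2.0f);
//   a /= n;        // same result (up to rounding) as a *= glm::inverse(n)
//   a *= 3.0f;     // scales every element of a by 3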
GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m) + { + return mat<3, 3, T, Q>( + -m[0], + -m[1], + -m[2]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m, T scalar) + { + return mat<3, 3, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator+(T scalar, mat<3, 3, T, Q> const& m) + { + return mat<3, 3, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + return mat<3, 3, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1], + m1[2] + m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m, T scalar) + { + return mat<3, 3, T, Q>( + m[0] - scalar, + m[1] - scalar, + m[2] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator-(T scalar, mat<3, 3, T, Q> const& m) + { + return mat<3, 3, T, Q>( + scalar - m[0], + scalar - m[1], + scalar - m[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + return mat<3, 3, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1], + m1[2] - m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m, T scalar) + { + return mat<3, 3, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator*(T scalar, mat<3, 3, T, Q> const& m) + { + return mat<3, 3, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type operator*(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v) + { + return typename mat<3, 3, T, Q>::col_type( + m[0] * splatX(v) + m[1] * splatY(v) + m[2] * splatZ(v)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::row_type operator*(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m) + { + return typename mat<3, 3, T, Q>::row_type( + dot(m[0], v), + dot(m[1], v), + dot(m[2], v)); + } + + namespace detail + { + template + struct mul3x3 {}; + +#if GLM_CONFIG_SIMD == GLM_ENABLE + template + struct mul3x3 + { + GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + typename mat<4, 4, T, Q>::col_type const SrcA0 = xyzz(m1[0]); + typename mat<4, 4, T, Q>::col_type const SrcA1 = xyzz(m1[1]); + typename mat<4, 4, T, Q>::col_type const SrcA2 = xyzz(m1[2]); + + typename mat<4, 4, T, Q>::col_type const SrcB0 = xyzz(m2[0]); + typename mat<4, 4, T, Q>::col_type const SrcB1 = xyzz(m2[1]); + typename mat<4, 4, T, Q>::col_type const SrcB2 = xyzz(m2[2]); + + mat<3, 3, T, Q> Result; + Result[0] = xyz(glm::fma(SrcA2, splatZ(SrcB0), glm::fma(SrcA1, splatY(SrcB0), SrcA0 * splatX(SrcB0)))); + Result[1] = xyz(glm::fma(SrcA2, splatZ(SrcB1), glm::fma(SrcA1, splatY(SrcB1), SrcA0 * splatX(SrcB1)))); + Result[2] = xyz(glm::fma(SrcA2, splatZ(SrcB2), glm::fma(SrcA1, splatY(SrcB2), SrcA0 * splatX(SrcB2)))); + return mat<3, 3, T, Q>(Result); + } + }; +#endif + template + struct mul3x3 + { + GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 
3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + typename mat<3, 3, T, Q>::col_type const& SrcA0 = m1[0]; + typename mat<3, 3, T, Q>::col_type const& SrcA1 = m1[1]; + typename mat<3, 3, T, Q>::col_type const& SrcA2 = m1[2]; + + typename mat<3, 3, T, Q>::col_type const& SrcB0 = m2[0]; + typename mat<3, 3, T, Q>::col_type const& SrcB1 = m2[1]; + typename mat<3, 3, T, Q>::col_type const& SrcB2 = m2[2]; + + mat<3, 3, T, Q> Result; + // note: the following lines are decomposed to have consistent results between simd and non simd code (prevent rounding error because of operation order) + //Result[0] = SrcA2 * SrcB1.z + SrcA1 * SrcB1.y + SrcA0 * SrcB1.x; + //Result[1] = SrcA2 * SrcB1.z + SrcA1 * SrcB1.y + SrcA0 * SrcB1.x; + //Result[2] = SrcA2 * SrcB2.z + SrcA1 * SrcB2.y + SrcA0 * SrcB2.x; + + typename mat<3, 3, T, Q>::col_type tmp; + tmp = SrcA0 * SrcB0.x; + tmp += SrcA1 * SrcB0.y; + tmp += SrcA2 * SrcB0.z; + Result[0] = tmp; + tmp = SrcA0 * SrcB1.x; + tmp += SrcA1 * SrcB1.y; + tmp += SrcA2 * SrcB1.z; + Result[1] = tmp; + tmp = SrcA0 * SrcB2.x; + tmp += SrcA1 * SrcB2.y; + tmp += SrcA2 * SrcB2.z; + Result[2] = tmp; + return Result; + } + }; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + return detail::mul3x3::value>::call(m1, m2); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + return mat<2, 3, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + return mat<4, 3, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2], + m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2], + m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2], + m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2], + m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1] + m1[2][2] * m2[3][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m, T scalar) + { + return mat<3, 3, T, Q>( + m[0] / scalar, + m[1] / scalar, + m[2] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator/(T scalar, mat<3, 3, T, Q> const& m) + { + return mat<3, 3, T, Q>( + scalar / m[0], + scalar / m[1], + scalar / m[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type operator/(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v) + { + return inverse(m) * v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::row_type 
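// Note on detail::mul3x3 above: the specialization guarded by GLM_CONFIG_SIMD (selected for
// aligned qualifiers) widens each vec3 column to a vec4 and chains glm::fma calls, and the
// generic path deliberately accumulates the product column by column in the same order so
// that both paths round consistently (see the in-code note).
// Equivalent scalar sketch of one product column, illustrative only (P, A, B are glm::mat3
// and c is a column index):
//
//   glm::vec3 acc = A[0] * B[c].x;  // first partial product
//   acc += A[1] * B[c].y;           // accumulate in the same order as the SIMD path
//   acc += A[2] * B[c].z;
//   P[c] = acc;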
operator/(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m) + { + return v * inverse(m); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + mat<3, 3, T, Q> m1_copy(m1); + return m1_copy /= m2; + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]); + } +} //namespace glm diff --git a/includes/glm/detail/type_mat3x4.hpp b/includes/glm/detail/type_mat3x4.hpp new file mode 100644 index 0000000..f9913d2 --- /dev/null +++ b/includes/glm/detail/type_mat3x4.hpp @@ -0,0 +1,166 @@ +/// @ref core +/// @file glm/detail/type_mat3x4.hpp + +#pragma once + +#include "type_vec3.hpp" +#include "type_vec4.hpp" +#include +#include + +namespace glm +{ + template + struct mat<3, 4, T, Q> + { + typedef vec<4, T, Q> col_type; + typedef vec<3, T, Q> row_type; + typedef mat<3, 4, T, Q> type; + typedef mat<4, 3, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[3]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_CTOR_DECL mat(mat<3, 4, T, P> const& m); + + GLM_CTOR_DECL GLM_EXPLICIT mat(T scalar); + GLM_CTOR_DECL mat( + T x0, T y0, T z0, T w0, + T x1, T y1, T z1, T w1, + T x2, T y2, T z2, T w2); + GLM_CTOR_DECL mat( + col_type const& v0, + col_type const& v1, + col_type const& v2); + + // -- Conversions -- + + template< + typename X1, typename Y1, typename Z1, typename W1, + typename X2, typename Y2, typename Z2, typename W2, + typename X3, typename Y3, typename Z3, typename W3> + GLM_CTOR_DECL mat( + X1 x1, Y1 y1, Z1 z1, W1 w1, + X2 x2, Y2 y2, Z2 z2, W2 w2, + X3 x3, Y3 y3, Z3 z3, W3 w3); + + template + GLM_CTOR_DECL mat( + vec<4, V1, Q> const& v1, + vec<4, V2, Q> const& v2, + vec<4, V3, Q> const& v3); + + // -- Matrix conversions -- + + template + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 4, U, P> const& m); + + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 3, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 3, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 4, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 2, T, Q> const& x); + GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 3, T, Q> const& x); + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator=(mat<3, 4, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator+=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator+=(mat<3, 4, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator-=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & 
operator-=(mat<3, 4, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator*=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator/=(U s); + + // -- Increment and decrement operators -- + + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator++(); + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<3, 4, T, Q> & operator--(); + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator*(T scalar, mat<3, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 4, T, Q>::col_type operator*(mat<3, 4, T, Q> const& m, typename mat<3, 4, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<3, 4, T, Q>::row_type operator*(typename mat<3, 4, T, Q>::col_type const& v, mat<3, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<4, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<2, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<3, 3, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator/(mat<3, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator/(T scalar, mat<3, 4, T, Q> const& m); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat3x4.inl" +#endif diff --git a/includes/glm/detail/type_mat3x4.inl b/includes/glm/detail/type_mat3x4.inl new file mode 100644 index 0000000..209e9d9 --- /dev/null +++ b/includes/glm/detail/type_mat3x4.inl @@ -0,0 +1,578 @@ +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0), col_type(0, 0, 1, 0)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0, 0, 0); + this->value[1] = col_type(0, 1, 0, 0); + this->value[2] = col_type(0, 0, 1, 0); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 4, T, P> const& m) +# if 
GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0), col_type(0, 0, s, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0, 0, 0); + this->value[1] = col_type(0, s, 0, 0); + this->value[2] = col_type(0, 0, s, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat + ( + T x0, T y0, T z0, T w0, + T x1, T y1, T z1, T w1, + T x2, T y2, T z2, T w2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{ + col_type(x0, y0, z0, w0), + col_type(x1, y1, z1, w1), + col_type(x2, y2, z2, w2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0, w0); + this->value[1] = col_type(x1, y1, z1, w1); + this->value[2] = col_type(x2, y2, z2, w2); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; + this->value[2] = v2; +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X0, typename Y0, typename Z0, typename W0, + typename X1, typename Y1, typename Z1, typename W1, + typename X2, typename Y2, typename Z2, typename W2> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat + ( + X0 x0, Y0 y0, Z0 z0, W0 w0, + X1 x1, Y1 y1, Z1 z1, W1 w1, + X2 x2, Y2 y2, Z2 z2, W2 w2 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{ + col_type(x0, y0, z0, w0), + col_type(x1, y1, z1, w1), + col_type(x2, y2, z2, w2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0, w0); + this->value[1] = col_type(x1, y1, z1, w1); + this->value[2] = col_type(x2, y2, z2, w2); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(vec<4, V1, Q> const& v0, vec<4, V2, Q> const& v1, vec<4, V3, Q> const& v2) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v0); + this->value[1] = col_type(v1); + this->value[2] = col_type(v2); +# endif + } + + // -- Matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 4, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); + this->value[2] = col_type(0, 0, 1, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + 
this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(0, 0, 1, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); + this->value[2] = col_type(m[2], 1, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0, 0, 1, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); + this->value[2] = col_type(m[2], 1, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 0); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 4, T, Q>::col_type & mat<3, 4, T, Q>::operator[](typename mat<3, 4, T, Q>::length_type i) GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 4, T, Q>::col_type const& mat<3, 4, T, Q>::operator[](typename mat<3, 4, T, Q>::length_type i) const GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator=(mat<3, 4, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + this->value[2] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator+=(mat<3, 4, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + this->value[2] += m[2]; + return *this; + } + + template + template + 
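// Note on the scalar compound operators in this file: for matrices, operator+=(U s) and
// operator-=(U s) are componentwise; the scalar is applied to every element of every column,
// not only to the diagonal.
// Illustrative sketch only; assumes <glm/glm.hpp> is included:
//
//   glm::mat3x4 m(0.0f);
//   m += 1.0f;     // all 12 elements become 1 (this is not m + identity)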
GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + this->value[2] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator-=(mat<3, 4, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + this->value[2] -= m[2]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + this->value[2] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> & mat<3, 4, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + this->value[2] /= s; + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + ++this->value[2]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + --this->value[2]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> mat<3, 4, T, Q>::operator++(int) + { + mat<3, 4, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> mat<3, 4, T, Q>::operator--(int) + { + mat<3, 4, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m) + { + return mat<3, 4, T, Q>( + -m[0], + -m[1], + -m[2]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m, T scalar) + { + return mat<3, 4, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + return mat<3, 4, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1], + m1[2] + m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m, T scalar) + { + return mat<3, 4, T, Q>( + m[0] - scalar, + m[1] - scalar, + m[2] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + return mat<3, 4, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1], + m1[2] - m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m, T scalar) + { + return mat<3, 4, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator*(T scalar, mat<3, 4, T, Q> const& m) + { + return mat<3, 4, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 4, T, Q>::col_type operator* + ( + mat<3, 4, T, Q> const& m, + typename mat<3, 4, T, Q>::row_type const& v + ) + { + return typename mat<3, 4, T, Q>::col_type( + m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z, + m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z, + m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z, + m[0][3] * v.x + m[1][3] * v.y + m[2][3] * v.z); + } + + 
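// Note on the products defined just above and below: for a mat<3, 4> (three vec4 columns),
// m * v takes a row_type vec3 and yields a col_type vec4, while v * m takes a vec4 on the
// left and yields a vec3.
// Illustrative sketch only; assumes <glm/glm.hpp> is included:
//
//   glm::mat3x4 m(1.0f);
//   glm::vec4 a = m * glm::vec3(1.0f, 2.0f, 3.0f);        // (1, 2, 3, 0)
//   glm::vec3 b = glm::vec4(1.0f, 2.0f, 3.0f, 4.0f) * m;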
template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 4, T, Q>::row_type operator* + ( + typename mat<3, 4, T, Q>::col_type const& v, + mat<3, 4, T, Q> const& m + ) + { + return typename mat<3, 4, T, Q>::row_type( + v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2] + v.w * m[0][3], + v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2] + v.w * m[1][3], + v.x * m[2][0] + v.y * m[2][1] + v.z * m[2][2] + v.w * m[2][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + const T SrcA00 = m1[0][0]; + const T SrcA01 = m1[0][1]; + const T SrcA02 = m1[0][2]; + const T SrcA03 = m1[0][3]; + const T SrcA10 = m1[1][0]; + const T SrcA11 = m1[1][1]; + const T SrcA12 = m1[1][2]; + const T SrcA13 = m1[1][3]; + const T SrcA20 = m1[2][0]; + const T SrcA21 = m1[2][1]; + const T SrcA22 = m1[2][2]; + const T SrcA23 = m1[2][3]; + + const T SrcB00 = m2[0][0]; + const T SrcB01 = m2[0][1]; + const T SrcB02 = m2[0][2]; + const T SrcB10 = m2[1][0]; + const T SrcB11 = m2[1][1]; + const T SrcB12 = m2[1][2]; + const T SrcB20 = m2[2][0]; + const T SrcB21 = m2[2][1]; + const T SrcB22 = m2[2][2]; + const T SrcB30 = m2[3][0]; + const T SrcB31 = m2[3][1]; + const T SrcB32 = m2[3][2]; + + mat<4, 4, T, Q> Result; + Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02; + Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02; + Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02; + Result[0][3] = SrcA03 * SrcB00 + SrcA13 * SrcB01 + SrcA23 * SrcB02; + Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12; + Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12; + Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12; + Result[1][3] = SrcA03 * SrcB10 + SrcA13 * SrcB11 + SrcA23 * SrcB12; + Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22; + Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22; + Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22; + Result[2][3] = SrcA03 * SrcB20 + SrcA13 * SrcB21 + SrcA23 * SrcB22; + Result[3][0] = SrcA00 * SrcB30 + SrcA10 * SrcB31 + SrcA20 * SrcB32; + Result[3][1] = SrcA01 * SrcB30 + SrcA11 * SrcB31 + SrcA21 * SrcB32; + Result[3][2] = SrcA02 * SrcB30 + SrcA12 * SrcB31 + SrcA22 * SrcB32; + Result[3][3] = SrcA03 * SrcB30 + SrcA13 * SrcB31 + SrcA23 * SrcB32; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<2, 3, T, Q> const& m2) + { + return mat<2, 4, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], + m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2], + m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<3, 3, T, Q> const& m2) + { + return mat<3, 4, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], + m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2], + m1[0][0] 
* m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2], + m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2], + m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2], + m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1] + m1[2][3] * m2[2][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator/(mat<3, 4, T, Q> const& m, T scalar) + { + return mat<3, 4, T, Q>( + m[0] / scalar, + m[1] / scalar, + m[2] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator/(T scalar, mat<3, 4, T, Q> const& m) + { + return mat<3, 4, T, Q>( + scalar / m[0], + scalar / m[1], + scalar / m[2]); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]); + } +} //namespace glm diff --git a/includes/glm/detail/type_mat4x2.hpp b/includes/glm/detail/type_mat4x2.hpp new file mode 100644 index 0000000..7057d4c --- /dev/null +++ b/includes/glm/detail/type_mat4x2.hpp @@ -0,0 +1,171 @@ +/// @ref core +/// @file glm/detail/type_mat4x2.hpp + +#pragma once + +#include "type_vec2.hpp" +#include "type_vec4.hpp" +#include +#include + +namespace glm +{ + template + struct mat<4, 2, T, Q> + { + typedef vec<2, T, Q> col_type; + typedef vec<4, T, Q> row_type; + typedef mat<4, 2, T, Q> type; + typedef mat<2, 4, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[4]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 4; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_CTOR_DECL mat(mat<4, 2, T, P> const& m); + + GLM_CTOR_DECL mat(T scalar); + GLM_CTOR_DECL mat( + T x0, T y0, + T x1, T y1, + T x2, T y2, + T x3, T y3); + GLM_CTOR_DECL mat( + col_type const& v0, + col_type const& v1, + col_type const& v2, + col_type const& v3); + + // -- Conversions -- + + template< + typename X0, typename Y0, + typename X1, typename Y1, + typename X2, typename Y2, + typename X3, typename Y3> + GLM_CTOR_DECL mat( + X0 x0, Y0 y0, + X1 x1, Y1 y1, + X2 x2, Y2 y2, + X3 x3, Y3 y3); + + template + GLM_CTOR_DECL mat( + vec<2, V1, Q> const& v1, + vec<2, V2, Q> const& v2, + vec<2, V3, Q> const& v3, + vec<2, V4, Q> const& v4); + + // -- Matrix conversions -- + + template + GLM_CTOR_DECL mat(mat<4, 2, U, P> const& m); + + GLM_CTOR_DECL mat(mat<2, 2, T, Q> const& x); + GLM_CTOR_DECL mat(mat<3, 3, T, Q> const& x); + GLM_CTOR_DECL mat(mat<4, 4, T, Q> const& x); + GLM_CTOR_DECL mat(mat<2, 3, T, Q> const& x); + GLM_CTOR_DECL mat(mat<3, 2, T, Q> const& x); + GLM_CTOR_DECL mat(mat<2, 4, T, Q> const& x); + GLM_CTOR_DECL mat(mat<4, 3, T, Q> const& x); + GLM_CTOR_DECL mat(mat<3, 4, T, Q> const& x); + + // -- Unary arithmetic operators -- + + 
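// Note on this mat<4, 2> specialization: following GLM's column-count-first naming, it stores
// four vec2 columns (length() == 4, two rows), its transpose_type is mat<2, 4>, and m * v maps
// a row_type vec4 to a col_type vec2.
// Illustrative sketch only; assumes <glm/glm.hpp> is included:
//
//   glm::mat4x2 m(1.0f);                                    // 1 on the 2x2 diagonal, rest 0
//   glm::vec2 p = m * glm::vec4(1.0f, 2.0f, 3.0f, 4.0f);    // (1, 2)
//   glm::mat2 q = m * glm::mat2x4(1.0f);                    // mat<4,2> * mat<2,4> -> mat<2,2>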
template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator=(mat<4, 2, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator+=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator+=(mat<4, 2, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator-=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator-=(mat<4, 2, U, Q> const& m); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator*=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator/=(U s); + + // -- Increment and decrement operators -- + + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator++ (); + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 2, T, Q> & operator-- (); + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator--(int); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator*(T scalar, mat<4, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 2, T, Q>::col_type operator*(mat<4, 2, T, Q> const& m, typename mat<4, 2, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 2, T, Q>::row_type operator*(typename mat<4, 2, T, Q>::col_type const& v, mat<4, 2, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<2, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<3, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<4, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator/(mat<4, 2, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 2, T, Q> operator/(T scalar, mat<4, 2, T, Q> const& m); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat4x2.inl" +#endif diff --git a/includes/glm/detail/type_mat4x2.inl b/includes/glm/detail/type_mat4x2.inl new file mode 100644 index 0000000..2b9b617 --- /dev/null +++ b/includes/glm/detail/type_mat4x2.inl @@ -0,0 +1,574 @@ +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat() +# if 
GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0), col_type(0, 1), col_type(0, 0), col_type(0, 0)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0); + this->value[1] = col_type(0, 1); + this->value[2] = col_type(0, 0); + this->value[3] = col_type(0, 0); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 2, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = m[3]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0), col_type(0, s), col_type(0, 0), col_type(0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0); + this->value[1] = col_type(0, s); + this->value[2] = col_type(0, 0); + this->value[3] = col_type(0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat + ( + T x0, T y0, + T x1, T y1, + T x2, T y2, + T x3, T y3 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2), col_type(x3, y3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0); + this->value[1] = col_type(x1, y1); + this->value[2] = col_type(x2, y2); + this->value[3] = col_type(x3, y3); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; + this->value[2] = v2; + this->value[3] = v3; +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X0, typename Y0, + typename X1, typename Y1, + typename X2, typename Y2, + typename X3, typename Y3> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat + ( + X0 x0, Y0 y0, + X1 x1, Y1 y1, + X2 x2, Y2 y2, + X3 x3, Y3 y3 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2), col_type(x3, y3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0); + this->value[1] = col_type(x1, y1); + this->value[2] = col_type(x2, y2); + this->value[3] = col_type(x3, y3); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(vec<2, V0, Q> const& v0, vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2, vec<2, V3, Q> const& v3) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v0); + this->value[1] = col_type(v1); + this->value[2] = col_type(v2); + this->value[3] = col_type(v3); +# endif + } + + // -- Conversion -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 2, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(m[3]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, 
Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(m[3]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(m[3]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(0); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 2, T, Q>::col_type & mat<4, 2, T, Q>::operator[](typename mat<4, 2, T, Q>::length_type i) GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 2, T, Q>::col_type const& mat<4, 2, T, Q>::operator[](typename mat<4, 2, T, Q>::length_type i) const GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, 
this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>& mat<4, 2, T, Q>::operator=(mat<4, 2, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + this->value[2] += s; + this->value[3] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator+=(mat<4, 2, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + this->value[2] += m[2]; + this->value[3] += m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + this->value[2] -= s; + this->value[3] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator-=(mat<4, 2, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + this->value[2] -= m[2]; + this->value[3] -= m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + this->value[2] *= s; + this->value[3] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + this->value[2] /= s; + this->value[3] /= s; + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + ++this->value[2]; + ++this->value[3]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + --this->value[2]; + --this->value[3]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> mat<4, 2, T, Q>::operator++(int) + { + mat<4, 2, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> mat<4, 2, T, Q>::operator--(int) + { + mat<4, 2, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m) + { + return mat<4, 2, T, Q>( + -m[0], + -m[1], + -m[2], + -m[3]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m, T scalar) + { + return mat<4, 2, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar, + m[3] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + return mat<4, 2, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1], + m1[2] + m2[2], + m1[3] + m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m, T scalar) + { + return mat<4, 2, T, Q>( + m[0] - scalar, + m[1] - scalar, + m[2] - scalar, + m[3] - 
scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + return mat<4, 2, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1], + m1[2] - m2[2], + m1[3] - m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m, T scalar) + { + return mat<4, 2, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar, + m[3] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator*(T scalar, mat<4, 2, T, Q> const& m) + { + return mat<4, 2, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar, + m[3] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 2, T, Q>::col_type operator*(mat<4, 2, T, Q> const& m, typename mat<4, 2, T, Q>::row_type const& v) + { + return typename mat<4, 2, T, Q>::col_type( + m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z + m[3][0] * v.w, + m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z + m[3][1] * v.w); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 2, T, Q>::row_type operator*(typename mat<4, 2, T, Q>::col_type const& v, mat<4, 2, T, Q> const& m) + { + return typename mat<4, 2, T, Q>::row_type( + v.x * m[0][0] + v.y * m[0][1], + v.x * m[1][0] + v.y * m[1][1], + v.x * m[2][0] + v.y * m[2][1], + v.x * m[3][0] + v.y * m[3][1]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<2, 4, T, Q> const& m2) + { + T const SrcA00 = m1[0][0]; + T const SrcA01 = m1[0][1]; + T const SrcA10 = m1[1][0]; + T const SrcA11 = m1[1][1]; + T const SrcA20 = m1[2][0]; + T const SrcA21 = m1[2][1]; + T const SrcA30 = m1[3][0]; + T const SrcA31 = m1[3][1]; + + T const SrcB00 = m2[0][0]; + T const SrcB01 = m2[0][1]; + T const SrcB02 = m2[0][2]; + T const SrcB03 = m2[0][3]; + T const SrcB10 = m2[1][0]; + T const SrcB11 = m2[1][1]; + T const SrcB12 = m2[1][2]; + T const SrcB13 = m2[1][3]; + + mat<2, 2, T, Q> Result; + Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02 + SrcA30 * SrcB03; + Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02 + SrcA31 * SrcB03; + Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12 + SrcA30 * SrcB13; + Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12 + SrcA31 * SrcB13; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + return mat<3, 2, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + return mat<4, 2, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] 
+ m1[3][1] * m2[1][3], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3], + m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2] + m1[3][0] * m2[3][3], + m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2] + m1[3][1] * m2[3][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator/(mat<4, 2, T, Q> const& m, T scalar) + { + return mat<4, 2, T, Q>( + m[0] / scalar, + m[1] / scalar, + m[2] / scalar, + m[3] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q> operator/(T scalar, mat<4, 2, T, Q> const& m) + { + return mat<4, 2, T, Q>( + scalar / m[0], + scalar / m[1], + scalar / m[2], + scalar / m[3]); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]); + } +} //namespace glm diff --git a/includes/glm/detail/type_mat4x3.hpp b/includes/glm/detail/type_mat4x3.hpp new file mode 100644 index 0000000..52a38d8 --- /dev/null +++ b/includes/glm/detail/type_mat4x3.hpp @@ -0,0 +1,171 @@ +/// @ref core +/// @file glm/detail/type_mat4x3.hpp + +#pragma once + +#include "type_vec3.hpp" +#include "type_vec4.hpp" +#include +#include + +namespace glm +{ + template + struct mat<4, 3, T, Q> + { + typedef vec<3, T, Q> col_type; + typedef vec<4, T, Q> row_type; + typedef mat<4, 3, T, Q> type; + typedef mat<3, 4, T, Q> transpose_type; + typedef T value_type; + + private: + col_type value[4]; + + public: + // -- Accesses -- + + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 4; } + + GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT; + GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT; + + // -- Constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR; + template + GLM_CTOR_DECL mat(mat<4, 3, T, P> const& m); + + GLM_CTOR_DECL GLM_EXPLICIT mat(T s); + GLM_CTOR_DECL mat( + T const& x0, T const& y0, T const& z0, + T const& x1, T const& y1, T const& z1, + T const& x2, T const& y2, T const& z2, + T const& x3, T const& y3, T const& z3); + GLM_CTOR_DECL mat( + col_type const& v0, + col_type const& v1, + col_type const& v2, + col_type const& v3); + + // -- Conversions -- + + template< + typename X1, typename Y1, typename Z1, + typename X2, typename Y2, typename Z2, + typename X3, typename Y3, typename Z3, + typename X4, typename Y4, typename Z4> + GLM_CTOR_DECL mat( + X1 const& x1, Y1 const& y1, Z1 const& z1, + X2 const& x2, Y2 const& y2, Z2 const& z2, + X3 const& x3, Y3 const& y3, Z3 const& z3, + X4 const& x4, Y4 const& y4, Z4 const& z4); + + template + GLM_CTOR_DECL mat( + vec<3, V1, Q> const& v1, + vec<3, V2, Q> const& v2, + vec<3, V3, Q> const& v3, + vec<3, V4, Q> const& v4); + + // -- Matrix conversions -- + + template + GLM_CTOR_DECL mat(mat<4, 3, U, P> const& m); + + GLM_CTOR_DECL mat(mat<2, 2, T, Q> const& x); + GLM_CTOR_DECL mat(mat<3, 3, T, Q> const& x); + GLM_CTOR_DECL mat(mat<4, 4, T, Q> const& x); + GLM_CTOR_DECL mat(mat<2, 3, T, Q> const& x); + GLM_CTOR_DECL mat(mat<3, 
2, T, Q> const& x);
+		GLM_CTOR_DECL mat(mat<2, 4, T, Q> const& x);
+		GLM_CTOR_DECL mat(mat<4, 2, T, Q> const& x);
+		GLM_CTOR_DECL mat(mat<3, 4, T, Q> const& x);
+
+		// -- Unary arithmetic operators --
+
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator=(mat<4, 3, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator+=(U s);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator+=(mat<4, 3, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator-=(U s);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator-=(mat<4, 3, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator*=(U s);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 3, T, Q> & operator/=(U s);
+
+		// -- Increment and decrement operators --
+
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 3, T, Q>& operator++();
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 3, T, Q>& operator--();
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator++(int);
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator--(int);
+	};
+
+	// -- Unary operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m);
+
+	// -- Binary operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m, T scalar);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m, T scalar);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m, T scalar);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator*(T scalar, mat<4, 3, T, Q> const& m);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 3, T, Q>::col_type operator*(mat<4, 3, T, Q> const& m, typename mat<4, 3, T, Q>::row_type const& v);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 3, T, Q>::row_type operator*(typename mat<4, 3, T, Q>::col_type const& v, mat<4, 3, T, Q> const& m);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<2, 4, T, Q> const& m2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<3, 4, T, Q> const& m2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<4, 4, T, Q> const& m2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator/(mat<4, 3, T, Q> const& m, T scalar);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 3, T, Q> operator/(T scalar, mat<4, 3, T, Q> const& m);
+
+	// -- Boolean operators --
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2);
+
+	template<typename T, qualifier Q>
+	GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2);
+}//namespace glm
+
+#ifndef GLM_EXTERNAL_TEMPLATE
+#include "type_mat4x3.inl"
+#endif //GLM_EXTERNAL_TEMPLATE
diff --git a/includes/glm/detail/type_mat4x3.inl b/includes/glm/detail/type_mat4x3.inl
new file mode 100644
index 0000000..8430bc0
--- /dev/null
+++ b/includes/glm/detail/type_mat4x3.inl
@@ -0,0 +1,598 @@ +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0, 0), col_type(0, 1, 0), col_type(0, 0, 1), col_type(0, 0, 0)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0, 0); + this->value[1] = col_type(0, 1, 0); + this->value[2] = col_type(0, 0, 1); + this->value[3] = col_type(0, 0, 0); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 3, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = m[3]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0, 0), col_type(0, s, 0), col_type(0, 0, s), col_type(0, 0, 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0, 0); + this->value[1] = col_type(0, s, 0); + this->value[2] = col_type(0, 0, s); + this->value[3] = col_type(0, 0, 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat + ( + T const& x0, T const& y0, T const& z0, + T const& x1, T const& y1, T const& z1, + T const& x2, T const& y2, T const& z2, + T const& x3, T const& y3, T const& z3 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0); + this->value[1] = col_type(x1, y1, z1); + this->value[2] = col_type(x2, y2, z2); + this->value[3] = col_type(x3, y3, z3); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; + this->value[2] = v2; + this->value[3] = v3; +# endif + } + + // -- Conversion constructors -- + + template + template< + typename X0, typename Y0, typename Z0, + typename X1, typename Y1, typename Z1, + typename X2, typename Y2, typename Z2, + typename X3, typename Y3, typename Z3> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat + ( + X0 const& x0, Y0 const& y0, Z0 const& z0, + X1 const& x1, Y1 const& y1, Z1 const& z1, + X2 const& x2, Y2 const& y2, Z2 const& z2, + X3 const& x3, Y3 const& y3, Z3 const& z3 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0); + this->value[1] = col_type(x1, y1, z1); + this->value[2] = col_type(x2, y2, z2); + this->value[3] = col_type(x3, y3, z3); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, Q> const& v2, vec<3, V3, Q> const& v3, vec<3, V4, Q> const& v4) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v1), col_type(v2), col_type(v3), col_type(v4)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v1); + this->value[1] = 
col_type(v2); + this->value[2] = col_type(v3); + this->value[3] = col_type(v4); +# endif + } + + // -- Matrix conversions -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 3, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(m[3]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(0, 0, 1); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(m[3]); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0, 0, 1); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 1); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(0, 0, 1); + this->value[3] = col_type(0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1), col_type(m[3], 0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 1); + this->value[3] = col_type(m[3], 0); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), 
col_type(m[2]), col_type(0)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(0); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 3, T, Q>::col_type & mat<4, 3, T, Q>::operator[](typename mat<4, 3, T, Q>::length_type i) GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 3, T, Q>::col_type const& mat<4, 3, T, Q>::operator[](typename mat<4, 3, T, Q>::length_type i) const GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + // -- Unary updatable operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>& mat<4, 3, T, Q>::operator=(mat<4, 3, U, Q> const& m) + { + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + this->value[2] += s; + this->value[3] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator+=(mat<4, 3, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + this->value[2] += m[2]; + this->value[3] += m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + this->value[2] -= s; + this->value[3] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator-=(mat<4, 3, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + this->value[2] -= m[2]; + this->value[3] -= m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + this->value[2] *= s; + this->value[3] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + this->value[2] /= s; + this->value[3] /= s; + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + ++this->value[2]; + ++this->value[3]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + --this->value[2]; + --this->value[3]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> mat<4, 3, T, Q>::operator++(int) + { + mat<4, 3, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> mat<4, 3, T, Q>::operator--(int) + { + mat<4, 3, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m) + { + return mat<4, 3, T, Q>( + -m[0], + -m[1], + -m[2], + 
-m[3]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m, T scalar) + { + return mat<4, 3, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar, + m[3] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + return mat<4, 3, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1], + m1[2] + m2[2], + m1[3] + m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m, T scalar) + { + return mat<4, 3, T, Q>( + m[0] - scalar, + m[1] - scalar, + m[2] - scalar, + m[3] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + return mat<4, 3, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1], + m1[2] - m2[2], + m1[3] - m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m, T scalar) + { + return mat<4, 3, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar, + m[3] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator*(T scalar, mat<4, 3, T, Q> const& m) + { + return mat<4, 3, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar, + m[3] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 3, T, Q>::col_type operator* + ( + mat<4, 3, T, Q> const& m, + typename mat<4, 3, T, Q>::row_type const& v) + { + return typename mat<4, 3, T, Q>::col_type( + m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z + m[3][0] * v.w, + m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z + m[3][1] * v.w, + m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z + m[3][2] * v.w); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 3, T, Q>::row_type operator* + ( + typename mat<4, 3, T, Q>::col_type const& v, + mat<4, 3, T, Q> const& m) + { + return typename mat<4, 3, T, Q>::row_type( + v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2], + v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2], + v.x * m[2][0] + v.y * m[2][1] + v.z * m[2][2], + v.x * m[3][0] + v.y * m[3][1] + v.z * m[3][2]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<2, 4, T, Q> const& m2) + { + return mat<2, 3, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + T const SrcA00 = m1[0][0]; + T const SrcA01 = m1[0][1]; + T const SrcA02 = m1[0][2]; + T const SrcA10 = m1[1][0]; + T const SrcA11 = m1[1][1]; + T const SrcA12 = m1[1][2]; + T const SrcA20 = m1[2][0]; + T const SrcA21 = m1[2][1]; + T const SrcA22 = m1[2][2]; + T const SrcA30 = m1[3][0]; + T const SrcA31 = m1[3][1]; + T const SrcA32 = m1[3][2]; + + T const SrcB00 = m2[0][0]; + T const SrcB01 = m2[0][1]; + T const SrcB02 = m2[0][2]; + T const SrcB03 = m2[0][3]; + T const SrcB10 = m2[1][0]; + T const SrcB11 = 
m2[1][1]; + T const SrcB12 = m2[1][2]; + T const SrcB13 = m2[1][3]; + T const SrcB20 = m2[2][0]; + T const SrcB21 = m2[2][1]; + T const SrcB22 = m2[2][2]; + T const SrcB23 = m2[2][3]; + + mat<3, 3, T, Q> Result; + Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02 + SrcA30 * SrcB03; + Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02 + SrcA31 * SrcB03; + Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02 + SrcA32 * SrcB03; + Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12 + SrcA30 * SrcB13; + Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12 + SrcA31 * SrcB13; + Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12 + SrcA32 * SrcB13; + Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22 + SrcA30 * SrcB23; + Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22 + SrcA31 * SrcB23; + Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22 + SrcA32 * SrcB23; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + return mat<4, 3, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3], + m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2] + m1[3][2] * m2[2][3], + m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2] + m1[3][0] * m2[3][3], + m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2] + m1[3][1] * m2[3][3], + m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1] + m1[2][2] * m2[3][2] + m1[3][2] * m2[3][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator/(mat<4, 3, T, Q> const& m, T scalar) + { + return mat<4, 3, T, Q>( + m[0] / scalar, + m[1] / scalar, + m[2] / scalar, + m[3] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q> operator/(T scalar, mat<4, 3, T, Q> const& m) + { + return mat<4, 3, T, Q>( + scalar / m[0], + scalar / m[1], + scalar / m[2], + scalar / m[3]); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]); + } +} //namespace glm diff --git a/includes/glm/detail/type_mat4x4.hpp b/includes/glm/detail/type_mat4x4.hpp new file mode 100644 index 0000000..ad7597b --- /dev/null +++ b/includes/glm/detail/type_mat4x4.hpp @@ -0,0 +1,189 @@ +/// @ref core +/// @file glm/detail/type_mat4x4.hpp + +#pragma once + +#include "type_vec4.hpp" +#include +#include + +namespace glm +{ + template + struct mat<4, 4, T, Q> + { + typedef 
vec<4, T, Q> col_type;
+		typedef vec<4, T, Q> row_type;
+		typedef mat<4, 4, T, Q> type;
+		typedef mat<4, 4, T, Q> transpose_type;
+		typedef T value_type;
+
+	private:
+		col_type value[4];
+
+	public:
+		// -- Accesses --
+
+		typedef length_t length_type;
+		GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 4;}
+
+		GLM_FUNC_DECL GLM_CONSTEXPR col_type & operator[](length_type i) GLM_NOEXCEPT;
+		GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const GLM_NOEXCEPT;
+
+		// -- Constructors --
+
+		GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR mat() GLM_DEFAULT_CTOR;
+		template<qualifier P>
+		GLM_CTOR_DECL mat(mat<4, 4, T, P> const& m);
+
+		GLM_CTOR_DECL GLM_EXPLICIT mat(T s);
+		GLM_CTOR_DECL mat(
+			T const& x0, T const& y0, T const& z0, T const& w0,
+			T const& x1, T const& y1, T const& z1, T const& w1,
+			T const& x2, T const& y2, T const& z2, T const& w2,
+			T const& x3, T const& y3, T const& z3, T const& w3);
+		GLM_CTOR_DECL mat(
+			col_type const& v0,
+			col_type const& v1,
+			col_type const& v2,
+			col_type const& v3);
+
+		// -- Conversions --
+
+		template<
+			typename X1, typename Y1, typename Z1, typename W1,
+			typename X2, typename Y2, typename Z2, typename W2,
+			typename X3, typename Y3, typename Z3, typename W3,
+			typename X4, typename Y4, typename Z4, typename W4>
+		GLM_CTOR_DECL mat(
+			X1 const& x1, Y1 const& y1, Z1 const& z1, W1 const& w1,
+			X2 const& x2, Y2 const& y2, Z2 const& z2, W2 const& w2,
+			X3 const& x3, Y3 const& y3, Z3 const& z3, W3 const& w3,
+			X4 const& x4, Y4 const& y4, Z4 const& z4, W4 const& w4);
+
+		template<typename V1, typename V2, typename V3, typename V4>
+		GLM_CTOR_DECL mat(
+			vec<4, V1, Q> const& v1,
+			vec<4, V2, Q> const& v2,
+			vec<4, V3, Q> const& v3,
+			vec<4, V4, Q> const& v4);
+
+		// -- Matrix conversions --
+
+		template<typename U, qualifier P>
+		GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 4, U, P> const& m);
+
+		GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 2, T, Q> const& x);
+		GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 3, T, Q> const& x);
+		GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 3, T, Q> const& x);
+		GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 2, T, Q> const& x);
+		GLM_CTOR_DECL GLM_EXPLICIT mat(mat<2, 4, T, Q> const& x);
+		GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 2, T, Q> const& x);
+		GLM_CTOR_DECL GLM_EXPLICIT mat(mat<3, 4, T, Q> const& x);
+		GLM_CTOR_DECL GLM_EXPLICIT mat(mat<4, 3, T, Q> const& x);
+
+		// -- Unary arithmetic operators --
+
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator=(mat<4, 4, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator+=(U s);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator+=(mat<4, 4, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator-=(U s);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator-=(mat<4, 4, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator*=(U s);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator*=(mat<4, 4, U, Q> const& m);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator/=(U s);
+		template<typename U>
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator/=(mat<4, 4, U, Q> const& m);
+
+		// -- Increment and decrement operators --
+
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator++();
+		GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR mat<4, 4, T, Q> & operator--();
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator++(int);
+		GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator--(int);
+	};
+
+	// -- Unary operators --
+
+	template<typename T, qualifier Q>
GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator+(T scalar, mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator-(T scalar, mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator*(T scalar, mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type operator*(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 4, T, Q>::row_type operator*(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator/(T scalar, mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type operator/(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR typename mat<4, 4, T, Q>::row_type operator/(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_mat4x4.inl" +#endif//GLM_EXTERNAL_TEMPLATE diff --git a/includes/glm/detail/type_mat4x4.inl b/includes/glm/detail/type_mat4x4.inl new file mode 100644 index 0000000..6e328a9 --- /dev/null +++ b/includes/glm/detail/type_mat4x4.inl @@ -0,0 +1,771 @@ +#include "../matrix.hpp" +#include "../geometric.hpp" + +namespace glm +{ + // -- Constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat() +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST + : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} +# endif + { +# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION + this->value[0] = col_type(1, 0, 0, 0); + this->value[1] = col_type(0, 1, 0, 
0); + this->value[2] = col_type(0, 0, 1, 0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 4, T, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = m[3]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(T s) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0), col_type(0, 0, s, 0), col_type(0, 0, 0, s)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(s, 0, 0, 0); + this->value[1] = col_type(0, s, 0, 0); + this->value[2] = col_type(0, 0, s, 0); + this->value[3] = col_type(0, 0, 0, s); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat + ( + T const& x0, T const& y0, T const& z0, T const& w0, + T const& x1, T const& y1, T const& z1, T const& w1, + T const& x2, T const& y2, T const& z2, T const& w2, + T const& x3, T const& y3, T const& z3, T const& w3 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{ + col_type(x0, y0, z0, w0), + col_type(x1, y1, z1, w1), + col_type(x2, y2, z2, w2), + col_type(x3, y3, z3, w3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x0, y0, z0, w0); + this->value[1] = col_type(x1, y1, z1, w1); + this->value[2] = col_type(x2, y2, z2, w2); + this->value[3] = col_type(x3, y3, z3, w3); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = v0; + this->value[1] = v1; + this->value[2] = v2; + this->value[3] = v3; +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 4, U, P> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0]); + this->value[1] = col_type(m[1]); + this->value[2] = col_type(m[2]); + this->value[3] = col_type(m[3]); +# endif + } + + // -- Conversions -- + + template + template< + typename X1, typename Y1, typename Z1, typename W1, + typename X2, typename Y2, typename Z2, typename W2, + typename X3, typename Y3, typename Z3, typename W3, + typename X4, typename Y4, typename Z4, typename W4> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat + ( + X1 const& x1, Y1 const& y1, Z1 const& z1, W1 const& w1, + X2 const& x2, Y2 const& y2, Z2 const& z2, W2 const& w2, + X3 const& x3, Y3 const& y3, Z3 const& z3, W3 const& w3, + X4 const& x4, Y4 const& y4, Z4 const& z4, W4 const& w4 + ) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(x1, y1, z1, w1), col_type(x2, y2, z2, w2), col_type(x3, y3, z3, w3), col_type(x4, y4, z4, w4)} +# endif + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 1st parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 2nd parameter type invalid."); + 
GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 3rd parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 4th parameter type invalid."); + + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 5th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 6th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 7th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 8th parameter type invalid."); + + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 9th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 10th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 11th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 12th parameter type invalid."); + + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 13th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 14th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 15th parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 16th parameter type invalid."); + +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(x1, y1, z1, w1); + this->value[1] = col_type(x2, y2, z2, w2); + this->value[2] = col_type(x3, y3, z3, w3); + this->value[3] = col_type(x4, y4, z4, w4); +# endif + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(vec<4, V1, Q> const& v1, vec<4, V2, Q> const& v2, vec<4, V3, Q> const& v3, vec<4, V4, Q> const& v4) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(v1), col_type(v2), col_type(v3), col_type(v4)} +# endif + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || 
std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 1st parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 2nd parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 3rd parameter type invalid."); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 4th parameter type invalid."); + +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(v1); + this->value[1] = col_type(v2); + this->value[2] = col_type(v3); + this->value[3] = col_type(v4); +# endif + } + + // -- Matrix conversions -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); + this->value[2] = col_type(0, 0, 1, 0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(0, 0, 1, 0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); + this->value[2] = col_type(m[2], 1, 0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = col_type(0, 0, 1, 0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 2, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0, 0); + this->value[1] = col_type(m[1], 0, 0); + this->value[2] = col_type(0, 0, 1, 
0); + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 4, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0, 0, 0, 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = col_type(0, 0, 0, 1); +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 3, T, Q> const& m) +# if GLM_HAS_INITIALIZER_LISTS + : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0), col_type(m[3], 1)} +# endif + { +# if !GLM_HAS_INITIALIZER_LISTS + this->value[0] = col_type(m[0], 0); + this->value[1] = col_type(m[1], 0); + this->value[2] = col_type(m[2], 0); + this->value[3] = col_type(m[3], 1); +# endif + } + + // -- Accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type & mat<4, 4, T, Q>::operator[](typename mat<4, 4, T, Q>::length_type i) GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type const& mat<4, 4, T, Q>::operator[](typename mat<4, 4, T, Q>::length_type i) const GLM_NOEXCEPT + { + GLM_ASSERT_LENGTH(i, this->length()); + return this->value[i]; + } + + // -- Unary arithmetic operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator=(mat<4, 4, U, Q> const& m) + { + //memcpy could be faster + //memcpy(&this->value, &m.value, 16 * sizeof(valType)); + this->value[0] = m[0]; + this->value[1] = m[1]; + this->value[2] = m[2]; + this->value[3] = m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator+=(U s) + { + this->value[0] += s; + this->value[1] += s; + this->value[2] += s; + this->value[3] += s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator+=(mat<4, 4, U, Q> const& m) + { + this->value[0] += m[0]; + this->value[1] += m[1]; + this->value[2] += m[2]; + this->value[3] += m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator-=(U s) + { + this->value[0] -= s; + this->value[1] -= s; + this->value[2] -= s; + this->value[3] -= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator-=(mat<4, 4, U, Q> const& m) + { + this->value[0] -= m[0]; + this->value[1] -= m[1]; + this->value[2] -= m[2]; + this->value[3] -= m[3]; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator*=(U s) + { + this->value[0] *= s; + this->value[1] *= s; + this->value[2] *= s; + this->value[3] *= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator*=(mat<4, 4, U, Q> const& m) + { + return (*this = *this * m); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator/=(U s) + { + this->value[0] /= s; + this->value[1] /= s; + this->value[2] /= s; + this->value[3] /= s; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator/=(mat<4, 4, U, Q> const& m) + { + return *this *= inverse(m); + } + + // -- Increment and decrement 
operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator++() + { + ++this->value[0]; + ++this->value[1]; + ++this->value[2]; + ++this->value[3]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator--() + { + --this->value[0]; + --this->value[1]; + --this->value[2]; + --this->value[3]; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> mat<4, 4, T, Q>::operator++(int) + { + mat<4, 4, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> mat<4, 4, T, Q>::operator--(int) + { + mat<4, 4, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary constant operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m) + { + return m; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m) + { + return mat<4, 4, T, Q>( + -m[0], + -m[1], + -m[2], + -m[3]); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m, T scalar) + { + return mat<4, 4, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar, + m[3] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator+(T scalar, mat<4, 4, T, Q> const& m) + { + return mat<4, 4, T, Q>( + m[0] + scalar, + m[1] + scalar, + m[2] + scalar, + m[3] + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + return mat<4, 4, T, Q>( + m1[0] + m2[0], + m1[1] + m2[1], + m1[2] + m2[2], + m1[3] + m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m, T scalar) + { + return mat<4, 4, T, Q>( + m[0] - scalar, + m[1] - scalar, + m[2] - scalar, + m[3] - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator-(T scalar, mat<4, 4, T, Q> const& m) + { + return mat<4, 4, T, Q>( + scalar - m[0], + scalar - m[1], + scalar - m[2], + scalar - m[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + return mat<4, 4, T, Q>( + m1[0] - m2[0], + m1[1] - m2[1], + m1[2] - m2[2], + m1[3] - m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m, T scalar) + { + return mat<4, 4, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar, + m[3] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator*(T scalar, mat<4, 4, T, Q> const& m) + { + return mat<4, 4, T, Q>( + m[0] * scalar, + m[1] * scalar, + m[2] * scalar, + m[3] * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type operator* + ( + mat<4, 4, T, Q> const& m, + typename mat<4, 4, T, Q>::row_type const& v + ) + { +/* + __m128 v0 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 v1 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(1, 1, 1, 1)); + __m128 v2 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 v3 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(m[0].data, v0); + __m128 m1 = _mm_mul_ps(m[1].data, v1); + __m128 a0 = _mm_add_ps(m0, m1); + + __m128 m2 = _mm_mul_ps(m[2].data, v2); + __m128 m3 = _mm_mul_ps(m[3].data, v3); + __m128 a1 = _mm_add_ps(m2, 
m3); + + __m128 a2 = _mm_add_ps(a0, a1); + + return typename mat<4, 4, T, Q>::col_type(a2); +*/ + + typename mat<4, 4, T, Q>::col_type const Mov0(v[0]); + typename mat<4, 4, T, Q>::col_type const Mov1(v[1]); + typename mat<4, 4, T, Q>::col_type const Mul0 = m[0] * Mov0; + typename mat<4, 4, T, Q>::col_type const Mul1 = m[1] * Mov1; + typename mat<4, 4, T, Q>::col_type const Add0 = Mul0 + Mul1; + typename mat<4, 4, T, Q>::col_type const Mov2(v[2]); + typename mat<4, 4, T, Q>::col_type const Mov3(v[3]); + typename mat<4, 4, T, Q>::col_type const Mul2 = m[2] * Mov2; + typename mat<4, 4, T, Q>::col_type const Mul3 = m[3] * Mov3; + typename mat<4, 4, T, Q>::col_type const Add1 = Mul2 + Mul3; + typename mat<4, 4, T, Q>::col_type const Add2 = Add0 + Add1; + return Add2; + +/* + return typename mat<4, 4, T, Q>::col_type( + m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0] * v[3], + m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1] * v[3], + m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2] * v[3], + m[0][3] * v[0] + m[1][3] * v[1] + m[2][3] * v[2] + m[3][3] * v[3]); +*/ + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::row_type operator* + ( + typename mat<4, 4, T, Q>::col_type const& v, + mat<4, 4, T, Q> const& m + ) + { + return typename mat<4, 4, T, Q>::row_type( + glm::dot(m[0], v), + glm::dot(m[1], v), + glm::dot(m[2], v), + glm::dot(m[3], v)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) + { + return mat<2, 4, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], + m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2] + m1[3][3] * m2[0][3], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3], + m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2] + m1[3][3] * m2[1][3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) + { + return mat<3, 4, T, Q>( + m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], + m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], + m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], + m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2] + m1[3][3] * m2[0][3], + m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], + m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], + m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3], + m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2] + m1[3][3] * m2[1][3], + m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], + m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3], + m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2] + m1[3][2] * m2[2][3], + m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1] + m1[2][3] * m2[2][2] + m1[3][3] * m2[2][3]); + } + + namespace detail + { 
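+	// mul4x4 picks the 4x4 * 4x4 multiply implementation from the qualifier's
+	// alignment (is_aligned): one specialization builds each result column with
+	// glm::fma over splatted components of m2's columns, the other accumulates
+	// the same column products term by term so the operation order, and thus the
+	// rounding, matches the SIMD path.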
+ template + struct mul4x4 {}; + + template + struct mul4x4 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<4, 4, T, Q> call(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + typename mat<4, 4, T, Q>::col_type const SrcA0 = m1[0]; + typename mat<4, 4, T, Q>::col_type const SrcA1 = m1[1]; + typename mat<4, 4, T, Q>::col_type const SrcA2 = m1[2]; + typename mat<4, 4, T, Q>::col_type const SrcA3 = m1[3]; + + typename mat<4, 4, T, Q>::col_type const SrcB0 = m2[0]; + typename mat<4, 4, T, Q>::col_type const SrcB1 = m2[1]; + typename mat<4, 4, T, Q>::col_type const SrcB2 = m2[2]; + typename mat<4, 4, T, Q>::col_type const SrcB3 = m2[3]; + + mat<4, 4, T, Q> Result; + Result[0] = glm::fma(SrcA3, splatW(SrcB0), glm::fma(SrcA2, splatZ(SrcB0), glm::fma(SrcA1, splatY(SrcB0), SrcA0 * splatX(SrcB0)))); + Result[1] = glm::fma(SrcA3, splatW(SrcB1), glm::fma(SrcA2, splatZ(SrcB1), glm::fma(SrcA1, splatY(SrcB1), SrcA0 * splatX(SrcB1)))); + Result[2] = glm::fma(SrcA3, splatW(SrcB2), glm::fma(SrcA2, splatZ(SrcB2), glm::fma(SrcA1, splatY(SrcB2), SrcA0 * splatX(SrcB2)))); + Result[3] = glm::fma(SrcA3, splatW(SrcB3), glm::fma(SrcA2, splatZ(SrcB3), glm::fma(SrcA1, splatY(SrcB3), SrcA0 * splatX(SrcB3)))); + return mat < 4, 4, T, Q > (Result); + } + }; + + template + struct mul4x4 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<4, 4, T, Q> call(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + typename mat<4, 4, T, Q>::col_type const& SrcA0 = m1[0]; + typename mat<4, 4, T, Q>::col_type const& SrcA1 = m1[1]; + typename mat<4, 4, T, Q>::col_type const& SrcA2 = m1[2]; + typename mat<4, 4, T, Q>::col_type const& SrcA3 = m1[3]; + + typename mat<4, 4, T, Q>::col_type const& SrcB0 = m2[0]; + typename mat<4, 4, T, Q>::col_type const& SrcB1 = m2[1]; + typename mat<4, 4, T, Q>::col_type const& SrcB2 = m2[2]; + typename mat<4, 4, T, Q>::col_type const& SrcB3 = m2[3]; + + mat<4, 4, T, Q> Result; + // note: the following lines are decomposed to have consistent results between simd and non simd code (prevent rounding error because of operation order) + //Result[0] = SrcA3 * SrcB0.w + SrcA2 * SrcB0.z + SrcA1 * SrcB0.y + SrcA0 * SrcB0.x; + //Result[1] = SrcA3 * SrcB1.w + SrcA2 * SrcB1.z + SrcA1 * SrcB1.y + SrcA0 * SrcB1.x; + //Result[2] = SrcA3 * SrcB2.w + SrcA2 * SrcB2.z + SrcA1 * SrcB2.y + SrcA0 * SrcB2.x; + //Result[3] = SrcA3 * SrcB3.w + SrcA2 * SrcB3.z + SrcA1 * SrcB3.y + SrcA0 * SrcB3.x; + + typename mat<4, 4, T, Q>::col_type tmp; + tmp = SrcA0 * SrcB0.x; + tmp += SrcA1 * SrcB0.y; + tmp += SrcA2 * SrcB0.z; + tmp += SrcA3 * SrcB0.w; + Result[0] = tmp; + tmp = SrcA0 * SrcB1.x; + tmp += SrcA1 * SrcB1.y; + tmp += SrcA2 * SrcB1.z; + tmp += SrcA3 * SrcB1.w; + Result[1] = tmp; + tmp = SrcA0 * SrcB2.x; + tmp += SrcA1 * SrcB2.y; + tmp += SrcA2 * SrcB2.z; + tmp += SrcA3 * SrcB2.w; + Result[2] = tmp; + tmp = SrcA0 * SrcB3.x; + tmp += SrcA1 * SrcB3.y; + tmp += SrcA2 * SrcB3.z; + tmp += SrcA3 * SrcB3.w; + Result[3] = tmp; + + return Result; + } + }; + } + + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + return detail::mul4x4::value>::call(m1, m2); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m, T scalar) + { + return mat<4, 4, T, Q>( + m[0] / scalar, + m[1] / scalar, + m[2] / scalar, + m[3] / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator/(T scalar, mat<4, 4, T, Q> const& m) + { + return mat<4, 4, T, Q>( + scalar / m[0], + scalar / m[1], 
+ scalar / m[2], + scalar / m[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type operator/(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v) + { + return inverse(m) * v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::row_type operator/(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m) + { + return v * inverse(m); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + mat<4, 4, T, Q> m1_copy(m1); + return m1_copy /= m2; + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) + { + return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "type_mat4x4_simd.inl" +#endif diff --git a/includes/glm/detail/type_mat4x4_simd.inl b/includes/glm/detail/type_mat4x4_simd.inl new file mode 100644 index 0000000..fb3a16f --- /dev/null +++ b/includes/glm/detail/type_mat4x4_simd.inl @@ -0,0 +1,6 @@ +/// @ref core + +namespace glm +{ + +}//namespace glm diff --git a/includes/glm/detail/type_quat.hpp b/includes/glm/detail/type_quat.hpp new file mode 100644 index 0000000..1b41e15 --- /dev/null +++ b/includes/glm/detail/type_quat.hpp @@ -0,0 +1,193 @@ +/// @ref core +/// @file glm/detail/type_quat.hpp + +#pragma once + +// Dependency: +#include "../detail/type_mat3x3.hpp" +#include "../detail/type_mat4x4.hpp" +#include "../detail/type_vec3.hpp" +#include "../detail/type_vec4.hpp" +#include "../ext/vector_relational.hpp" +#include "../ext/quaternion_relational.hpp" +#include "../gtc/constants.hpp" +#include "../gtc/matrix_transform.hpp" + +namespace glm +{ +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wpedantic" +# elif GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" +# pragma clang diagnostic ignored "-Wnested-anon-types" +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union +# endif +# endif + + template + struct qua + { + // -- Implementation detail -- + + typedef qua type; + typedef T value_type; + + // -- Data -- + +# if GLM_LANG & GLM_LANG_CXXMS_FLAG + union + { +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + struct { T w, x, y, z; }; +# else + struct { T x, y, z, w; }; +# endif + + typename detail::storage<4, T, detail::is_aligned::value>::type data; + }; +# else +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + T w, x, y, z; +# else + T x, y, z, w; +# endif +# endif + + // -- Component accesses -- + + typedef length_t length_type; + + /// Return the count of components of a quaternion + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 4;} + + GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); + GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; + + // -- Implicit basic constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR qua() GLM_DEFAULT_CTOR; + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR qua(qua const& q) GLM_DEFAULT; + 
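+		// Copy from a quaternion stored with a different precision qualifier.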
template + GLM_CTOR_DECL qua(qua const& q); + + // -- Explicit basic constructors -- + + GLM_CTOR_DECL qua(T s, vec<3, T, Q> const& v); + +# ifdef GLM_FORCE_QUAT_DATA_XYZW + GLM_CTOR_DECL qua(T x, T y, T z, T w); +# else + GLM_CTOR_DECL qua(T w, T x, T y, T z); +# endif + + GLM_FUNC_DECL static GLM_CONSTEXPR qua wxyz(T w, T x, T y, T z); + + // -- Conversion constructors -- + + template + GLM_CTOR_DECL GLM_EXPLICIT qua(qua const& q); + + /// Explicit conversion operators +# if GLM_HAS_EXPLICIT_CONVERSION_OPERATORS + GLM_FUNC_DECL explicit operator mat<3, 3, T, Q>() const; + GLM_FUNC_DECL explicit operator mat<4, 4, T, Q>() const; +# endif + + /// Create a quaternion from two normalized axis + /// + /// @param u A first normalized axis + /// @param v A second normalized axis + /// @see gtc_quaternion + /// @see http://lolengine.net/blog/2013/09/18/beautiful-maths-quaternion-from-vectors + GLM_FUNC_DISCARD_DECL qua(vec<3, T, Q> const& u, vec<3, T, Q> const& v); + + /// Build a quaternion from euler angles (pitch, yaw, roll), in radians. + GLM_CTOR_DECL GLM_EXPLICIT qua(vec<3, T, Q> const& eulerAngles); + GLM_CTOR_DECL GLM_EXPLICIT qua(mat<3, 3, T, Q> const& q); + GLM_CTOR_DECL GLM_EXPLICIT qua(mat<4, 4, T, Q> const& q); + + // -- Unary arithmetic operators -- + + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR qua& operator=(qua const& q) GLM_DEFAULT; + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR qua& operator=(qua const& q); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR qua& operator+=(qua const& q); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR qua& operator-=(qua const& q); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR qua& operator*=(qua const& q); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR qua& operator*=(U s); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR qua& operator/=(U s); + }; + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif +# endif + + // -- Unary bit operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator+(qua const& q); + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator-(qua const& q); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator+(qua const& q, qua const& p); + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator-(qua const& q, qua const& p); + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator*(qua const& q, qua const& p); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(qua const& q, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, qua const& q); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(qua const& q, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, qua const& q); + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator*(qua const& q, T const& s); + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator*(T const& s, qua const& q); + + template + GLM_FUNC_DECL GLM_CONSTEXPR qua operator/(qua const& q, T const& s); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(qua const& q1, qua const& q2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(qua const& q1, qua const& q2); +} //namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_quat.inl" +#endif//GLM_EXTERNAL_TEMPLATE diff 
--git a/includes/glm/detail/type_quat.inl b/includes/glm/detail/type_quat.inl new file mode 100644 index 0000000..6a8f987 --- /dev/null +++ b/includes/glm/detail/type_quat.inl @@ -0,0 +1,424 @@ +#include "../trigonometric.hpp" +#include "../exponential.hpp" +#include "../ext/quaternion_common.hpp" +#include "../ext/quaternion_geometric.hpp" +#include + +namespace glm{ +namespace detail +{ + template + struct genTypeTrait > + { + static const genTypeEnum GENTYPE = GENTYPE_QUAT; + }; + + template + struct compute_dot, T, Aligned> + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(qua const& a, qua const& b) + { + vec<4, T, Q> tmp(a.w * b.w, a.x * b.x, a.y * b.y, a.z * b.z); + return (tmp.x + tmp.y) + (tmp.z + tmp.w); + } + }; + + template + struct compute_quat_add + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, qua const& p) + { + return qua::wxyz(q.w + p.w, q.x + p.x, q.y + p.y, q.z + p.z); + } + }; + + template + struct compute_quat_sub + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, qua const& p) + { + return qua::wxyz(q.w - p.w, q.x - p.x, q.y - p.y, q.z - p.z); + } + }; + + template + struct compute_quat_mul_scalar + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, T s) + { + return qua::wxyz(q.w * s, q.x * s, q.y * s, q.z * s); + } + }; + + template + struct compute_quat_div_scalar + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, T s) + { + return qua::wxyz(q.w / s, q.x / s, q.y / s, q.z / s); + } + }; + + template + struct compute_quat_mul_vec4 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(qua const& q, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(q * vec<3, T, Q>(v), v.w); + } + }; +}//namespace detail + + // -- Component accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & qua::operator[](typename qua::length_type i) + { + GLM_ASSERT_LENGTH(i, this->length()); +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + return (&w)[i]; +# else + return (&x)[i]; +# endif + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& qua::operator[](typename qua::length_type i) const + { + GLM_ASSERT_LENGTH(i, this->length()); +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + return (&w)[i]; +# else + return (&x)[i]; +# endif + } + + // -- Implicit basic constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR qua::qua() +# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + : w(1), x(0), y(0), z(0) +# else + : x(0), y(0), z(0), w(1) +# endif +# endif + {} +# endif + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(qua const& q) +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + : w(q.w), x(q.x), y(q.y), z(q.z) +# else + : x(q.x), y(q.y), z(q.z), w(q.w) +# endif + {} +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(qua const& q) +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + : w(q.w), x(q.x), y(q.y), z(q.z) +# else + : x(q.x), y(q.y), z(q.z), w(q.w) +# endif + {} + + // -- Explicit basic constructors -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(T s, vec<3, T, Q> const& v) +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + : w(s), x(v.x), y(v.y), z(v.z) +# else + : x(v.x), y(v.y), z(v.z), w(s) +# endif + {} + + template +# ifdef GLM_FORCE_QUAT_DATA_XYZW + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(T _x, T _y, T _z, T _w) +# else + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(T _w, T _x, T _y, T _z) +# endif +# ifdef 
GLM_FORCE_QUAT_DATA_WXYZ + : w(_w), x(_x), y(_y), z(_z) +# else + : x(_x), y(_y), z(_z), w(_w) +# endif + {} + + template + GLM_CONSTEXPR qua qua::wxyz(T w, T x, T y, T z) { +# ifdef GLM_FORCE_QUAT_DATA_XYZW + return qua(x, y, z, w); +# else + return qua(w, x, y, z); +# endif + } + + // -- Conversion constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(qua const& q) +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + : w(static_cast(q.w)), x(static_cast(q.x)), y(static_cast(q.y)), z(static_cast(q.z)) +# else + : x(static_cast(q.x)), y(static_cast(q.y)), z(static_cast(q.z)), w(static_cast(q.w)) +# endif + {} + + //template + //GLM_FUNC_QUALIFIER qua::qua + //( + // valType const& pitch, + // valType const& yaw, + // valType const& roll + //) + //{ + // vec<3, valType> eulerAngle(pitch * valType(0.5), yaw * valType(0.5), roll * valType(0.5)); + // vec<3, valType> c = glm::cos(eulerAngle * valType(0.5)); + // vec<3, valType> s = glm::sin(eulerAngle * valType(0.5)); + // + // this->w = c.x * c.y * c.z + s.x * s.y * s.z; + // this->x = s.x * c.y * c.z - c.x * s.y * s.z; + // this->y = c.x * s.y * c.z + s.x * c.y * s.z; + // this->z = c.x * c.y * s.z - s.x * s.y * c.z; + //} + + template + GLM_FUNC_QUALIFIER qua::qua(vec<3, T, Q> const& u, vec<3, T, Q> const& v) + { + T norm_u_norm_v = sqrt(dot(u, u) * dot(v, v)); + T real_part = norm_u_norm_v + dot(u, v); + vec<3, T, Q> t; + + if(real_part < static_cast(1.e-6f) * norm_u_norm_v) + { + // If u and v are exactly opposite, rotate 180 degrees + // around an arbitrary orthogonal axis. Axis normalisation + // can happen later, when we normalise the quaternion. + real_part = static_cast(0); + t = abs(u.x) > abs(u.z) ? vec<3, T, Q>(-u.y, u.x, static_cast(0)) : vec<3, T, Q>(static_cast(0), -u.z, u.y); + } + else + { + // Otherwise, build quaternion the standard way. 
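+			// The unnormalised quaternion (|u||v| + dot(u, v), cross(u, v)) is the
+			// shortest-arc rotation taking u onto v (the half-angle construction);
+			// the normalize() below scales it back to unit length.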
+ t = cross(u, v); + } + + *this = normalize(qua::wxyz(real_part, t.x, t.y, t.z)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(vec<3, T, Q> const& eulerAngle) + { + vec<3, T, Q> c = glm::cos(eulerAngle * T(0.5)); + vec<3, T, Q> s = glm::sin(eulerAngle * T(0.5)); + + this->w = c.x * c.y * c.z + s.x * s.y * s.z; + this->x = s.x * c.y * c.z - c.x * s.y * s.z; + this->y = c.x * s.y * c.z + s.x * c.y * s.z; + this->z = c.x * c.y * s.z - s.x * s.y * c.z; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(mat<3, 3, T, Q> const& m) + { + *this = quat_cast(m); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(mat<4, 4, T, Q> const& m) + { + *this = quat_cast(m); + } + +# if GLM_HAS_EXPLICIT_CONVERSION_OPERATORS + template + GLM_FUNC_QUALIFIER qua::operator mat<3, 3, T, Q>() const + { + return mat3_cast(*this); + } + + template + GLM_FUNC_QUALIFIER qua::operator mat<4, 4, T, Q>() const + { + return mat4_cast(*this); + } +# endif//GLM_HAS_EXPLICIT_CONVERSION_OPERATORS + + // -- Unary arithmetic operators -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator=(qua const& q) + { + this->w = q.w; + this->x = q.x; + this->y = q.y; + this->z = q.z; + return *this; + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator=(qua const& q) + { + this->w = static_cast(q.w); + this->x = static_cast(q.x); + this->y = static_cast(q.y); + this->z = static_cast(q.z); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator+=(qua const& q) + { + return (*this = detail::compute_quat_add::value>::call(*this, qua(q))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator-=(qua const& q) + { + return (*this = detail::compute_quat_sub::value>::call(*this, qua(q))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator*=(qua const& r) + { + qua const p(*this); + qua const q(r); + + this->w = p.w * q.w - p.x * q.x - p.y * q.y - p.z * q.z; + this->x = p.w * q.x + p.x * q.w + p.y * q.z - p.z * q.y; + this->y = p.w * q.y + p.y * q.w + p.z * q.x - p.x * q.z; + this->z = p.w * q.z + p.z * q.w + p.x * q.y - p.y * q.x; + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator*=(U s) + { + return (*this = detail::compute_quat_mul_scalar::value>::call(*this, static_cast(s))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua & qua::operator/=(U s) + { + return (*this = detail::compute_quat_div_scalar::value>::call(*this, static_cast(s))); + } + + // -- Unary bit operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator+(qua const& q) + { + return q; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator-(qua const& q) + { + return qua::wxyz(-q.w, -q.x, -q.y, -q.z); + } + + // -- Binary operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator+(qua const& q, qua const& p) + { + return qua(q) += p; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator-(qua const& q, qua const& p) + { + return qua(q) -= p; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator*(qua const& q, qua const& p) + { + return qua(q) *= p; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(qua const& q, vec<3, T, Q> const& v) + { + vec<3, T, Q> const QuatVector(q.x, q.y, q.z); + vec<3, T, Q> const uv(glm::cross(QuatVector, v)); + vec<3, T, Q> const 
uuv(glm::cross(QuatVector, uv)); + + return v + ((uv * q.w) + uuv) * static_cast(2); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, qua const& q) + { + return glm::inverse(q) * v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(qua const& q, vec<4, T, Q> const& v) + { + return detail::compute_quat_mul_vec4::value>::call(q, v); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, qua const& q) + { + return glm::inverse(q) * v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator*(qua const& q, T const& s) + { + return qua::wxyz( + q.w * s, q.x * s, q.y * s, q.z * s); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator*(T const& s, qua const& q) + { + return q * s; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua operator/(qua const& q, T const& s) + { + return qua::wxyz( + q.w / s, q.x / s, q.y / s, q.z / s); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(qua const& q1, qua const& q2) + { + return q1.x == q2.x && q1.y == q2.y && q1.z == q2.z && q1.w == q2.w; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(qua const& q1, qua const& q2) + { + return q1.x != q2.x || q1.y != q2.y || q1.z != q2.z || q1.w != q2.w; + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "type_quat_simd.inl" +#endif + diff --git a/includes/glm/detail/type_quat_simd.inl b/includes/glm/detail/type_quat_simd.inl new file mode 100644 index 0000000..fa6da19 --- /dev/null +++ b/includes/glm/detail/type_quat_simd.inl @@ -0,0 +1,208 @@ +/// @ref core + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +namespace glm{ +namespace detail +{ +/* + template + struct compute_quat_mul + { + static qua call(qua const& q1, qua const& q2) + { + // SSE2 STATS: 11 shuffle, 8 mul, 8 add + // SSE4 STATS: 3 shuffle, 4 mul, 4 dpps + + __m128 const mul0 = _mm_mul_ps(q1.data, _mm_shuffle_ps(q2.data, q2.data, _MM_SHUFFLE(0, 1, 2, 3))); + __m128 const mul1 = _mm_mul_ps(q1.data, _mm_shuffle_ps(q2.data, q2.data, _MM_SHUFFLE(1, 0, 3, 2))); + __m128 const mul2 = _mm_mul_ps(q1.data, _mm_shuffle_ps(q2.data, q2.data, _MM_SHUFFLE(2, 3, 0, 1))); + __m128 const mul3 = _mm_mul_ps(q1.data, q2.data); + +# if GLM_ARCH & GLM_ARCH_SSE41_BIT + __m128 const add0 = _mm_dp_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f), 0xff); + __m128 const add1 = _mm_dp_ps(mul1, _mm_set_ps(1.0f, 1.0f, 1.0f, -1.0f), 0xff); + __m128 const add2 = _mm_dp_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f), 0xff); + __m128 const add3 = _mm_dp_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f), 0xff); +# else + __m128 const mul4 = _mm_mul_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f)); + __m128 const add0 = _mm_add_ps(mul0, _mm_movehl_ps(mul4, mul4)); + __m128 const add4 = _mm_add_ss(add0, _mm_shuffle_ps(add0, add0, 1)); + + __m128 const mul5 = _mm_mul_ps(mul1, _mm_set_ps(1.0f, 1.0f, 1.0f, -1.0f)); + __m128 const add1 = _mm_add_ps(mul1, _mm_movehl_ps(mul5, mul5)); + __m128 const add5 = _mm_add_ss(add1, _mm_shuffle_ps(add1, add1, 1)); + + __m128 const mul6 = _mm_mul_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f)); + __m128 const add2 = _mm_add_ps(mul6, _mm_movehl_ps(mul6, mul6)); + __m128 const add6 = _mm_add_ss(add2, _mm_shuffle_ps(add2, add2, 1)); + + __m128 const mul7 = _mm_mul_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f)); + __m128 const add3 = _mm_add_ps(mul3, _mm_movehl_ps(mul7, mul7)); + __m128 const add7 = _mm_add_ss(add3, _mm_shuffle_ps(add3, add3, 1)); + #endif + + 
// This SIMD code is a politically correct way of doing this, but in every test I've tried it has been slower than + // the final code below. I'll keep this here for reference - maybe somebody else can do something better... + // + //__m128 xxyy = _mm_shuffle_ps(add4, add5, _MM_SHUFFLE(0, 0, 0, 0)); + //__m128 zzww = _mm_shuffle_ps(add6, add7, _MM_SHUFFLE(0, 0, 0, 0)); + // + //return _mm_shuffle_ps(xxyy, zzww, _MM_SHUFFLE(2, 0, 2, 0)); + + qua Result; + _mm_store_ss(&Result.x, add4); + _mm_store_ss(&Result.y, add5); + _mm_store_ss(&Result.z, add6); + _mm_store_ss(&Result.w, add7); + return Result; + } + }; +*/ + + template + struct compute_quat_add + { + static qua call(qua const& q, qua const& p) + { + qua Result; + Result.data = _mm_add_ps(q.data, p.data); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template + struct compute_quat_add + { + static qua call(qua const& a, qua const& b) + { + qua Result; + Result.data = _mm256_add_pd(a.data, b.data); + return Result; + } + }; +# endif + + template + struct compute_quat_sub + { + static qua call(qua const& q, qua const& p) + { + qua Result; + Result.data = _mm_sub_ps(q.data, p.data); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template + struct compute_quat_sub + { + static qua call(qua const& a, qua const& b) + { + qua Result; + Result.data = _mm256_sub_pd(a.data, b.data); + return Result; + } + }; +# endif + + template + struct compute_quat_mul_scalar + { + static qua call(qua const& q, float s) + { + vec<4, float, Q> Result; + Result.data = _mm_mul_ps(q.data, _mm_set_ps1(s)); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template + struct compute_quat_mul_scalar + { + static qua call(qua const& q, double s) + { + qua Result; + Result.data = _mm256_mul_pd(q.data, _mm_set_ps1(s)); + return Result; + } + }; +# endif + + template + struct compute_quat_div_scalar + { + static qua call(qua const& q, float s) + { + vec<4, float, Q> Result; + Result.data = _mm_div_ps(q.data, _mm_set_ps1(s)); + return Result; + } + }; + +# if GLM_ARCH & GLM_ARCH_AVX_BIT + template + struct compute_quat_div_scalar + { + static qua call(qua const& q, double s) + { + qua Result; + Result.data = _mm256_div_pd(q.data, _mm_set_ps1(s)); + return Result; + } + }; +# endif + + template + struct compute_quat_mul_vec4 + { + static vec<4, float, Q> call(qua const& q, vec<4, float, Q> const& v) + { +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + __m128 const q_wwww = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 const q_swp0 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(0, 1, 3, 2)); + __m128 const q_swp1 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(0, 2, 1, 3)); + __m128 const v_swp0 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 0, 2, 1)); + __m128 const v_swp1 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 1, 0, 2)); + + __m128 uv = _mm_sub_ps(_mm_mul_ps(q_swp0, v_swp1), _mm_mul_ps(q_swp1, v_swp0)); + __m128 uv_swp0 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 0, 2, 1)); + __m128 uv_swp1 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 1, 0, 2)); + __m128 uuv = _mm_sub_ps(_mm_mul_ps(q_swp0, uv_swp1), _mm_mul_ps(q_swp1, uv_swp0)); + + __m128 const two = _mm_set1_ps(2.0f); + uv = _mm_mul_ps(uv, _mm_mul_ps(q_wwww, two)); + uuv = _mm_mul_ps(uuv, two); + + vec<4, float, Q> Result; + Result.data = _mm_add_ps(v.data, _mm_add_ps(uv, uuv)); + return Result; +# else + __m128 const q_wwww = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 const q_swp0 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 0, 2, 1)); + 
__m128 const q_swp1 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 1, 0, 2)); + __m128 const v_swp0 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 0, 2, 1)); + __m128 const v_swp1 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 1, 0, 2)); + + __m128 uv = _mm_sub_ps(_mm_mul_ps(q_swp0, v_swp1), _mm_mul_ps(q_swp1, v_swp0)); + __m128 uv_swp0 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 0, 2, 1)); + __m128 uv_swp1 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 1, 0, 2)); + __m128 uuv = _mm_sub_ps(_mm_mul_ps(q_swp0, uv_swp1), _mm_mul_ps(q_swp1, uv_swp0)); + + __m128 const two = _mm_set1_ps(2.0f); + uv = _mm_mul_ps(uv, _mm_mul_ps(q_wwww, two)); + uuv = _mm_mul_ps(uuv, two); + + vec<4, float, Q> Result; + Result.data = _mm_add_ps(v.data, _mm_add_ps(uv, uuv)); + return Result; +# endif + } + }; +}//namespace detail +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/includes/glm/detail/type_vec1.hpp b/includes/glm/detail/type_vec1.hpp new file mode 100644 index 0000000..0cc7b5d --- /dev/null +++ b/includes/glm/detail/type_vec1.hpp @@ -0,0 +1,308 @@ +/// @ref core +/// @file glm/detail/type_vec1.hpp + +#pragma once + +#include "qualifier.hpp" +#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR +# include "_swizzle.hpp" +#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION +# include "_swizzle_func.hpp" +#endif +#include + +namespace glm +{ + template + struct vec<1, T, Q> + { + // -- Implementation detail -- + + typedef T value_type; + typedef vec<1, T, Q> type; + typedef vec<1, bool, Q> bool_type; + + // -- Data -- + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wpedantic" +# elif GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" +# pragma clang diagnostic ignored "-Wnested-anon-types" +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union +# endif +# endif + +# if GLM_CONFIG_XYZW_ONLY + T x; +# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE + union + { + T x; + T r; + T s; + + typename detail::storage<1, T, detail::is_aligned::value>::type data; +/* +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + _GLM_SWIZZLE1_2_MEMBERS(T, Q, x) + _GLM_SWIZZLE1_2_MEMBERS(T, Q, r) + _GLM_SWIZZLE1_2_MEMBERS(T, Q, s) + _GLM_SWIZZLE1_3_MEMBERS(T, Q, x) + _GLM_SWIZZLE1_3_MEMBERS(T, Q, r) + _GLM_SWIZZLE1_3_MEMBERS(T, Q, s) + _GLM_SWIZZLE1_4_MEMBERS(T, Q, x) + _GLM_SWIZZLE1_4_MEMBERS(T, Q, r) + _GLM_SWIZZLE1_4_MEMBERS(T, Q, s) +# endif +*/ + }; +# else + union {T x, r, s;}; +/* +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC1(T, Q) +# endif +*/ +# endif + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif +# endif + + // -- Component accesses -- + + /// Return the count of components of the vector + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 1;} + + GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); + GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; + + // -- Implicit basic constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR vec() GLM_DEFAULT_CTOR; + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT; + template + 
GLM_CTOR_DECL vec(vec<1, T, P> const& v); + + // -- Explicit basic constructors -- + + GLM_CTOR_DECL explicit vec(T scalar); + + // -- Conversion vector constructors -- + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL GLM_EXPLICIT vec(vec<2, U, P> const& v); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL GLM_EXPLICIT vec(vec<3, U, P> const& v); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL GLM_EXPLICIT vec(vec<4, U, P> const& v); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL GLM_EXPLICIT vec(vec<1, U, P> const& v); + + // -- Swizzle constructors -- +/* +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec(detail::_swizzle<1, T, Q, E0, -1,-2,-3> const& that) + { + *this = that(); + } +# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR +*/ + // -- Unary arithmetic operators -- + + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator=(vec const& v) GLM_DEFAULT; + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator+=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator+=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator-=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator-=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator*=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator*=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator/=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator/=(vec<1, U, Q> const& v); + + // -- Increment and decrement operators -- + + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator++(); + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator--(); + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator--(int); + + // -- Unary bit operators -- + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator%=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator%=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator&=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator&=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator|=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator|=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator^=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator^=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator<<=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator<<=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, T, Q> & operator>>=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<1, 
T, Q> & operator>>=(vec<1, U, Q> const& v); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator>>(T scalar, vec<1, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator~(vec<1, T, Q> const& v); + + // -- Boolean 
operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, bool, Q> operator&&(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<1, bool, Q> operator||(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_vec1.inl" +#endif//GLM_EXTERNAL_TEMPLATE diff --git a/includes/glm/detail/type_vec1.inl b/includes/glm/detail/type_vec1.inl new file mode 100644 index 0000000..18411e7 --- /dev/null +++ b/includes/glm/detail/type_vec1.inl @@ -0,0 +1,553 @@ +/// @ref core + +#include "./compute_vector_relational.hpp" + +namespace glm +{ + // -- Implicit basic constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec() +# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE + : x(0) +# endif + {} +# endif + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, T, Q> const& v) + : x(v.x) + {} +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, T, P> const& v) + : x(v.x) + {} + + // -- Explicit basic constructors -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(T scalar) + : x(scalar) + {} + + // -- Conversion vector constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, U, P> const& v) + : x(static_cast(v.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<2, U, P> const& v) + : x(static_cast(v.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<3, U, P> const& v) + : x(static_cast(v.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<4, U, P> const& v) + : x(static_cast(v.x)) + {} + + // -- Component accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<1, T, Q>::operator[](typename vec<1, T, Q>::length_type) + { + return x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<1, T, Q>::operator[](typename vec<1, T, Q>::length_type) const + { + return x; + } + + // -- Unary arithmetic operators -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator=(vec<1, T, Q> const& v) + { + this->x = v.x; + return *this; + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator=(vec<1, U, Q> const& v) + { + this->x = static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator+=(U scalar) + { + this->x += static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator+=(vec<1, U, Q> const& v) + { + this->x += static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator-=(U scalar) + { + this->x -= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator-=(vec<1, U, Q> const& v) + { + this->x -= static_cast(v.x); + return *this; + } + 
+ template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator*=(U scalar) + { + this->x *= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator*=(vec<1, U, Q> const& v) + { + this->x *= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator/=(U scalar) + { + this->x /= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator/=(vec<1, U, Q> const& v) + { + this->x /= static_cast(v.x); + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator++() + { + ++this->x; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator--() + { + --this->x; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> vec<1, T, Q>::operator++(int) + { + vec<1, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> vec<1, T, Q>::operator--(int) + { + vec<1, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary bit operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator%=(U scalar) + { + this->x %= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator%=(vec<1, U, Q> const& v) + { + this->x %= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator&=(U scalar) + { + this->x &= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator&=(vec<1, U, Q> const& v) + { + this->x &= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator|=(U scalar) + { + this->x |= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator|=(vec<1, U, Q> const& v) + { + this->x |= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator^=(U scalar) + { + this->x ^= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator^=(vec<1, U, Q> const& v) + { + this->x ^= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator<<=(U scalar) + { + this->x <<= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator<<=(vec<1, U, Q> const& v) + { + this->x <<= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator>>=(U scalar) + { + this->x >>= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator>>=(vec<1, U, Q> const& v) + { + this->x >>= static_cast(v.x); + return *this; + } + + // -- Unary constant operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v) + { + return v; 
+ } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + -v.x); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar + v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x + v2.x); + } + + //operator- + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar - v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x - v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar * v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x * v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar / v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x / v2.x); + } + + // -- Binary bit operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x % scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar % v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x % v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x & scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar & v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x & v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x | scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar | v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x | v2.x); + } + + template + GLM_FUNC_QUALIFIER 
GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + v.x ^ scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + scalar ^ v.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + v1.x ^ v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + static_cast(v.x << scalar)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + static_cast(scalar << v.x)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + static_cast(v1.x << v2.x)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v, T scalar) + { + return vec<1, T, Q>( + static_cast(v.x >> scalar)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(T scalar, vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + static_cast(scalar >> v.x)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<1, T, Q>( + static_cast(v1.x >> v2.x)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator~(vec<1, T, Q> const& v) + { + return vec<1, T, Q>( + ~v.x); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return detail::compute_equal::is_iec559>::call(v1.x, v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return !(v1 == v2); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, bool, Q> operator&&(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2) + { + return vec<1, bool, Q>(v1.x && v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, bool, Q> operator||(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2) + { + return vec<1, bool, Q>(v1.x || v2.x); + } +}//namespace glm diff --git a/includes/glm/detail/type_vec2.hpp b/includes/glm/detail/type_vec2.hpp new file mode 100644 index 0000000..66c6137 --- /dev/null +++ b/includes/glm/detail/type_vec2.hpp @@ -0,0 +1,406 @@ +/// @ref core +/// @file glm/detail/type_vec2.hpp + +#pragma once + +#include "qualifier.hpp" +#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR +# include "_swizzle.hpp" +#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION +# include "_swizzle_func.hpp" +#endif +#include + +namespace glm +{ + template + struct vec<2, T, Q> + { + // -- Implementation detail -- + + typedef T value_type; + typedef vec<2, T, Q> type; + typedef vec<2, bool, Q> bool_type; + enum is_aligned + { + value = false + }; + + // -- Data -- + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wpedantic" +# elif GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" +# pragma clang diagnostic ignored "-Wnested-anon-types" +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union +# endif +# endif + 
+# if GLM_CONFIG_XYZW_ONLY + T x, y; +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, Q, x, y) +# endif//GLM_CONFIG_SWIZZLE +# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE + union + { + struct{ T x, y; }; + struct{ T r, g; }; + struct{ T s, t; }; + + typename detail::storage<2, T, detail::is_aligned::value>::type data; + +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + GLM_SWIZZLE2_2_MEMBERS(T, Q, x, y) + GLM_SWIZZLE2_2_MEMBERS(T, Q, r, g) + GLM_SWIZZLE2_2_MEMBERS(T, Q, s, t) + GLM_SWIZZLE2_3_MEMBERS(T, Q, x, y) + GLM_SWIZZLE2_3_MEMBERS(T, Q, r, g) + GLM_SWIZZLE2_3_MEMBERS(T, Q, s, t) + GLM_SWIZZLE2_4_MEMBERS(T, Q, x, y) + GLM_SWIZZLE2_4_MEMBERS(T, Q, r, g) + GLM_SWIZZLE2_4_MEMBERS(T, Q, s, t) +# endif + }; +# else + union {T x, r, s;}; + union {T y, g, t;}; + +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, Q) +# endif//GLM_CONFIG_SWIZZLE +# endif + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif +# endif + + // -- Component accesses -- + + /// Return the count of components of the vector + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 2;} + + GLM_FUNC_DECL GLM_CONSTEXPR T& operator[](length_type i); + GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; + + // -- Implicit basic constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR vec() GLM_DEFAULT_CTOR; + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT; + template + GLM_CTOR_DECL vec(vec<2, T, P> const& v); + + // -- Explicit basic constructors -- + + GLM_CTOR_DECL explicit vec(T scalar); + GLM_CTOR_DECL vec(T x, T y); + + // -- Conversion constructors -- + + template + GLM_CTOR_DECL explicit vec(vec<1, U, P> const& v); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(A x, B y); + template + GLM_CTOR_DECL vec(vec<1, A, Q> const& x, B y); + template + GLM_CTOR_DECL vec(A x, vec<1, B, Q> const& y); + template + GLM_CTOR_DECL vec(vec<1, A, Q> const& x, vec<1, B, Q> const& y); + + // -- Conversion vector constructors -- + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL GLM_EXPLICIT vec(vec<3, U, P> const& v); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL GLM_EXPLICIT vec(vec<4, U, P> const& v); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL GLM_EXPLICIT vec(vec<2, U, P> const& v); + + // -- Swizzle constructors -- +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + template + GLM_FUNC_DISCARD_DECL vec(detail::_swizzle<2, T, Q, E0, E1,-1,-2> const& that) + { + *this = that(); + } +# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + + // -- Unary arithmetic operators -- + + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator=(vec const& v) GLM_DEFAULT; + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator=(vec<2, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR 
vec<2, T, Q> & operator+=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(vec<2, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(vec<2, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(vec<2, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(vec<2, U, Q> const& v); + + // -- Increment and decrement operators -- + + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator++(); + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator--(); + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator--(int); + + // -- Unary bit operators -- + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(vec<2, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(vec<2, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(vec<2, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(vec<2, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(vec<2, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(vec<2, U, Q> const& v); + }; + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR 
vec<2, T, Q> operator+(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<2, T, Q> const& 
v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(T scalar, vec<2, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator~(vec<2, T, Q> const& v); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, bool, Q> operator&&(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<2, bool, Q> operator||(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_vec2.inl" +#endif//GLM_EXTERNAL_TEMPLATE diff --git a/includes/glm/detail/type_vec2.inl b/includes/glm/detail/type_vec2.inl new file mode 100644 index 0000000..e840899 --- /dev/null +++ b/includes/glm/detail/type_vec2.inl @@ -0,0 +1,915 @@ +/// @ref core + +#include "./compute_vector_relational.hpp" + +namespace glm +{ + // -- Implicit basic constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec() +# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE + : x(0), y(0) +# endif + {} +# endif + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, T, Q> const& v) + : x(v.x), y(v.y) + {} +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, T, P> const& v) + : x(v.x), y(v.y) + {} + + // -- Explicit basic constructors -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(T scalar) + : x(scalar), y(scalar) + {} + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(T _x, T _y) + : x(_x), y(_y) + {} + + // -- Conversion scalar constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR 
vec<2, T, Q>::vec(vec<1, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(A _x, B _y) + : x(static_cast(_x)) + , y(static_cast(_y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, A, Q> const& _x, B _y) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(A _x, vec<1, B, Q> const& _y) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, A, Q> const& _x, vec<1, B, Q> const& _y) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + {} + + // -- Conversion vector constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<3, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<4, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.y)) + {} + + // -- Component accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<2, T, Q>::operator[](typename vec<2, T, Q>::length_type i) + { + GLM_ASSERT_LENGTH(i, this->length()); + switch(i) + { + default: + case 0: + return x; + case 1: + return y; + } + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<2, T, Q>::operator[](typename vec<2, T, Q>::length_type i) const + { + GLM_ASSERT_LENGTH(i, this->length()); + switch(i) + { + default: + case 0: + return x; + case 1: + return y; + } + } + + // -- Unary arithmetic operators -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator=(vec<2, T, Q> const& v) + { + this->x = v.x; + this->y = v.y; + return *this; + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator=(vec<2, U, Q> const& v) + { + this->x = static_cast(v.x); + this->y = static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(U scalar) + { + this->x += static_cast(scalar); + this->y += static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(vec<1, U, Q> const& v) + { + this->x += static_cast(v.x); + this->y += static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(vec<2, U, Q> const& v) + { + this->x += static_cast(v.x); + this->y += static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(U scalar) + { + this->x -= static_cast(scalar); + this->y -= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(vec<1, U, Q> const& v) + { + this->x -= static_cast(v.x); + this->y -= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(vec<2, U, Q> const& v) + { + this->x -= static_cast(v.x); + this->y -= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, 
T, Q>::operator*=(U scalar) + { + this->x *= static_cast(scalar); + this->y *= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(vec<1, U, Q> const& v) + { + this->x *= static_cast(v.x); + this->y *= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(vec<2, U, Q> const& v) + { + this->x *= static_cast(v.x); + this->y *= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(U scalar) + { + this->x /= static_cast(scalar); + this->y /= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(vec<1, U, Q> const& v) + { + this->x /= static_cast(v.x); + this->y /= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(vec<2, U, Q> const& v) + { + this->x /= static_cast(v.x); + this->y /= static_cast(v.y); + return *this; + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator++() + { + ++this->x; + ++this->y; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator--() + { + --this->x; + --this->y; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> vec<2, T, Q>::operator++(int) + { + vec<2, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> vec<2, T, Q>::operator--(int) + { + vec<2, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary bit operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(U scalar) + { + this->x %= static_cast(scalar); + this->y %= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(vec<1, U, Q> const& v) + { + this->x %= static_cast(v.x); + this->y %= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(vec<2, U, Q> const& v) + { + this->x %= static_cast(v.x); + this->y %= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(U scalar) + { + this->x &= static_cast(scalar); + this->y &= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(vec<1, U, Q> const& v) + { + this->x &= static_cast(v.x); + this->y &= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(vec<2, U, Q> const& v) + { + this->x &= static_cast(v.x); + this->y &= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator|=(U scalar) + { + this->x |= static_cast(scalar); + this->y |= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator|=(vec<1, U, Q> const& v) + { + this->x |= static_cast(v.x); + this->y |= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, 
Q>::operator|=(vec<2, U, Q> const& v) + { + this->x |= static_cast(v.x); + this->y |= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(U scalar) + { + this->x ^= static_cast(scalar); + this->y ^= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(vec<1, U, Q> const& v) + { + this->x ^= static_cast(v.x); + this->y ^= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(vec<2, U, Q> const& v) + { + this->x ^= static_cast(v.x); + this->y ^= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(U scalar) + { + this->x <<= static_cast(scalar); + this->y <<= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(vec<1, U, Q> const& v) + { + this->x <<= static_cast(v.x); + this->y <<= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(vec<2, U, Q> const& v) + { + this->x <<= static_cast(v.x); + this->y <<= static_cast(v.y); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(U scalar) + { + this->x >>= static_cast(scalar); + this->y >>= static_cast(scalar); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(vec<1, U, Q> const& v) + { + this->x >>= static_cast(v.x); + this->y >>= static_cast(v.x); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(vec<2, U, Q> const& v) + { + this->x >>= static_cast(v.x); + this->y >>= static_cast(v.y); + return *this; + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + -v.x, + -v.y); + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x + scalar, + v.y + scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x + v2.x, + v1.y + v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar + v.x, + scalar + v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x + v2.x, + v1.x + v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x + v2.x, + v1.y + v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x - scalar, + v.y - scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x - v2.x, + v1.y - v2.x); 
+ } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar - v.x, + scalar - v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x - v2.x, + v1.x - v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x - v2.x, + v1.y - v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x * scalar, + v.y * scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x * v2.x, + v1.y * v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar * v.x, + scalar * v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x * v2.x, + v1.x * v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x * v2.x, + v1.y * v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x / scalar, + v.y / scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x / v2.x, + v1.y / v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar / v.x, + scalar / v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x / v2.x, + v1.x / v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x / v2.x, + v1.y / v2.y); + } + + // -- Binary bit operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x % scalar, + v.y % scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x % v2.x, + v1.y % v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar % v.x, + scalar % v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x % v2.x, + v1.x % v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x % v2.x, + v1.y % v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x & scalar, + v.y & scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x & v2.x, + v1.y & v2.x); + } 
+ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar & v.x, + scalar & v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x & v2.x, + v1.x & v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x & v2.x, + v1.y & v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x | scalar, + v.y | scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x | v2.x, + v1.y | v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar | v.x, + scalar | v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x | v2.x, + v1.x | v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x | v2.x, + v1.y | v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x ^ scalar, + v.y ^ scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x ^ v2.x, + v1.y ^ v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar ^ v.x, + scalar ^ v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x ^ v2.x, + v1.x ^ v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x ^ v2.x, + v1.y ^ v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x << scalar, + v.y << scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x << v2.x, + v1.y << v2.x); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar << v.x, + scalar << v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x << v2.x, + v1.x << v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x << v2.x, + v1.y << v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v, T scalar) + { + return vec<2, T, Q>( + v.x >> scalar, + v.y >> scalar); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x >> v2.x, + v1.y >> v2.x); + } + + template + 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(T scalar, vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + scalar >> v.x, + scalar >> v.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x >> v2.x, + v1.x >> v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return vec<2, T, Q>( + v1.x >> v2.x, + v1.y >> v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator~(vec<2, T, Q> const& v) + { + return vec<2, T, Q>( + ~v.x, + ~v.y); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return + detail::compute_equal::is_iec559>::call(v1.x, v2.x) && + detail::compute_equal::is_iec559>::call(v1.y, v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) + { + return !(v1 == v2); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, bool, Q> operator&&(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2) + { + return vec<2, bool, Q>(v1.x && v2.x, v1.y && v2.y); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, bool, Q> operator||(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2) + { + return vec<2, bool, Q>(v1.x || v2.x, v1.y || v2.y); + } +}//namespace glm diff --git a/includes/glm/detail/type_vec3.hpp b/includes/glm/detail/type_vec3.hpp new file mode 100644 index 0000000..90de2f8 --- /dev/null +++ b/includes/glm/detail/type_vec3.hpp @@ -0,0 +1,447 @@ +/// @ref core +/// @file glm/detail/type_vec3.hpp + +#pragma once + +#include "qualifier.hpp" +#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR +# include "_swizzle.hpp" +#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION +# include "_swizzle_func.hpp" +#endif +#include + +namespace glm +{ + template + struct vec<3, T, Q> + { + // -- Implementation detail -- + + typedef T value_type; + typedef vec<3, T, Q> type; + typedef vec<3, bool, Q> bool_type; + + enum is_aligned + { + value = detail::is_aligned::value + }; + + // -- Data -- + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wpedantic" +# elif GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" +# pragma clang diagnostic ignored "-Wnested-anon-types" +# pragma clang diagnostic ignored "-Wpadded" +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE +# pragma warning(disable: 4324) // structure was padded due to alignment specifier +# endif +# endif +# endif + +# if GLM_CONFIG_XYZW_ONLY + T x, y, z; +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, Q, x, y, z) +# endif//GLM_CONFIG_SWIZZLE +# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE + union + { + struct{ T x, y, z; }; + struct{ T r, g, b; }; + struct{ T s, t, p; }; + + typename detail::storage<3, T, detail::is_aligned::value>::type data; + +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + GLM_SWIZZLE3_2_MEMBERS(T, Q, x, y, z) + GLM_SWIZZLE3_2_MEMBERS(T, Q, r, g, b) + GLM_SWIZZLE3_2_MEMBERS(T, Q, s, t, p) + GLM_SWIZZLE3_3_MEMBERS(T, Q, x, y, z) + GLM_SWIZZLE3_3_MEMBERS(T, Q, r, g, b) + 
GLM_SWIZZLE3_3_MEMBERS(T, Q, s, t, p) + GLM_SWIZZLE3_4_MEMBERS(T, Q, x, y, z) + GLM_SWIZZLE3_4_MEMBERS(T, Q, r, g, b) + GLM_SWIZZLE3_4_MEMBERS(T, Q, s, t, p) +# endif + }; +# else + union { T x, r, s; }; + union { T y, g, t; }; + union { T z, b, p; }; + +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, Q) +# endif//GLM_CONFIG_SWIZZLE +# endif//GLM_LANG + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif +# endif + + // -- Component accesses -- + + /// Return the count of components of the vector + typedef length_t length_type; + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 3;} + + GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); + GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; + + // -- Implicit basic constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR vec() GLM_DEFAULT_CTOR; + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT; + template + GLM_CTOR_DECL vec(vec<3, T, P> const& v); + + // -- Explicit basic constructors -- + + GLM_CTOR_DECL explicit vec(T scalar); + GLM_CTOR_DECL vec(T a, T b, T c); + + // -- Conversion scalar constructors -- + + template + GLM_CTOR_DECL explicit vec(vec<1, U, P> const& v); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(X x, Y y, Z z); + template + GLM_CTOR_DECL vec(vec<1, X, Q> const& _x, Y _y, Z _z); + template + GLM_CTOR_DECL vec(X _x, vec<1, Y, Q> const& _y, Z _z); + template + GLM_CTOR_DECL vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z); + template + GLM_CTOR_DECL vec(X _x, Y _y, vec<1, Z, Q> const& _z); + template + GLM_CTOR_DECL vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z); + template + GLM_CTOR_DECL vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z); + template + GLM_CTOR_DECL vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z); + + // -- Conversion vector constructors -- + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<2, A, P> const& _xy, B _z); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(A _x, vec<2, B, P> const& _yz); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL GLM_EXPLICIT vec(vec<4, U, P> const& v); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL GLM_EXPLICIT vec(vec<3, U, P> const& v); + + // -- Swizzle constructors -- +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec(detail::_swizzle<3, T, Q, E0, E1, E2, -1> const& that) + { + *this = that(); + } + + template + 
GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& scalar) + { + *this = vec(v(), scalar); + } + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec(T const& scalar, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v) + { + *this = vec(scalar, v()); + } +# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + + // -- Unary arithmetic operators -- + + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q>& operator=(vec<3, T, Q> const& v) GLM_DEFAULT; + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator=(vec<3, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator+=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator+=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator+=(vec<3, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator-=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator-=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator-=(vec<3, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator*=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator*=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator*=(vec<3, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator/=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator/=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator/=(vec<3, U, Q> const& v); + + // -- Increment and decrement operators -- + + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator++(); + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator--(); + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator--(int); + + // -- Unary bit operators -- + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator%=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator%=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator%=(vec<3, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator&=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator&=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator&=(vec<3, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator|=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator|=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator|=(vec<3, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator^=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator^=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator^=(vec<3, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator<<=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator<<=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator<<=(vec<3, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, 
T, Q> & operator>>=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator>>=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<3, T, Q> & operator>>=(vec<3, U, Q> const& v); + }; + + + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL 
GLM_CONSTEXPR vec<3, T, Q> operator&(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(T scalar, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator~(vec<3, T, Q> const& v); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, bool, Q> operator&&(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, bool, Q> operator||(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2); + + + + +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_vec3.inl" +#endif//GLM_EXTERNAL_TEMPLATE diff --git a/includes/glm/detail/type_vec3.inl b/includes/glm/detail/type_vec3.inl new file mode 100644 index 0000000..fed82bf --- /dev/null +++ b/includes/glm/detail/type_vec3.inl @@ -0,0 +1,985 @@ +/// @ref core + +#include "compute_vector_relational.hpp" +#include "compute_vector_decl.hpp" + +namespace glm +{ + // -- Implicit basic constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER 
GLM_CONSTEXPR vec<3, T, Q>::vec() +# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE + : x(0), y(0), z(0) +# endif + {} +# endif + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<3, T, Q> const& v) + : x(v.x), y(v.y), z(v.z) + {} +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<3, T, P> const& v) + : x(v.x), y(v.y), z(v.z) + {} + + // -- Explicit basic constructors -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(T scalar) + : x(scalar), y(scalar), z(scalar) + {} + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(T _x, T _y, T _z) + : x(_x), y(_y), z(_z) + {} + + // -- Conversion scalar constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.x)) + , z(static_cast(v.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, Y _y, Z _z) + : x(static_cast(_x)) + , y(static_cast(_y)) + , z(static_cast(_z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + , z(static_cast(_z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + , z(static_cast(_z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + , z(static_cast(_z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z) + : x(static_cast(_x)) + , y(static_cast(_y)) + , z(static_cast(_z.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + , z(static_cast(_z.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + , z(static_cast(_z.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + , z(static_cast(_z.x)) + {} + + // -- Conversion vector constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<2, A, P> const& _xy, B _z) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_z.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(A _x, vec<2, B, P> const& _yz) + : x(static_cast(_x)) + , y(static_cast(_yz.x)) + , z(static_cast(_yz.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz) + : x(static_cast(_x.x)) + , y(static_cast(_yz.x)) + , z(static_cast(_yz.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<3, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.y)) + , z(static_cast(v.z)) + {} + + 
template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<4, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.y)) + , z(static_cast(v.z)) + {} + + // -- Component accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<3, T, Q>::operator[](typename vec<3, T, Q>::length_type i) + { + GLM_ASSERT_LENGTH(i, this->length()); + switch(i) + { + default: + case 0: + return x; + case 1: + return y; + case 2: + return z; + } + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<3, T, Q>::operator[](typename vec<3, T, Q>::length_type i) const + { + GLM_ASSERT_LENGTH(i, this->length()); + switch(i) + { + default: + case 0: + return x; + case 1: + return y; + case 2: + return z; + } + } + + // -- Unary arithmetic operators -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>& vec<3, T, Q>::operator=(vec<3, T, Q> const& v) + { + this->x = v.x; + this->y = v.y; + this->z = v.z; + return *this; + } +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>& vec<3, T, Q>::operator=(vec<3, U, Q> const& v) + { + this->x = static_cast(v.x); + this->y = static_cast(v.y); + this->z = static_cast(v.z); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator+=(U scalar) + { + return (*this = detail::compute_vec_add<3, T, Q, detail::is_aligned::value>::call(*this, vec<3, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator+=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_add<3, T, Q, detail::is_aligned::value>::call(*this, vec<1, T, Q>(v.x))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator+=(vec<3, U, Q> const& v) + { + return (*this = detail::compute_vec_add<3, T, Q, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator-=(U scalar) + { + return (*this = detail::compute_vec_sub<3, T, Q, detail::is_aligned::value>::call(*this, vec<3, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator-=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_sub<3, T, Q, detail::is_aligned::value>::call(*this, vec<1, T, Q>(v.x))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator-=(vec<3, U, Q> const& v) + { + return (*this = detail::compute_vec_sub<3, T, Q, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator*=(U scalar) + { + return (*this = detail::compute_vec_mul<3, T, Q, detail::is_aligned::value>::call(*this, vec<3, T, Q>(static_cast(scalar)))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator*=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_mul<3, T, Q, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v.x))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator*=(vec<3, U, Q> const& v) + { + return (*this = detail::compute_vec_mul<3, T, Q, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator/=(U v) + { + return (*this = 
detail::compute_vec_div<3, T, Q, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator/=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_div<3, T, Q, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v.x))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator/=(vec<3, U, Q> const& v) + { + return (*this = detail::compute_vec_div<3, T, Q, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator++() + { + ++this->x; + ++this->y; + ++this->z; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator--() + { + --this->x; + --this->y; + --this->z; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> vec<3, T, Q>::operator++(int) + { + vec<3, T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> vec<3, T, Q>::operator--(int) + { + vec<3, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary bit operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator%=(U scalar) + { + return (*this = detail::compute_vec_mod<3, T, Q, detail::is_aligned::value>::call(*this, vec<3, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator%=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_mod<3, T, Q, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator%=(vec<3, U, Q> const& v) + { + return (*this = detail::compute_vec_mod<3, T, Q, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator&=(U scalar) + { + return (*this = detail::compute_vec_and<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator&=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_and<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator&=(vec<3, U, Q> const& v) + { + return (*this = detail::compute_vec_and<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator|=(U scalar) + { + return (*this = detail::compute_vec_or<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator|=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_or<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator|=(vec<3, U, Q> const& v) + { + return (*this = detail::compute_vec_or<3, T, Q, 
detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator^=(U scalar) + { + return (*this = detail::compute_vec_xor<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator^=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_xor<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v.x))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator^=(vec<3, U, Q> const& v) + { + return (*this = detail::compute_vec_xor<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator<<=(U scalar) + { + return (*this = detail::compute_vec_shift_left<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator<<=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_shift_left<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<1, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator<<=(vec<3, U, Q> const& v) + { + return (*this = detail::compute_vec_shift_left<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator>>=(U scalar) + { + return (*this = detail::compute_vec_shift_right<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator>>=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_shift_right<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator>>=(vec<3, U, Q> const& v) + { + return (*this = detail::compute_vec_shift_right<3, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<3, T, Q>(v))); + + } + + // -- Unary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v) + { + return vec<3, T, Q>(0) -= v; + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>(v) += scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<3, T, Q>(v1) += v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(v) += scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return 
vec<3, T, Q>(scalar) += v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>(v1) += v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>(v) -= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>(v) -= scalar.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar) -= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar) -= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>(v1) -= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>(v) *= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>(v) *= scalar.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(v) *= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(v) *= scalar.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>(v1) *= v2; + + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>(v) /= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>(v) /= scalar.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar) /= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar) /= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>(v1) /= v2; + } + + // -- Binary bit operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>(v) %= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>(v) %= scalar.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar) %= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar.x) %= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>(v1) %= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>(v) &= scalar; + } + + template + 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>(v) &= scalar.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar) &= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar.x) &= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>(v1) &= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>(v) |= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>(v) |= scalar.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar) |= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar.x) |= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>(v1) |= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>(v) ^= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>(v) ^= scalar.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar) ^= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar.x) ^= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>(v1) ^= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>(v) <<= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>(v) <<= scalar.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar) << v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar.x) << v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>(v1) <<= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v, T scalar) + { + return vec<3, T, Q>(v) >>= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<3, T, Q>(v) >>= scalar.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(T scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar) >>= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> 
operator>>(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) + { + return vec<3, T, Q>(scalar.x) >>= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return vec<3, T, Q>(v1) >>= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator~(vec<3, T, Q> const& v) + { + return vec<3, T, Q>( + ~v.x, + ~v.y, + ~v.z); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return + detail::compute_equal::is_iec559>::call(v1.x, v2.x) && + detail::compute_equal::is_iec559>::call(v1.y, v2.y) && + detail::compute_equal::is_iec559>::call(v1.z, v2.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) + { + return !(v1 == v2); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, bool, Q> operator&&(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2) + { + return vec<3, bool, Q>(v1.x && v2.x, v1.y && v2.y, v1.z && v2.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, bool, Q> operator||(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2) + { + return vec<3, bool, Q>(v1.x || v2.x, v1.y || v2.y, v1.z || v2.z); + } +}//namespace glm + + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "type_vec_simd.inl" + +namespace glm { + +#if (GLM_ARCH & GLM_ARCH_NEON_BIT) && !GLM_CONFIG_XYZW_ONLY + CTORSL(3, CTOR_FLOAT); + CTORSL(3, CTOR_INT); + CTORSL(3, CTOR_UINT); + CTORSL(3, CTOR_VECF_INT3); + CTORSL(3, CTOR_VECF_UINT3); + CTORSL(3, CTOR_VECF_VECF); + CTORSL(3, CTOR_VECF_VECI); + CTORSL(3, CTOR_VECF_VECU); +#endif + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + CTORSL(3, CTOR_FLOAT_COPY3); + CTORSL(3, CTOR_DOUBLE_COPY3); + CTORSL(3, CTOR_FLOAT); + CTORSL(3, CTOR_FLOAT3); + CTORSL(3, CTOR_DOUBLE3); + CTORSL(3, CTOR_INT); + CTORSL(3, CTOR_INT3); + CTORSL(3, CTOR_VECF_INT3); + + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, double, aligned_highp>::vec(const vec<3, double, aligned_highp>& v) +#if (GLM_ARCH & GLM_ARCH_AVX_BIT) + :data(v.data) {} +#else + { + data.setv(0, v.data.getv(0)); + data.setv(1, v.data.getv(1)); + } +#endif + + + + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, float, aligned_highp>::vec(const vec<3, float, aligned_highp>& v) : + data(v.data) {} + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, float, aligned_highp>::vec(const vec<3, float, packed_highp>& v) + { + data = _mm_set_ps(v[2], v[2], v[1], v[0]); + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, float, packed_highp>::vec(const vec<3, float, aligned_highp>& v) + { + _mm_store_sd(reinterpret_cast(this), _mm_castps_pd(v.data)); + __m128 mz = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(reinterpret_cast(this)+2, mz); + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, double, aligned_highp>::vec(const vec<3, double, packed_highp>& v) + { +#if (GLM_ARCH & GLM_ARCH_AVX_BIT) + data = _mm256_set_pd(v[2], v[2], v[1], v[0]); +#else + data.setv(0, _mm_loadu_pd(reinterpret_cast(&v))); + data.setv(1, _mm_loadu_pd(reinterpret_cast(&v)+2)); +#endif + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, double, packed_highp>::vec(const vec<3, double, aligned_highp>& v) + { +#if (GLM_ARCH & GLM_ARCH_AVX_BIT) + __m256d T1 = _mm256_permute_pd(v.data, 1); + _mm_store_sd((reinterpret_cast(this)) + 0, _mm256_castpd256_pd128(v.data)); + 
_mm_store_sd((reinterpret_cast(this)) + 1, _mm256_castpd256_pd128(T1)); + _mm_store_sd((reinterpret_cast(this)) + 2, _mm256_extractf128_pd(v.data, 1)); +#else + _mm_storeu_pd(reinterpret_cast(this), v.data.getv(0)); + _mm_store_sd((reinterpret_cast(this)) + 2, v.data.getv(1)); +#endif + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, int, aligned_highp>::vec(const vec<3, int, aligned_highp>& v) : + data(v.data) + { + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, int, aligned_highp>::vec(const vec<3, int, packed_highp>& v) + { + __m128 mx = _mm_load_ss(reinterpret_cast(&v[0])); + __m128 my = _mm_load_ss(reinterpret_cast(&v[1])); + __m128 mz = _mm_load_ss(reinterpret_cast(&v[2])); + __m128 mxy = _mm_unpacklo_ps(mx, my); + data = _mm_castps_si128(_mm_movelh_ps(mxy, mz)); + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, int, packed_highp>::vec(const vec<3, int, aligned_highp>& v) + { + _mm_store_sd(reinterpret_cast(this), _mm_castsi128_pd(v.data)); + __m128 mz = _mm_shuffle_ps(_mm_castsi128_ps(v.data), _mm_castsi128_ps(v.data), _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(reinterpret_cast(this)+2, mz); + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, unsigned int, aligned_highp>::vec(const vec<3, unsigned int, aligned_highp>& v) : + data(v.data) + { + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, unsigned int, aligned_highp>::vec(const vec<3, unsigned int, packed_highp>& v) + { + __m128 mx = _mm_load_ss(reinterpret_cast(&v[0])); + __m128 my = _mm_load_ss(reinterpret_cast(&v[1])); + __m128 mz = _mm_load_ss(reinterpret_cast(&v[2])); + __m128 mxy = _mm_unpacklo_ps(mx, my); + data = _mm_castps_si128(_mm_movelh_ps(mxy, mz)); + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, unsigned int, packed_highp>::vec(const vec<3, unsigned int, aligned_highp>& v) + { + _mm_store_sd(reinterpret_cast(this), _mm_castsi128_pd(v.data)); + __m128 mz = _mm_shuffle_ps(_mm_castsi128_ps(v.data), _mm_castsi128_ps(v.data), _MM_SHUFFLE(2, 2, 2, 2)); + _mm_store_ss(reinterpret_cast(this) + 2, mz); + } + + CTORSL(3, CTOR_DOUBLE); + //CTORSL(3, CTOR_INT64); + +#endif //GLM_ARCH & GLM_ARCH_SSE2_BITt + + +} + +#endif diff --git a/includes/glm/detail/type_vec4.hpp b/includes/glm/detail/type_vec4.hpp new file mode 100644 index 0000000..9ba1122 --- /dev/null +++ b/includes/glm/detail/type_vec4.hpp @@ -0,0 +1,514 @@ +/// @ref core +/// @file glm/detail/type_vec4.hpp + +#pragma once + +#include "qualifier.hpp" +#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR +# include "_swizzle.hpp" +#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION +# include "_swizzle_func.hpp" +#endif +#include + +namespace glm +{ + template + struct vec<4, T, Q> + { + // -- Implementation detail -- + + typedef T value_type; + typedef vec<4, T, Q> type; + typedef vec<4, bool, Q> bool_type; + + enum is_aligned + { + value = detail::is_aligned::value + }; + + // -- Data -- + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wpedantic" +# elif GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" +# pragma clang diagnostic ignored "-Wnested-anon-types" +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union +# endif +# endif + +# if GLM_CONFIG_XYZW_ONLY + T x, y, z, w; 
+# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, Q, x, y, z, w) +# endif//GLM_CONFIG_SWIZZLE +# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE + union + { + struct { T x, y, z, w; }; + struct { T r, g, b, a; }; + struct { T s, t, p, q; }; + + typename detail::storage<4, T, detail::is_aligned::value>::type data; + +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + GLM_SWIZZLE4_2_MEMBERS(T, Q, x, y, z, w) + GLM_SWIZZLE4_2_MEMBERS(T, Q, r, g, b, a) + GLM_SWIZZLE4_2_MEMBERS(T, Q, s, t, p, q) + GLM_SWIZZLE4_3_MEMBERS(T, Q, x, y, z, w) + GLM_SWIZZLE4_3_MEMBERS(T, Q, r, g, b, a) + GLM_SWIZZLE4_3_MEMBERS(T, Q, s, t, p, q) + GLM_SWIZZLE4_4_MEMBERS(T, Q, x, y, z, w) + GLM_SWIZZLE4_4_MEMBERS(T, Q, r, g, b, a) + GLM_SWIZZLE4_4_MEMBERS(T, Q, s, t, p, q) +# endif + }; +# else + union { T x, r, s; }; + union { T y, g, t; }; + union { T z, b, p; }; + union { T w, a, q; }; + +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION + GLM_SWIZZLE_GEN_VEC_FROM_VEC4(T, Q) +# endif +# endif + +# if GLM_SILENT_WARNINGS == GLM_ENABLE +# if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_GCC +# pragma GCC diagnostic pop +# elif GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif +# endif + + // -- Component accesses -- + + typedef length_t length_type; + + /// Return the count of components of the vector + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 4;} + + GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); + GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; + + // -- Implicit basic constructors -- + + GLM_DEFAULTED_DEFAULT_CTOR_DECL GLM_CONSTEXPR vec() GLM_DEFAULT_CTOR; + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec(vec<4, T, Q> const& v) GLM_DEFAULT; + template + GLM_CTOR_DECL vec(vec<4, T, P> const& v); + + // -- Explicit basic constructors -- + + GLM_CTOR_DECL explicit vec(T scalar); + GLM_CTOR_DECL vec(T x, T y, T z, T w); + + // -- Conversion scalar constructors -- + + template + GLM_CTOR_DECL explicit vec(vec<1, U, P> const& v); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(X _x, Y _y, Z _z, W _w); + template + GLM_CTOR_DECL vec(vec<1, X, Q> const& _x, Y _y, Z _z, W _w); + template + GLM_CTOR_DECL vec(X _x, vec<1, Y, Q> const& _y, Z _z, W _w); + template + GLM_CTOR_DECL vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, W _w); + template + GLM_CTOR_DECL vec(X _x, Y _y, vec<1, Z, Q> const& _z, W _w); + template + GLM_CTOR_DECL vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, W _w); + template + GLM_CTOR_DECL vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w); + template + GLM_CTOR_DECL vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w); + template + GLM_CTOR_DECL vec(vec<1, X, Q> const& _x, Y _y, Z _z, vec<1, W, Q> const& _w); + template + GLM_CTOR_DECL vec(X _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w); + template + GLM_CTOR_DECL vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w); + template + GLM_CTOR_DECL vec(X _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); + template + GLM_CTOR_DECL vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); + template + GLM_CTOR_DECL vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); + template + GLM_CTOR_DECL vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, 
vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); + + // -- Conversion vector constructors -- + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<2, A, P> const& _xy, B _z, C _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, C _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<2, A, P> const& _xy, B _z, vec<1, C, P> const& _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, vec<1, C, P> const& _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(A _x, vec<2, B, P> const& _yz, C _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, C _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(A _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(A _x, B _y, vec<2, C, P> const& _zw); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<1, A, P> const& _x, B _y, vec<2, C, P> const& _zw); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(A _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<1, A, P> const& _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<3, A, P> const& _xyz, B _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<3, A, P> const& _xyz, vec<1, B, P> const& _w); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(A _x, vec<3, B, P> const& _yzw); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<1, A, P> const& _x, vec<3, B, P> const& _yzw); + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + template + GLM_CTOR_DECL vec(vec<2, A, P> const& _xy, vec<2, B, P> const& _zw); + + /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) + 
template + GLM_CTOR_DECL GLM_EXPLICIT vec(vec<4, U, P> const& v); + + // -- Swizzle constructors -- +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + template + GLM_FUNC_DISCARD_DECL vec(detail::_swizzle<4, T, Q, E0, E1, E2, E3> const& that) + { + *this = that(); + } + + template + GLM_FUNC_DISCARD_DECL vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, detail::_swizzle<2, T, Q, F0, F1, -1, -2> const& u) + { + *this = vec<4, T, Q>(v(), u()); + } + + template + GLM_FUNC_DISCARD_DECL vec(T const& x, T const& y, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v) + { + *this = vec<4, T, Q>(x, y, v()); + } + + template + GLM_FUNC_DISCARD_DECL vec(T const& x, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& w) + { + *this = vec<4, T, Q>(x, v(), w); + } + + template + GLM_FUNC_DISCARD_DECL vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& z, T const& w) + { + *this = vec<4, T, Q>(v(), z, w); + } + + template + GLM_FUNC_DISCARD_DECL vec(detail::_swizzle<3, T, Q, E0, E1, E2, 3> const& v, T const& w) + { + *this = vec<4, T, Q>(v(), w); + } + + template + GLM_FUNC_DISCARD_DECL vec(T const& x, detail::_swizzle<3, T, Q, E0, E1, E2, 3> const& v) + { + *this = vec<4, T, Q>(x, v()); + } +# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + + // -- Unary arithmetic operators -- + + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator=(vec<4, T, Q> const& v) GLM_DEFAULT; + + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q>& operator=(vec<4, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(vec<4, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(vec<4, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(vec<4, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(U scalar); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(vec<1, U, Q> const& v); + template + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(vec<4, U, Q> const& v); + + // -- Increment and decrement operators -- + + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q> & operator++(); + GLM_FUNC_DISCARD_DECL GLM_CONSTEXPR vec<4, T, Q> & operator--(); + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator++(int); + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator--(int); + + // -- Unary bit operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> 
& operator|=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(vec<4, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(U scalar); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(vec<1, U, Q> const& v); + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(vec<4, U, Q> const& v); + }; + + + // -- Unary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v); + + // -- Binary operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL 
GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v, T scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(T scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator~(vec<4, T, Q> const& v); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); + + template + GLM_FUNC_DECL 
GLM_CONSTEXPR vec<4, bool, Q> operator&&(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2); + + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> operator||(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2); +}//namespace glm + +#ifndef GLM_EXTERNAL_TEMPLATE +#include "type_vec4.inl" +#endif//GLM_EXTERNAL_TEMPLATE diff --git a/includes/glm/detail/type_vec4.inl b/includes/glm/detail/type_vec4.inl new file mode 100644 index 0000000..66539e3 --- /dev/null +++ b/includes/glm/detail/type_vec4.inl @@ -0,0 +1,1130 @@ +/// @ref core + +#include "compute_vector_relational.hpp" +#include "compute_vector_decl.hpp" + +namespace glm{ +namespace detail +{ + +}//namespace detail + + // -- Implicit basic constructors -- + +# if GLM_CONFIG_DEFAULTED_DEFAULT_CTOR == GLM_DISABLE + template + GLM_DEFAULTED_DEFAULT_CTOR_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec() +# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE + : x(0), y(0), z(0), w(0) +# endif + {} +# endif + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, T, Q> const& v) + : x(v.x), y(v.y), z(v.z), w(v.w) + {} +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, T, P> const& v) + : x(v.x), y(v.y), z(v.z), w(v.w) + {} + + // -- Explicit basic constructors -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(T scalar) + : x(scalar), y(scalar), z(scalar), w(scalar) + {} + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(T _x, T _y, T _z, T _w) + : x(_x), y(_y), z(_z), w(_w) + {} + + // -- Conversion scalar constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.x)) + , z(static_cast(v.x)) + , w(static_cast(v.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, Z _z, W _w) + : x(static_cast(_x)) + , y(static_cast(_y)) + , z(static_cast(_z)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z, W _w) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + , z(static_cast(_z)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z, W _w) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + , z(static_cast(_z)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, W _w) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + , z(static_cast(_z)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z, W _w) + : x(static_cast(_x)) + , y(static_cast(_y)) + , z(static_cast(_z.x)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, W _w) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + , z(static_cast(_z.x)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + , z(static_cast(_z.x)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> 
const& _z, W _w) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + , z(static_cast(_z.x)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z, vec<1, W, Q> const& _w) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + , z(static_cast(_z)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + , z(static_cast(_z)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + , z(static_cast(_z)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) + : x(static_cast(_x)) + , y(static_cast(_y)) + , z(static_cast(_z.x)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + , z(static_cast(_z.x)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + , z(static_cast(_z.x)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + , z(static_cast(_z.x)) + , w(static_cast(_w.x)) + {} + + // -- Conversion vector constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, B _z, C _w) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_z)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, C _w) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_z.x)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, B _z, vec<1, C, P> const& _w) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_z)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, vec<1, C, P> const& _w) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_z.x)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<2, B, P> const& _yz, C _w) + : x(static_cast(_x)) + , y(static_cast(_yz.x)) + , z(static_cast(_yz.y)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, C _w) + : x(static_cast(_x.x)) + , y(static_cast(_yz.x)) + , z(static_cast(_yz.y)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w) + : x(static_cast(_x)) + , y(static_cast(_yz.x)) + , z(static_cast(_yz.y)) + , 
w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w) + : x(static_cast(_x.x)) + , y(static_cast(_yz.x)) + , z(static_cast(_yz.y)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, B _y, vec<2, C, P> const& _zw) + : x(static_cast(_x)) + , y(static_cast(_y)) + , z(static_cast(_zw.x)) + , w(static_cast(_zw.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, B _y, vec<2, C, P> const& _zw) + : x(static_cast(_x.x)) + , y(static_cast(_y)) + , z(static_cast(_zw.x)) + , w(static_cast(_zw.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw) + : x(static_cast(_x)) + , y(static_cast(_y.x)) + , z(static_cast(_zw.x)) + , w(static_cast(_zw.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw) + : x(static_cast(_x.x)) + , y(static_cast(_y.x)) + , z(static_cast(_zw.x)) + , w(static_cast(_zw.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<3, A, P> const& _xyz, B _w) + : x(static_cast(_xyz.x)) + , y(static_cast(_xyz.y)) + , z(static_cast(_xyz.z)) + , w(static_cast(_w)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<3, A, P> const& _xyz, vec<1, B, P> const& _w) + : x(static_cast(_xyz.x)) + , y(static_cast(_xyz.y)) + , z(static_cast(_xyz.z)) + , w(static_cast(_w.x)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<3, B, P> const& _yzw) + : x(static_cast(_x)) + , y(static_cast(_yzw.x)) + , z(static_cast(_yzw.y)) + , w(static_cast(_yzw.z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<3, B, P> const& _yzw) + : x(static_cast(_x.x)) + , y(static_cast(_yzw.x)) + , z(static_cast(_yzw.y)) + , w(static_cast(_yzw.z)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<2, B, P> const& _zw) + : x(static_cast(_xy.x)) + , y(static_cast(_xy.y)) + , z(static_cast(_zw.x)) + , w(static_cast(_zw.y)) + {} + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, U, P> const& v) + : x(static_cast(v.x)) + , y(static_cast(v.y)) + , z(static_cast(v.z)) + , w(static_cast(v.w)) + {} + + + // -- Component accesses -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T& vec<4, T, Q>::operator[](typename vec<4, T, Q>::length_type i) + { + GLM_ASSERT_LENGTH(i, this->length()); + switch (i) + { + default: + case 0: + return x; + case 1: + return y; + case 2: + return z; + case 3: + return w; + } + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<4, T, Q>::operator[](typename vec<4, T, Q>::length_type i) const + { + GLM_ASSERT_LENGTH(i, this->length()); + switch (i) + { + default: + case 0: + return x; + case 1: + return y; + case 2: + return z; + case 3: + return w; + } + } + + // -- Unary arithmetic operators -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>& vec<4, T, Q>::operator=(vec<4, T, Q> const& v) + { + this->x = v.x; + this->y = v.y; + this->z = v.z; + this->w = v.w; + return *this; + } +# endif + + template + template + GLM_FUNC_QUALIFIER 
GLM_CONSTEXPR vec<4, T, Q>& vec<4, T, Q>::operator=(vec<4, U, Q> const& v) + { + this->x = static_cast(v.x); + this->y = static_cast(v.y); + this->z = static_cast(v.z); + this->w = static_cast(v.w); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(U scalar) + { + return (*this = detail::compute_vec_add<4, T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_add<4, T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v.x))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec_add<4, T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(U scalar) + { + return (*this = detail::compute_vec_sub<4, T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_sub<4, T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v.x))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec_sub<4, T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(U scalar) + { + return (*this = detail::compute_vec_mul<4,T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_mul<4,T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v.x))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec_mul<4,T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(U scalar) + { + return (*this = detail::compute_vec_div<4, T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_div<4, T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v.x))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec_div<4, T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + // -- Increment and decrement operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator++() + { + ++this->x; + ++this->y; + ++this->z; + ++this->w; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator--() + { + --this->x; + --this->y; + --this->z; + --this->w; + return *this; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> vec<4, T, Q>::operator++(int) + { + vec<4, 
T, Q> Result(*this); + ++*this; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> vec<4, T, Q>::operator--(int) + { + vec<4, T, Q> Result(*this); + --*this; + return Result; + } + + // -- Unary bit operators -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(U scalar) + { + return (*this = detail::compute_vec_mod<4, T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_mod<4, T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec_mod<4, T, Q, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(U scalar) + { + return (*this = detail::compute_vec_and<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_and<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec_and<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(U scalar) + { + return (*this = detail::compute_vec_or<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_or<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec_or<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(U scalar) + { + return (*this = detail::compute_vec_xor<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_xor<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec_xor<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(U scalar) + { + return
(*this = detail::compute_vec_shift_left<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_shift_left<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec_shift_left<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(U scalar) + { + return (*this = detail::compute_vec_shift_right<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(vec<1, U, Q> const& v) + { + return (*this = detail::compute_vec_shift_right<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(vec<4, U, Q> const& v) + { + return (*this = detail::compute_vec_shift_right<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); + } + + // -- Unary constant operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v) + { + return vec<4, T, Q>(0) -= v; + } + + // -- Binary arithmetic operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) += scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) += v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(v) += scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v2) += v1; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) += v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) -= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) -= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) -= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) -= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) -= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, 
T scalar) + { + return vec<4, T, Q>(v) *= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) *= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(v) *= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v2) *= v1; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) *= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) /= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) /= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) /= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) /= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) /= v2; + } + + // -- Binary bit operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) %= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) %= v2.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) %= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar.x) %= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) %= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) &= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar) + { + return vec<4, T, Q>(v) &= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) &= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) &= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) &= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) |= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) |= v2.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) |= v; + } + + template + 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) |= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) |= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) ^= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) ^= v2.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) ^= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) ^= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) ^= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) <<= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) <<= v2.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) <<= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) <<= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) <<= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v, T scalar) + { + return vec<4, T, Q>(v) >>= scalar; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) + { + return vec<4, T, Q>(v1) >>= v2.x; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(T scalar, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(scalar) >>= v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1.x) >>= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return vec<4, T, Q>(v1) >>= v2; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator~(vec<4, T, Q> const& v) + { + return detail::compute_vec_bitwise_not<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(v); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return detail::compute_vec_equal<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(v1, v2); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) + { + return detail::compute_vec_nequal<4, T, Q, detail::is_int::value, sizeof(T) * 8, detail::is_aligned::value>::call(v1, v2); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> operator&&(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2) + { + 
return vec<4, bool, Q>(v1.x && v2.x, v1.y && v2.y, v1.z && v2.z, v1.w && v2.w); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> operator||(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2) + { + return vec<4, bool, Q>(v1.x || v2.x, v1.y || v2.y, v1.z || v2.z, v1.w || v2.w); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "type_vec_simd.inl" + +namespace glm { +#if GLM_ARCH & GLM_ARCH_NEON_BIT && !GLM_CONFIG_XYZW_ONLY + CTORSL(4, CTOR_FLOAT); + CTORSL(4, CTOR_INT); + CTORSL(4, CTOR_UINT); + CTORSL(4, CTOR_VECF_INT4); + CTORSL(4, CTOR_VECF_UINT4); + CTORSL(4, CTOR_VECF_VECF); + CTORSL(4, CTOR_VECF_VECI); + CTORSL(4, CTOR_VECF_VECU); + + +#endif// GLM_ARCH & GLM_ARCH_NEON_BIT + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + CTORSL(4, CTOR_FLOAT); + CTORSL(4, CTOR_DOUBLE); + CTORSL(4, CTOR_FLOAT4); + CTORSL(4, CTOR_DOUBLE4); + CTORSL(4, CTOR_INT); + CTORSL(4, CTOR_INT4); + CTORSL(4, CTOR_VECF_INT4); + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, float, aligned_highp>& v): + data(v.data) + { + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, float, packed_highp>& v) + { + data = _mm_loadu_ps(reinterpret_cast(&v)); + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, packed_highp>::vec(const vec<4, float, aligned_highp>& v) + { + _mm_storeu_ps(reinterpret_cast(this), v.data); + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(const vec<4, int, aligned_highp>& v) : + data(v.data) + { + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(const vec<4, int, packed_highp>& v) + { + data = _mm_loadu_si128(reinterpret_cast(&v)); + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, packed_highp>::vec(const vec<4, int, aligned_highp>& v) + { + _mm_storeu_si128(reinterpret_cast<__m128i*>(this), v.data); + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_highp>::vec(const vec<4, double, aligned_highp>& v) + { +# if (GLM_ARCH & GLM_ARCH_AVX_BIT) + data = v.data; +#else + data.setv(0, v.data.getv(0)); + data.setv(1, v.data.getv(1)); +#endif + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_highp>::vec(const vec<4, double, packed_highp>& v) + { +# if (GLM_ARCH & GLM_ARCH_AVX_BIT) + data = _mm256_loadu_pd(reinterpret_cast(&v)); +#else + data.setv(0, _mm_loadu_pd(reinterpret_cast(&v))); + data.setv(1, _mm_loadu_pd(reinterpret_cast(&v)+2)); +#endif + } + + template<> + template<> + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, packed_highp>::vec(const vec<4, double, aligned_highp>& v) + { +# if (GLM_ARCH & GLM_ARCH_AVX_BIT) + _mm256_storeu_pd(reinterpret_cast(this), v.data); +#else + _mm_storeu_pd(reinterpret_cast(this), v.data.getv(0)); + _mm_storeu_pd(reinterpret_cast(this) + 2, v.data.getv(1)); +#endif + } + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT +} + +#endif diff --git a/includes/glm/detail/type_vec_simd.inl b/includes/glm/detail/type_vec_simd.inl new file mode 100644 index 0000000..566abec --- /dev/null +++ b/includes/glm/detail/type_vec_simd.inl @@ -0,0 +1,1032 @@ +#pragma once + +#define CTORSL(L, CTOR)\ +CTOR(L, aligned_lowp)\ +CTOR(L, aligned_mediump)\ +CTOR(L, aligned_highp)\ + +namespace glm { + namespace detail + { + +template +struct compute_vec_and : public compute_vec_and +{}; + +template +struct compute_vec_or: 
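// [Editor's sketch -- not part of the patch] The specialisations above convert between
// packed and aligned vec4 layouts through unaligned loads/stores, so the packed side may
// live at any address. The same idea in plain SSE, as a standalone illustration:
#include <xmmintrin.h>

inline void editor_sketch_packed_to_aligned(const float* packed_src /* any alignment */)
{
	__m128 r = _mm_loadu_ps(packed_src); // unaligned load: packed storage -> register
	alignas(16) float aligned_dst[4];
	_mm_store_ps(aligned_dst, r);        // aligned store: register -> 16-byte slot
	(void)aligned_dst;
}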
public compute_vec_or +{}; + +template +struct compute_vec_xor : public compute_vec_xor +{}; + +template +struct compute_vec_shift_left : public compute_vec_shift_left +{}; + +template +struct compute_vec_shift_right : public compute_vec_shift_right +{}; + +template +struct compute_vec_bitwise_not:public compute_vec_bitwise_not +{}; + +template +struct compute_vec_equal : public compute_vec_equal +{}; + +template +struct compute_vec_nequal : public compute_vec_nequal +{}; + +template +struct compute_vec_mod : public compute_vec_mod +{}; + + +template +struct compute_vec_add : public compute_vec_add +{}; + +template< length_t L, typename T, qualifier Q> +struct compute_vec_sub : public compute_vec_sub +{}; + +template< length_t L, typename T, qualifier Q> +struct compute_vec_mul : public compute_vec_mul +{}; + +template< length_t L, typename T, qualifier Q> +struct compute_vec_div : public compute_vec_div +{}; + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + template + struct _swizzle_base1 : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER vec operator ()() const + { + __m128 data = *reinterpret_cast<__m128 const*>(&this->_buffer); + + vec Result; +# if GLM_ARCH & GLM_ARCH_AVX_BIT + Result.data = _mm_permute_ps(data, _MM_SHUFFLE(E3, E2, E1, E0)); +# else + Result.data = _mm_shuffle_ps(data, data, _MM_SHUFFLE(E3, E2, E1, E0)); +# endif + return Result; + } + }; + + template + struct _swizzle_base1<2, float, Q, E0, E1, E2, E3, true> : public _swizzle_base1<2, float, Q, E0, E1, E2, E3, false> {}; + + template + struct _swizzle_base1<2, int, Q, E0, E1, E2, E3, true> : public _swizzle_base1<2, int, Q, E0, E1, E2, E3, false> {}; + + template + struct _swizzle_base1 : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER vec operator ()() const + { + __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer); + + vec Result; + Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0)); + return Result; + } + }; + + template + struct _swizzle_base1 : public _swizzle_base0 + { + GLM_FUNC_QUALIFIER vec operator ()() const + { + __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer); + + vec Result; + Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0)); + return Result; + } + }; +# endif// GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + + + template + struct compute_vec_add + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = _mm_add_ps(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_add + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = _mm_add_epi32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_add + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; +# if (GLM_ARCH & GLM_ARCH_AVX_BIT) + Result.data = _mm256_add_pd(a.data, b.data); +#else + Result.data.setv(0, _mm_add_pd(a.data.getv(0), b.data.getv(0))); + Result.data.setv(1, _mm_add_pd(a.data.getv(1), b.data.getv(1))); +#endif + return Result; + } + }; + + template + struct compute_vec_sub + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = _mm_sub_ps(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_sub + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = _mm_sub_epi32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_sub + { + 
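// [Editor's sketch -- not part of the patch] The swizzle specialisations above build
// their shuffle mask with _MM_SHUFFLE, which packs four 2-bit lane indices given in
// w, z, y, x order, and feed it to _mm_shuffle_ps / _mm_shuffle_epi32 (or _mm_permute_ps
// on AVX). Standalone illustration of a .wzyx swizzle:
#include <xmmintrin.h>

inline __m128 editor_sketch_wzyx(__m128 v)
{
	// _MM_SHUFFLE(0, 1, 2, 3): lane 3 (w) goes to slot 0, lane 2 (z) to slot 1,
	// lane 1 (y) to slot 2 and lane 0 (x) to slot 3, i.e. the vector reversed.
	return _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 1, 2, 3));
}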
GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; +#if (GLM_ARCH & GLM_ARCH_AVX_BIT) + Result.data = _mm256_sub_pd(a.data, b.data); +#else + Result.data.setv(0, _mm_sub_pd(a.data.getv(0), b.data.getv(0))); + Result.data.setv(1, _mm_sub_pd(a.data.getv(1), b.data.getv(1))); +#endif + return Result; + } + }; + + template + struct compute_vec_mul + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = _mm_mul_ps(a.data, b.data); + return Result; + } + }; + + + template + struct compute_vec_mul + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; +#if (GLM_ARCH & GLM_ARCH_AVX_BIT) + Result.data = _mm256_mul_pd(a.data, b.data); +#else + Result.data.setv(0, _mm_mul_pd(a.data.getv(0), b.data.getv(0))); + Result.data.setv(1, _mm_mul_pd(a.data.getv(1), b.data.getv(1))); +#endif + return Result; + } + }; + + template + struct compute_vec_mul + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; + glm_i32vec4 ia = a.data; + glm_i32vec4 ib = b.data; +#ifdef __SSE4_1__ // modern CPU - use SSE 4.1 + Result.data = _mm_mullo_epi32(ia, ib); +#else // old CPU - use SSE 2 + __m128i tmp1 = _mm_mul_epu32(ia, ib); /* mul 2,0*/ + __m128i tmp2 = _mm_mul_epu32(_mm_srli_si128(ia, 4), _mm_srli_si128(ib, 4)); /* mul 3,1 */ + Result.data = _mm_unpacklo_epi32(_mm_shuffle_epi32(tmp1, _MM_SHUFFLE(0, 0, 2, 0)), _mm_shuffle_epi32(tmp2, _MM_SHUFFLE(0, 0, 2, 0))); /* shuffle results to [63..0] and pack */ +#endif + return Result; + } + }; + + template + struct compute_vec_div + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = _mm_div_ps(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_div + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { +#if defined(_MSC_VER) && _MSC_VER >= 1920 //_mm_div_epi32 only defined with VS >= 2019 + vec Result; + Result.data = _mm_div_epi32(a.data, b.data); + return Result; +#else + return compute_vec_div::call(a, b); +#endif + } + }; + + + // note: div on uninitialized w can generate div by 0 exception + template + struct compute_vec_div<3, int, Q, true> + { + + GLM_FUNC_QUALIFIER static vec<3, int, Q> call(vec<3, int, Q> const& a, vec<3, int, Q> const& b) + { +#if defined(_MSC_VER) && _MSC_VER >= 1920 //_mm_div_epi32 only defined with VS >= 2019 + vec<3, int, Q> Result; + glm_i32vec4 bv = b.data; + bv = _mm_shuffle_epi32(bv, _MM_SHUFFLE(0, 2, 1, 0)); + Result.data = _mm_div_epi32(a.data, bv); + return Result; +#else + return compute_vec_div<3, int, Q, false>::call(a, b); +#endif + } + }; + + + template + struct compute_vec_div + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; +# if GLM_ARCH & GLM_ARCH_AVX_BIT + Result.data = _mm256_div_pd(a.data, b.data); +# else + Result.data.setv(0, _mm_div_pd(a.data.getv(0), b.data.getv(0))); + Result.data.setv(1, _mm_div_pd(a.data.getv(1), b.data.getv(1))); +# endif + return Result; + } + }; + + template + struct compute_vec_div + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = _mm_mul_ps(a.data, _mm_rcp_ps(b.data)); + return Result; + } + }; + + template + struct compute_vec_and + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = _mm_and_si128(a.data, b.data); + return Result; + } + }; + + + template + struct compute_vec_and + { + GLM_FUNC_QUALIFIER static vec call(vec const& 
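// [Editor's sketch -- not part of the patch] Without SSE4.1's _mm_mullo_epi32, the
// integer multiply above is emulated with two widening _mm_mul_epu32 multiplies (even
// lanes, then odd lanes shifted down by 4 bytes) and a re-interleave of the low 32 bits
// of each product. Standalone version of the same trick:
#include <emmintrin.h>

inline __m128i editor_sketch_mullo_epi32_sse2(__m128i a, __m128i b)
{
	__m128i even = _mm_mul_epu32(a, b);                                       // 64-bit products of lanes 0 and 2
	__m128i odd  = _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4)); // 64-bit products of lanes 1 and 3
	return _mm_unpacklo_epi32(_mm_shuffle_epi32(even, _MM_SHUFFLE(0, 0, 2, 0)),
	                          _mm_shuffle_epi32(odd,  _MM_SHUFFLE(0, 0, 2, 0)));
}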
a, vec const& b) + { + vec Result; +# if GLM_ARCH & GLM_ARCH_AVX2_BIT + Result.data = _mm256_and_si256(a.data, b.data); +# elif GLM_ARCH & GLM_ARCH_AVX_BIT + Result.data = _mm256_and_pd(_mm256_castpd256_pd128(a.data), _mm256_castpd256_pd128(b.data)); +# else + Result.data.setv(0, _mm_and_si128(a.data.getv(0), b.data.getv(0))); + Result.data.setv(1, _mm_and_si128(a.data.getv(1), b.data.getv(1))); +# endif + return Result; + } + }; + + + + template + struct compute_vec_or + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = _mm_or_si128(a.data, b.data); + return Result; + } + }; + + + template + struct compute_vec_or + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; +# if GLM_ARCH & GLM_ARCH_AVX2_BIT + Result.data = _mm256_or_si256(a.data, b.data); +# else + Result.data.setv(0, _mm_or_si128(a.data.getv(0), b.data.getv(0))); + Result.data.setv(1, _mm_or_si128(a.data.getv(1), b.data.getv(1))); +# endif + return Result; + } + }; + + template + struct compute_vec_xor + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = _mm_xor_si128(a.data, b.data); + return Result; + } + }; + + + template + struct compute_vec_xor + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + vec Result; +# if GLM_ARCH & GLM_ARCH_AVX2_BIT + Result.data = _mm256_xor_si256(a.data, b.data); +# else + Result.data.setv(0, _mm_xor_si128(a.data.getv(0), b.data.getv(0))); + Result.data.setv(1, _mm_xor_si128(a.data.getv(1), b.data.getv(1))); +# endif + return Result; + } + }; + + + //template + //struct compute_vec_shift_left<3, T, Q, -1, 32, true> + //{ + // GLM_FUNC_QUALIFIER static vec<3, T, Q> call(vec<3, T, Q> const& a, vec<3, T, Q> const& b) + // { + // vec<3, T, Q> Result; + // __m128 v2 = _mm_castsi128_ps(b.data); + // v2 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(0, 0, 0, 0)); // note: shift is done with xmm[3] that correspond vec w that doesn't exist on vect3 + // _mm_set1_epi64x(_w, _z, _y, _x); + // __m128i vr = _mm_sll_epi32(a.data, _mm_castps_si128(v2)); + // Result.data = vr; + // return Result; + // } + //}; + + //template + //struct compute_vec_shift_left + //{ + // GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + // { + // vec Result; + // Result.data = _mm_sll_epi32(a.data, b.data); + // return Result; + // } + //}; + + +// template +// struct compute_vec_shift_left +// { +// GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) +// { +// vec Result; +//# if GLM_ARCH & GLM_ARCH_AVX2_BIT +// Result.data = _mm256_sll_epi64(a.data, b.data); +//# else +// Result.data.setv(0, _mm_sll_epi64(a.data.getv(0), b.data.getv(0))); +// Result.data.setv(1, _mm_sll_epi64(a.data.getv(1), b.data.getv(1))); +//# endif +// return Result; +// } +// }; + + +// template +// struct compute_vec_shift_right +// { +// GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) +// { +// vec Result; +// Result.data = _mm_srl_epi32(a.data, b.data); +// return Result; +// } +// }; +// +// template +// struct compute_vec_shift_right +// { +// GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) +// { +// vec Result; +//# if GLM_ARCH & GLM_ARCH_AVX2_BIT +// Result.data = _mm256_srl_epi64(a.data, b.data); +//# else +// Result.data.setv(0, _mm_srl_epi64(a.data.getv(0), b.data.getv(0))); +// Result.data.setv(1, _mm_srl_epi64(a.data.getv(1), b.data.getv(1))); +//# endif +// return Result; +// } +// }; + + template + struct compute_vec_bitwise_not + { + 
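// [Editor's sketch -- not part of the patch] The 256-bit wide bitwise operators above use
// a single AVX2 instruction when available and otherwise fall back to operating on the
// two 128-bit halves of the vector. The same pattern written against plain intrinsic
// types as a standalone illustration; unlike the fallback above, which uses glm's own
// two-half storage, this sketch still assumes the AVX __m256i type exists:
#include <immintrin.h>

inline __m256i editor_sketch_or_4x64(__m256i a, __m256i b)
{
#if defined(__AVX2__)
	return _mm256_or_si256(a, b);                       // single 256-bit OR
#else
	__m128i lo = _mm_or_si128(_mm256_castsi256_si128(a),
	                          _mm256_castsi256_si128(b));
	__m128i hi = _mm_or_si128(_mm256_extractf128_si256(a, 1),
	                          _mm256_extractf128_si256(b, 1));
	return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
#endif
}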
GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + vec Result; + Result.data = _mm_xor_si128(v.data, _mm_set1_epi32(-1)); + return Result; + } + }; + + + template + struct compute_vec_bitwise_not + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + vec Result; +# if GLM_ARCH & GLM_ARCH_AVX2_BIT + Result.data = _mm256_xor_si256(v.data, _mm256_set1_epi32(-1)); +# else + Result.data.setv(0, _mm_xor_si128(v.data.getv(0), _mm_set1_epi32(-1))); + Result.data.setv(1, _mm_xor_si128(v.data.getv(1), _mm_set1_epi32(-1))); +# endif + return Result; + } + }; + + + template + struct compute_vec_equal + { + GLM_FUNC_QUALIFIER static bool call(vec const& v1, vec const& v2) + { + return _mm_movemask_ps(_mm_cmpneq_ps(v1.data, v2.data)) == 0; + } + }; + +# if GLM_ARCH & GLM_ARCH_SSE41_BIT + template + struct compute_vec_equal + { + GLM_FUNC_QUALIFIER static bool call(vec const& v1, vec const& v2) + { + //return _mm_movemask_epi8(_mm_cmpeq_epi32(v1.data, v2.data)) != 0; + __m128i neq = _mm_xor_si128(v1.data, v2.data); + return _mm_test_all_zeros(neq, neq) == 0; + } + }; +# endif + + + + template + struct compute_vec_nequal + { + GLM_FUNC_QUALIFIER static bool call(vec const& v1, vec const& v2) + { + return _mm_movemask_ps(_mm_cmpneq_ps(v1.data, v2.data)) != 0; + } + }; + +# if GLM_ARCH & GLM_ARCH_SSE41_BIT + template + struct compute_vec_nequal + { + GLM_FUNC_QUALIFIER static bool call(vec const& v1, vec const& v2) + { + //return _mm_movemask_epi8(_mm_cmpneq_epi32(v1.data, v2.data)) != 0; + __m128i neq = _mm_xor_si128(v1.data, v2.data); + int v = _mm_test_all_zeros(neq, neq); + return v != 1; + } + }; + template + struct compute_vec_nequal + { + GLM_FUNC_QUALIFIER static bool call(vec const& v1, vec const& v2) + { + //return _mm_movemask_epi8(_mm_cmpneq_epi32(v1.data, v2.data)) != 0; + __m128i neq = _mm_xor_si128(v1.data, v2.data); + return _mm_test_all_zeros(neq, neq) != 1; + } + }; +# else + + + template + struct compute_vec_nequal + { + GLM_FUNC_QUALIFIER static bool call(vec const& v1, vec const& v2) + { + return compute_vec_nequal::call(v1, v2); + } + }; + +# endif + + + + +}//namespace detail + + +#define CTOR_FLOAT(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(float _s) :\ + data(_mm_set1_ps(_s))\ + {} + +#if GLM_ARCH & GLM_ARCH_AVX_BIT +# define CTOR_DOUBLE(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(double _s) :\ + data(_mm256_set1_pd(_s)){} + +#define CTOR_DOUBLE4(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(double _x, double _y, double _z, double _w):\ + data(_mm256_set_pd(_w, _z, _y, _x)) {} + +#define CTOR_DOUBLE3(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(double _x, double _y, double _z):\ + data(_mm256_set_pd(_z, _z, _y, _x)) {} + +# define CTOR_INT64(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(detail::int64 _s) :\ + data(_mm256_set1_epi64x(_s)){} + +#define CTOR_DOUBLE_COPY3(L, Q)\ + template<>\ + template\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, double, Q>::vec(vec<3, double, P> const& v) :\ + data(_mm256_setr_pd(v.x, v.y, v.z, v.z)){} + +#else +# define CTOR_DOUBLE(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(double _v) \ + {\ + data.setv(0, _mm_set1_pd(_v)); \ + data.setv(1, _mm_set1_pd(_v)); \ + } + +#define CTOR_DOUBLE4(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(double _x, double _y, double _z, double _w)\ + {\ + data.setv(0, _mm_setr_pd(_x, _y)); \ + data.setv(1, _mm_setr_pd(_z, _w)); \ + } + +#define CTOR_DOUBLE3(L, Q)\ + 
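// [Editor's sketch -- not part of the patch] The SSE equality test above compares all
// four float lanes at once: _mm_cmpneq_ps sets a lane to all ones where the inputs
// differ, and _mm_movemask_ps collapses the four sign bits into an integer, so a result
// of 0 means "no lane differs". Standalone version:
#include <xmmintrin.h>

inline bool editor_sketch_all_equal_ps(__m128 a, __m128 b)
{
	return _mm_movemask_ps(_mm_cmpneq_ps(a, b)) == 0;
}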
template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(double _x, double _y, double _z)\ + {\ + data.setv(0, _mm_setr_pd(_x, _y)); \ + data.setv(1, _mm_setr_pd(_z, _z)); \ + } + +# define CTOR_INT64(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(detail::int64 _s) :\ + data(_mm256_set1_epi64x(_s)){} + +#define CTOR_DOUBLE_COPY3(L, Q)\ + template<>\ + template\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, double, Q>::vec(vec<3, double, P> const& v)\ + {\ + data.setv(0, _mm_setr_pd(v.x, v.y));\ + data.setv(1, _mm_setr_pd(v.z, 1.0));\ + } + +#endif //GLM_ARCH & GLM_ARCH_AVX_BIT + +#define CTOR_INT(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(int _s) :\ + data(_mm_set1_epi32(_s))\ + {} + +#define CTOR_FLOAT4(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(float _x, float _y, float _z, float _w) :\ + data(_mm_set_ps(_w, _z, _y, _x))\ + {} + +#define CTOR_FLOAT3(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(float _x, float _y, float _z) :\ + data(_mm_set_ps(_z, _z, _y, _x)){} + + +#define CTOR_INT4(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(int _x, int _y, int _z, int _w) :\ + data(_mm_set_epi32(_w, _z, _y, _x)){} + +#define CTOR_INT3(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(int _x, int _y, int _z) :\ + data(_mm_set_epi32(_z, _z, _y, _x)){} + +#define CTOR_VECF_INT4(L, Q)\ + template<>\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(int _x, int _y, int _z, int _w) :\ + data(_mm_cvtepi32_ps(_mm_set_epi32(_w, _z, _y, _x)))\ + {} + +#define CTOR_VECF_INT3(L, Q)\ + template<>\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(int _x, int _y, int _z) :\ + data(_mm_cvtepi32_ps(_mm_set_epi32(_z, _z, _y, _x)))\ + {} + +#define CTOR_DEFAULT(L, Q)\ + template<>\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec() :\ + data(_mm_setzero_ps())\ + {} + +#define CTOR_FLOAT_COPY3(L, Q)\ + template<>\ + template\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, float, Q>::vec(vec<3, float, P> const& v)\ + :data(_mm_set_ps(v.z, v.z, v.y, v.x))\ + {} + + + +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT + +#if GLM_ARCH & GLM_ARCH_NEON_BIT + +#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR +// the functions below needs to be properly implemented, use unoptimized function fro now. 
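// [Editor's sketch -- not part of the patch] The constructor macros above pass their
// arguments to _mm_set_ps / _mm_set_epi32 highest lane first (w, z, y, x), which is why
// the calls read backwards; _mm_setr_ps takes them in memory order instead. Both
// registers below hold the same four floats:
#include <xmmintrin.h>

inline void editor_sketch_set_order()
{
	__m128 a = _mm_set_ps(4.f, 3.f, 2.f, 1.f);  // lanes: x = 1, y = 2, z = 3, w = 4
	__m128 b = _mm_setr_ps(1.f, 2.f, 3.f, 4.f); // identical contents
	(void)a; (void)b;
}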
+ +template +struct _swizzle_base1 : public _swizzle_base1{}; + +template +struct _swizzle_base1 : public _swizzle_base1 {}; + +template +struct _swizzle_base1 : public _swizzle_base1 {}; + +# endif// GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR + + + template + struct compute_vec_add + { + GLM_FUNC_QUALIFIER static + vec + call(vec const& a, vec const& b) + { + vec Result; + Result.data = vaddq_f32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_add + { + GLM_FUNC_QUALIFIER static + vec + call(vec const& a, vec const& b) + { + vec Result; + Result.data = vaddq_u32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_add + { + static + vec + call(vec const& a, vec const& b) + { + vec Result; + Result.data = vaddq_s32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_sub + { + static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = vsubq_f32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_sub + { + static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = vsubq_u32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_sub + { + static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = vsubq_s32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_mul + { + static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = vmulq_f32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_mul + { + static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = vmulq_u32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_mul + { + static vec call(vec const& a, vec const& b) + { + vec Result; + Result.data = vmulq_s32(a.data, b.data); + return Result; + } + }; + + template + struct compute_vec_div + { + static vec call(vec const& a, vec const& b) + { + vec Result; +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + Result.data = vdivq_f32(a.data, b.data); +#else + /* Arm assembler reference: + * + * The Newton-Raphson iteration: x[n+1] = x[n] * (2 - d * x[n]) + * converges to (1/d) if x0 is the result of VRECPE applied to d. + * + * Note: The precision usually improves with two interactions, but more than two iterations are not helpful. 
*/ + float32x4_t x = vrecpeq_f32(b.data); + x = vmulq_f32(vrecpsq_f32(b.data, x), x); + x = vmulq_f32(vrecpsq_f32(b.data, x), x); + Result.data = vmulq_f32(a.data, x); +#endif + return Result; + } + }; + + template + struct compute_vec_equal + { + static bool call(vec const& v1, vec const& v2) + { + uint32x4_t cmp = vceqq_f32(v1.data, v2.data); +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + cmp = vpminq_u32(cmp, cmp); + cmp = vpminq_u32(cmp, cmp); + uint32_t r = cmp[0]; +#else + uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp)); + cmpx2 = vpmin_u32(cmpx2, cmpx2); + uint32_t r = cmpx2[0]; +#endif + return r == ~0u; + } + }; + + template + struct compute_vec_equal + { + static bool call(vec const& v1, vec const& v2) + { + uint32x4_t cmp = vceqq_u32(v1.data, v2.data); +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + cmp = vpminq_u32(cmp, cmp); + cmp = vpminq_u32(cmp, cmp); + uint32_t r = cmp[0]; +#else + uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp)); + cmpx2 = vpmin_u32(cmpx2, cmpx2); + uint32_t r = cmpx2[0]; +#endif + return r == ~0u; + } + }; + + template + struct compute_vec_equal + { + static bool call(vec const& v1, vec const& v2) + { + uint32x4_t cmp = vceqq_s32(v1.data, v2.data); +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + cmp = vpminq_u32(cmp, cmp); + cmp = vpminq_u32(cmp, cmp); + uint32_t r = cmp[0]; +#else + uint32x2_t cmpx2 = vpmin_u32(vget_low_u32(cmp), vget_high_u32(cmp)); + cmpx2 = vpmin_u32(cmpx2, cmpx2); + uint32_t r = cmpx2[0]; +#endif + return r == ~0u; + } + }; + + template + struct compute_vec_nequal + { + static bool call(vec const& v1, vec const& v2) + { + return !compute_vec_equal::call(v1, v2); + } + }; + + template + struct compute_vec_nequal + { + static bool call(vec const& v1, vec const& v2) + { + return !compute_vec_equal::call(v1, v2); + } + }; + + template + struct compute_vec_nequal + { + static bool call(vec const& v1, vec const& v2) + { + return !compute_vec_equal::call(v1, v2); + } + }; + + +#if !GLM_CONFIG_XYZW_ONLY + +#define CTOR_FLOAT(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(float _s) :\ + data(vdupq_n_f32(_s))\ + {} + +#define CTOR_INT(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(int _s) :\ + data(vdupq_n_s32(_s))\ + {} + +#define CTOR_UINT(L, Q)\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(uint _s) :\ + data(vdupq_n_u32(_s))\ + {} + +#define CTOR_VECF_INT4(L, Q)\ + template<>\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(int _x, int _y, int _z, int _w) :\ + data(vcvtq_f32_s32(vec(_x, _y, _z, _w).data))\ + {} + +#define CTOR_VECF_UINT4(L, Q)\ + template<>\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(uint _x, uint _y, uint _z, uint _w) :\ + data(vcvtq_f32_u32(vec(_x, _y, _z, _w).data))\ + {} + +#define CTOR_VECF_INT3(L, Q)\ + template<>\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(int _x, int _y, int _z) :\ + data(vcvtq_f32_s32(vec(_x, _y, _z).data))\ + {} + +#define CTOR_VECF_UINT4(L, Q)\ + template<>\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(uint _x, uint _y, uint _z, uint _w) :\ + data(vcvtq_f32_u32(vec(_x, _y, _z, _w).data))\ + {} + +#define CTOR_VECF_UINT3(L, Q)\ + template<>\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(uint _x, uint _y, uint _z) :\ + data(vcvtq_f32_u32(vec(_x, _y, _z).data))\ + {} + + +#define CTOR_VECF_VECF(L, Q)\ + template<>\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(const vec& rhs) :\ + data(rhs.data)\ + {} + +#define CTOR_VECF_VECI(L, Q)\ + template<>\ + template<>\ + 
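// [Editor's sketch -- not part of the patch] On 32-bit ARM, which lacks vdivq_f32, the
// division above approximates 1/b with vrecpeq_f32 and sharpens it with two
// Newton-Raphson steps (x <- x * (2 - b*x), where vrecpsq_f32(b, x) computes 2 - b*x),
// then multiplies by a. Standalone version of that fallback:
#include <arm_neon.h>

inline float32x4_t editor_sketch_div_f32(float32x4_t a, float32x4_t b)
{
	float32x4_t x = vrecpeq_f32(b);       // rough estimate of 1/b
	x = vmulq_f32(vrecpsq_f32(b, x), x);  // first Newton-Raphson refinement
	x = vmulq_f32(vrecpsq_f32(b, x), x);  // second refinement
	return vmulq_f32(a, x);               // a * (1/b)
}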
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(const vec& rhs) :\ + data(vcvtq_f32_s32(rhs.data))\ + {} + +#define CTOR_VECF_VECU(L, Q)\ + template<>\ + template<>\ + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec::vec(const vec& rhs) :\ + data(vcvtq_f32_u32(rhs.data))\ + {} + + +#endif + + +}//namespace detail + +}//namespace glm + +#endif diff --git a/includes/glm/exponential.hpp b/includes/glm/exponential.hpp new file mode 100644 index 0000000..1614f76 --- /dev/null +++ b/includes/glm/exponential.hpp @@ -0,0 +1,110 @@ +/// @ref core +/// @file glm/exponential.hpp +/// +/// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions +/// +/// @defgroup core_func_exponential Exponential functions +/// @ingroup core +/// +/// Provides GLSL exponential functions +/// +/// These all operate component-wise. The description is per component. +/// +/// Include to use these core features. + +#pragma once + +#include "detail/type_vec1.hpp" +#include "detail/type_vec2.hpp" +#include "detail/type_vec3.hpp" +#include "detail/type_vec4.hpp" +#include + +namespace glm +{ + /// @addtogroup core_func_exponential + /// @{ + + /// Returns 'base' raised to the power 'exponent'. + /// + /// @param base Floating point value. pow function is defined for input values of 'base' defined in the range (inf-, inf+) in the limit of the type qualifier. + /// @param exponent Floating point value representing the 'exponent'. + /// + /// @see GLSL pow man page + /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions + template + GLM_FUNC_DECL vec pow(vec const& base, vec const& exponent); + + /// Returns the natural exponentiation of v, i.e., e^v. + /// + /// @param v exp function is defined for input values of v defined in the range (inf-, inf+) in the limit of the type qualifier. + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL exp man page + /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions + template + GLM_FUNC_DECL vec exp(vec const& v); + + /// Returns the natural logarithm of v, i.e., + /// returns the value y which satisfies the equation x = e^y. + /// Results are undefined if v <= 0. + /// + /// @param v log function is defined for input values of v defined in the range (0, inf+) in the limit of the type qualifier. + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL log man page + /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions + template + GLM_FUNC_DECL vec log(vec const& v); + + /// Returns 2 raised to the v power. + /// + /// @param v exp2 function is defined for input values of v defined in the range (inf-, inf+) in the limit of the type qualifier. + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL exp2 man page + /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions + template + GLM_FUNC_DECL vec exp2(vec const& v); + + /// Returns the base 2 log of x, i.e., returns the value y, + /// which satisfies the equation x = 2 ^ y. + /// + /// @param v log2 function is defined for input values of v defined in the range (0, inf+) in the limit of the type qualifier. + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. 
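// [Editor's sketch -- not part of the patch] The exponential functions declared in
// glm/exponential.hpp (pow, exp, log, exp2, log2, sqrt, inversesqrt) all operate
// component-wise on vectors. Standalone usage illustration for client code, assuming
// the vendored glm headers are on the include path:
#include <glm/glm.hpp>

inline glm::vec4 editor_sketch_exponential()
{
	glm::vec4 const v(1.0f, 4.0f, 9.0f, 16.0f);
	glm::vec4 const roots = glm::sqrt(v);                  // (1, 2, 3, 4)
	glm::vec4 const recip = glm::inversesqrt(v);           // 1 / sqrt(v), per component
	glm::vec4 const powed = glm::pow(v, glm::vec4(2.0f));  // v squared, per component
	return roots + recip + powed;
}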
+ /// + /// @see GLSL log2 man page + /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions + template + GLM_FUNC_DECL vec log2(vec const& v); + + /// Returns the positive square root of v. + /// + /// @param v sqrt function is defined for input values of v defined in the range [0, inf+) in the limit of the type qualifier. + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL sqrt man page + /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions + template + GLM_FUNC_DECL vec sqrt(vec const& v); + + /// Returns the reciprocal of the positive square root of v. + /// + /// @param v inversesqrt function is defined for input values of v defined in the range [0, inf+) in the limit of the type qualifier. + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL inversesqrt man page + /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions + template + GLM_FUNC_DECL vec inversesqrt(vec const& v); + + /// @} +}//namespace glm + +#include "detail/func_exponential.inl" diff --git a/includes/glm/ext.hpp b/includes/glm/ext.hpp new file mode 100644 index 0000000..f9ac369 --- /dev/null +++ b/includes/glm/ext.hpp @@ -0,0 +1,267 @@ +/// @file glm/ext.hpp +/// +/// @ref core (Dependence) + +#include "detail/setup.hpp" + +#pragma once + +#include "glm.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_MESSAGE_EXT_INCLUDED_DISPLAYED) +# define GLM_MESSAGE_EXT_INCLUDED_DISPLAYED +# pragma message("GLM: All extensions included (not recommended)") +#endif//GLM_MESSAGES + +#include "./ext/matrix_clip_space.hpp" +#include "./ext/matrix_common.hpp" + +#include "./ext/matrix_double2x2.hpp" +#include "./ext/matrix_double2x2_precision.hpp" +#include "./ext/matrix_double2x3.hpp" +#include "./ext/matrix_double2x3_precision.hpp" +#include "./ext/matrix_double2x4.hpp" +#include "./ext/matrix_double2x4_precision.hpp" +#include "./ext/matrix_double3x2.hpp" +#include "./ext/matrix_double3x2_precision.hpp" +#include "./ext/matrix_double3x3.hpp" +#include "./ext/matrix_double3x3_precision.hpp" +#include "./ext/matrix_double3x4.hpp" +#include "./ext/matrix_double3x4_precision.hpp" +#include "./ext/matrix_double4x2.hpp" +#include "./ext/matrix_double4x2_precision.hpp" +#include "./ext/matrix_double4x3.hpp" +#include "./ext/matrix_double4x3_precision.hpp" +#include "./ext/matrix_double4x4.hpp" +#include "./ext/matrix_double4x4_precision.hpp" + +#include "./ext/matrix_float2x2.hpp" +#include "./ext/matrix_float2x2_precision.hpp" +#include "./ext/matrix_float2x3.hpp" +#include "./ext/matrix_float2x3_precision.hpp" +#include "./ext/matrix_float2x4.hpp" +#include "./ext/matrix_float2x4_precision.hpp" +#include "./ext/matrix_float3x2.hpp" +#include "./ext/matrix_float3x2_precision.hpp" +#include "./ext/matrix_float3x3.hpp" +#include "./ext/matrix_float3x3_precision.hpp" +#include "./ext/matrix_float3x4.hpp" +#include "./ext/matrix_float3x4_precision.hpp" +#include "./ext/matrix_float4x2.hpp" +#include "./ext/matrix_float4x2_precision.hpp" +#include "./ext/matrix_float4x3.hpp" +#include "./ext/matrix_float4x3_precision.hpp" +#include "./ext/matrix_float4x4.hpp" +#include "./ext/matrix_float4x4_precision.hpp" + +#include "./ext/matrix_int2x2.hpp" +#include "./ext/matrix_int2x2_sized.hpp" +#include "./ext/matrix_int2x3.hpp" +#include "./ext/matrix_int2x3_sized.hpp" +#include 
"./ext/matrix_int2x4.hpp" +#include "./ext/matrix_int2x4_sized.hpp" +#include "./ext/matrix_int3x2.hpp" +#include "./ext/matrix_int3x2_sized.hpp" +#include "./ext/matrix_int3x3.hpp" +#include "./ext/matrix_int3x3_sized.hpp" +#include "./ext/matrix_int3x4.hpp" +#include "./ext/matrix_int3x4_sized.hpp" +#include "./ext/matrix_int4x2.hpp" +#include "./ext/matrix_int4x2_sized.hpp" +#include "./ext/matrix_int4x3.hpp" +#include "./ext/matrix_int4x3_sized.hpp" +#include "./ext/matrix_int4x4.hpp" +#include "./ext/matrix_int4x4_sized.hpp" + +#include "./ext/matrix_uint2x2.hpp" +#include "./ext/matrix_uint2x2_sized.hpp" +#include "./ext/matrix_uint2x3.hpp" +#include "./ext/matrix_uint2x3_sized.hpp" +#include "./ext/matrix_uint2x4.hpp" +#include "./ext/matrix_uint2x4_sized.hpp" +#include "./ext/matrix_uint3x2.hpp" +#include "./ext/matrix_uint3x2_sized.hpp" +#include "./ext/matrix_uint3x3.hpp" +#include "./ext/matrix_uint3x3_sized.hpp" +#include "./ext/matrix_uint3x4.hpp" +#include "./ext/matrix_uint3x4_sized.hpp" +#include "./ext/matrix_uint4x2.hpp" +#include "./ext/matrix_uint4x2_sized.hpp" +#include "./ext/matrix_uint4x3.hpp" +#include "./ext/matrix_uint4x3_sized.hpp" +#include "./ext/matrix_uint4x4.hpp" +#include "./ext/matrix_uint4x4_sized.hpp" + +#include "./ext/matrix_projection.hpp" +#include "./ext/matrix_relational.hpp" +#include "./ext/matrix_transform.hpp" + +#include "./ext/quaternion_common.hpp" +#include "./ext/quaternion_double.hpp" +#include "./ext/quaternion_double_precision.hpp" +#include "./ext/quaternion_float.hpp" +#include "./ext/quaternion_float_precision.hpp" +#include "./ext/quaternion_exponential.hpp" +#include "./ext/quaternion_geometric.hpp" +#include "./ext/quaternion_relational.hpp" +#include "./ext/quaternion_transform.hpp" +#include "./ext/quaternion_trigonometric.hpp" + +#include "./ext/scalar_common.hpp" +#include "./ext/scalar_constants.hpp" +#include "./ext/scalar_integer.hpp" +#include "./ext/scalar_packing.hpp" +#include "./ext/scalar_reciprocal.hpp" +#include "./ext/scalar_relational.hpp" +#include "./ext/scalar_ulp.hpp" + +#include "./ext/scalar_int_sized.hpp" +#include "./ext/scalar_uint_sized.hpp" + +#include "./ext/vector_common.hpp" +#include "./ext/vector_integer.hpp" +#include "./ext/vector_packing.hpp" +#include "./ext/vector_reciprocal.hpp" +#include "./ext/vector_relational.hpp" +#include "./ext/vector_ulp.hpp" + +#include "./ext/vector_bool1.hpp" +#include "./ext/vector_bool1_precision.hpp" +#include "./ext/vector_bool2.hpp" +#include "./ext/vector_bool2_precision.hpp" +#include "./ext/vector_bool3.hpp" +#include "./ext/vector_bool3_precision.hpp" +#include "./ext/vector_bool4.hpp" +#include "./ext/vector_bool4_precision.hpp" + +#include "./ext/vector_double1.hpp" +#include "./ext/vector_double1_precision.hpp" +#include "./ext/vector_double2.hpp" +#include "./ext/vector_double2_precision.hpp" +#include "./ext/vector_double3.hpp" +#include "./ext/vector_double3_precision.hpp" +#include "./ext/vector_double4.hpp" +#include "./ext/vector_double4_precision.hpp" + +#include "./ext/vector_float1.hpp" +#include "./ext/vector_float1_precision.hpp" +#include "./ext/vector_float2.hpp" +#include "./ext/vector_float2_precision.hpp" +#include "./ext/vector_float3.hpp" +#include "./ext/vector_float3_precision.hpp" +#include "./ext/vector_float4.hpp" +#include "./ext/vector_float4_precision.hpp" + +#include "./ext/vector_int1.hpp" +#include "./ext/vector_int1_sized.hpp" +#include "./ext/vector_int2.hpp" +#include "./ext/vector_int2_sized.hpp" +#include 
"./ext/vector_int3.hpp" +#include "./ext/vector_int3_sized.hpp" +#include "./ext/vector_int4.hpp" +#include "./ext/vector_int4_sized.hpp" + +#include "./ext/vector_uint1.hpp" +#include "./ext/vector_uint1_sized.hpp" +#include "./ext/vector_uint2.hpp" +#include "./ext/vector_uint2_sized.hpp" +#include "./ext/vector_uint3.hpp" +#include "./ext/vector_uint3_sized.hpp" +#include "./ext/vector_uint4.hpp" +#include "./ext/vector_uint4_sized.hpp" + +#include "./gtc/bitfield.hpp" +#include "./gtc/color_space.hpp" +#include "./gtc/constants.hpp" +#include "./gtc/epsilon.hpp" +#include "./gtc/integer.hpp" +#include "./gtc/matrix_access.hpp" +#include "./gtc/matrix_integer.hpp" +#include "./gtc/matrix_inverse.hpp" +#include "./gtc/matrix_transform.hpp" +#include "./gtc/noise.hpp" +#include "./gtc/packing.hpp" +#include "./gtc/quaternion.hpp" +#include "./gtc/random.hpp" +#include "./gtc/reciprocal.hpp" +#include "./gtc/round.hpp" +#include "./gtc/type_precision.hpp" +#include "./gtc/type_ptr.hpp" +#include "./gtc/ulp.hpp" +#include "./gtc/vec1.hpp" +#if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE +# include "./gtc/type_aligned.hpp" +#endif + +#ifdef GLM_ENABLE_EXPERIMENTAL +#include "./gtx/associated_min_max.hpp" +#include "./gtx/bit.hpp" +#include "./gtx/closest_point.hpp" +#include "./gtx/color_encoding.hpp" +#include "./gtx/color_space.hpp" +#include "./gtx/color_space_YCoCg.hpp" +#include "./gtx/common.hpp" +#include "./gtx/compatibility.hpp" +#include "./gtx/component_wise.hpp" +#include "./gtx/dual_quaternion.hpp" +#include "./gtx/easing.hpp" +#include "./gtx/euler_angles.hpp" +#include "./gtx/extend.hpp" +#include "./gtx/extended_min_max.hpp" +#include "./gtx/fast_exponential.hpp" +#include "./gtx/fast_square_root.hpp" +#include "./gtx/fast_trigonometry.hpp" +#include "./gtx/functions.hpp" +#include "./gtx/gradient_paint.hpp" +#include "./gtx/handed_coordinate_space.hpp" + +#if __cplusplus >= 201103L +#include "./gtx/hash.hpp" +#endif + +#include "./gtx/integer.hpp" +#include "./gtx/intersect.hpp" +#include "./gtx/io.hpp" +#include "./gtx/log_base.hpp" +#include "./gtx/matrix_cross_product.hpp" +#include "./gtx/matrix_decompose.hpp" +#include "./gtx/matrix_factorisation.hpp" +#include "./gtx/matrix_interpolation.hpp" +#include "./gtx/matrix_major_storage.hpp" +#include "./gtx/matrix_operation.hpp" +#include "./gtx/matrix_query.hpp" +#include "./gtx/mixed_product.hpp" +#include "./gtx/norm.hpp" +#include "./gtx/normal.hpp" +#include "./gtx/normalize_dot.hpp" +#include "./gtx/number_precision.hpp" +#include "./gtx/optimum_pow.hpp" +#include "./gtx/orthonormalize.hpp" +#include "./gtx/pca.hpp" +#include "./gtx/perpendicular.hpp" +#include "./gtx/polar_coordinates.hpp" +#include "./gtx/projection.hpp" +#include "./gtx/quaternion.hpp" +#include "./gtx/raw_data.hpp" +#include "./gtx/rotate_normalized_axis.hpp" +#include "./gtx/rotate_vector.hpp" +#include "./gtx/spline.hpp" +#include "./gtx/std_based_type.hpp" +#if !((GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP)) +# include "./gtx/string_cast.hpp" +#endif +#include "./gtx/transform.hpp" +#include "./gtx/transform2.hpp" +#include "./gtx/vec_swizzle.hpp" +#include "./gtx/vector_angle.hpp" +#include "./gtx/vector_query.hpp" +#include "./gtx/wrap.hpp" + +#if GLM_HAS_TEMPLATE_ALIASES +# include "./gtx/scalar_multiplication.hpp" +#endif + +#if GLM_HAS_RANGE_FOR +# include "./gtx/range.hpp" +#endif +#endif//GLM_ENABLE_EXPERIMENTAL diff --git a/includes/glm/ext/_matrix_vectorize.hpp b/includes/glm/ext/_matrix_vectorize.hpp new file 
mode 100644 index 0000000..0d08117 --- /dev/null +++ b/includes/glm/ext/_matrix_vectorize.hpp @@ -0,0 +1,128 @@ +#pragma once + +namespace glm { + + namespace detail { + + template class mat, length_t C, length_t R, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<2, 2, T, Q> call(Ret (*Func)(T x), mat<2, 2, T, Q> const &x) { + return mat<2, 2, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), + Func(x[1][0]), Func(x[1][1]) + ); + } + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<2, 3, T, Q> call(Ret (*Func)(T x), mat<2, 3, T, Q> const &x) { + return mat<2, 3, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), Func(x[0][2]), + Func(x[1][0]), Func(x[1][1]), Func(x[1][2]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<2, 4, T, Q> call(Ret (*Func)(T x), mat<2, 4, T, Q> const &x) { + return mat<2, 4, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), Func(x[0][2]), Func(x[0][3]), + Func(x[1][0]), Func(x[1][1]), Func(x[1][2]), Func(x[1][3]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<3, 2, T, Q> call(Ret (*Func)(T x), mat<3, 2, T, Q> const &x) { + return mat<3, 2, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), + Func(x[1][0]), Func(x[1][1]), + Func(x[2][0]), Func(x[2][1]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<3, 3, T, Q> call(Ret (*Func)(T x), mat<3, 3, T, Q> const &x) { + return mat<3, 3, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), Func(x[0][2]), + Func(x[1][0]), Func(x[1][1]), Func(x[1][2]), + Func(x[2][0]), Func(x[2][1]), Func(x[2][2]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<3, 4, T, Q> call(Ret (*Func)(T x), mat<3, 4, T, Q> const &x) { + return mat<3, 4, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), Func(x[0][2]), Func(x[0][3]), + Func(x[1][0]), Func(x[1][1]), Func(x[1][2]), Func(x[1][3]), + Func(x[2][0]), Func(x[2][1]), Func(x[2][2]), Func(x[2][3]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<4, 2, T, Q> call(Ret (*Func)(T x), mat<4, 2, T, Q> const &x) { + return mat<4, 2, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), + Func(x[1][0]), Func(x[1][1]), + Func(x[2][0]), Func(x[2][1]), + Func(x[3][0]), Func(x[3][1]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<4, 3, T, Q> call(Ret (*Func)(T x), mat<4, 3, T, Q> const &x) { + return mat<4, 3, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), Func(x[0][2]), + Func(x[1][0]), Func(x[1][1]), Func(x[1][2]), + Func(x[2][0]), Func(x[2][1]), Func(x[2][2]), + Func(x[3][0]), Func(x[3][1]), Func(x[3][2]) + ); + } + + }; + + template class mat, typename Ret, typename T, qualifier Q> + struct matrix_functor_1 { + + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat<4, 4, T, Q> call(Ret (*Func)(T x), mat<4, 4, T, Q> const &x) { + return mat<4, 4, Ret, Q>( + Func(x[0][0]), Func(x[0][1]), Func(x[0][2]), 
Func(x[0][3]), + Func(x[1][0]), Func(x[1][1]), Func(x[1][2]), Func(x[1][3]), + Func(x[2][0]), Func(x[2][1]), Func(x[2][2]), Func(x[2][3]), + Func(x[3][0]), Func(x[3][1]), Func(x[3][2]), Func(x[3][3]) + ); + } + + }; + + } + +}// namespace glm diff --git a/includes/glm/ext/matrix_clip_space.hpp b/includes/glm/ext/matrix_clip_space.hpp new file mode 100644 index 0000000..43579b8 --- /dev/null +++ b/includes/glm/ext/matrix_clip_space.hpp @@ -0,0 +1,522 @@ +/// @ref ext_matrix_clip_space +/// @file glm/ext/matrix_clip_space.hpp +/// +/// @defgroup ext_matrix_clip_space GLM_EXT_matrix_clip_space +/// @ingroup ext +/// +/// Defines functions that generate clip space transformation matrices. +/// +/// The matrices generated by this extension use standard OpenGL fixed-function +/// conventions. For example, the lookAt function generates a transform from world +/// space into the specific eye space that the projective matrix functions +/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility +/// specifications defines the particular layout of this eye space. +/// +/// Include to use the features of this extension. +/// +/// @see ext_matrix_transform +/// @see ext_matrix_projection + +#pragma once + +// Dependencies +#include "../ext/scalar_constants.hpp" +#include "../geometric.hpp" +#include "../trigonometric.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_clip_space extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_clip_space + /// @{ + + /// Creates a matrix for projecting two-dimensional coordinates onto the screen. + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top, T const& zNear, T const& zFar) + /// @see gluOrtho2D man page + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> ortho( + T left, T right, T bottom, T top); + + /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH_ZO( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume using left-handed coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH_NO( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using right-handed coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH_ZO( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using right-handed coordinates. 
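// [Editor's sketch -- not part of the patch] The two-dimensional ortho overload declared
// above maps coordinates straight to clip space with an implied -1..+1 depth range,
// which is a common way to draw HUD / overlay quads in pixel units. Standalone usage
// illustration for client code:
#include <glm/glm.hpp>
#include <glm/ext/matrix_clip_space.hpp>

inline glm::mat4 editor_sketch_overlay_projection(float width, float height)
{
	// Origin in the top-left corner, y growing downwards, as in most 2D UIs.
	return glm::ortho(0.0f, width, height, 0.0f);
}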
+ /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH_NO( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoZO( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoNO( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates. + /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using right-handed coordinates. + /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a matrix for an orthographic parallel viewing volume, using the default handedness and default near and far clip planes definition. + /// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. + /// + /// @tparam T A floating-point scalar type + /// + /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) + /// @see glOrtho man page + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> ortho( + T left, T right, T bottom, T top, T zNear, T zFar); + + /// Creates a left-handed frustum matrix. 
+ /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH_ZO( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a left-handed frustum matrix. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH_NO( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a right-handed frustum matrix. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH_ZO( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a right-handed frustum matrix. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH_NO( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a frustum matrix using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumZO( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a frustum matrix using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumNO( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a left-handed frustum matrix. + /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a right-handed frustum matrix. + /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH( + T left, T right, T bottom, T top, T near, T far); + + /// Creates a frustum matrix with default handedness, using the default handedness and default near and far clip planes definition. + /// To change default handedness use GLM_FORCE_LEFT_HANDED. 
To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. + /// + /// @tparam T A floating-point scalar type + /// @see glFrustum man page + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> frustum( + T left, T right, T bottom, T top, T near, T far); + + + /// Creates a matrix for a right-handed, symmetric perspective-view frustum. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. + /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). + /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). + /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH_ZO( + T fovy, T aspect, T near, T far); + + /// Creates a matrix for a right-handed, symmetric perspective-view frustum. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. + /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). + /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). + /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH_NO( + T fovy, T aspect, T near, T far); + + /// Creates a matrix for a left-handed, symmetric perspective-view frustum. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. + /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). + /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). + /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH_ZO( + T fovy, T aspect, T near, T far); + + /// Creates a matrix for a left-handed, symmetric perspective-view frustum. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. + /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). + /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). + /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). 
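// [Editor's sketch -- not part of the patch] The _ZO/_NO and _LH/_RH suffixes above make
// the clip-space convention explicit; the unsuffixed functions pick a convention from the
// configuration macros, which must be defined before the first glm include in the
// project. Standalone illustration for a 0..1 depth range (Direct3D / Vulkan style):
#define GLM_FORCE_DEPTH_ZERO_TO_ONE   // unsuffixed ortho/perspective now use the ZO variants
#include <glm/glm.hpp>
#include <glm/ext/matrix_clip_space.hpp>

inline glm::mat4 editor_sketch_zo_projection(float aspect)
{
	return glm::perspective(glm::radians(60.0f), aspect, 0.1f, 100.0f);
}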
+ /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH_NO( + T fovy, T aspect, T near, T far); + + /// Creates a matrix for a symmetric perspective-view frustum using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. + /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). + /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). + /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveZO( + T fovy, T aspect, T near, T far); + + /// Creates a matrix for a symmetric perspective-view frustum using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. + /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). + /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). + /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveNO( + T fovy, T aspect, T near, T far); + + /// Creates a matrix for a right-handed, symmetric perspective-view frustum. + /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. + /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). + /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). + /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). + /// + /// @tparam T A floating-point scalar type + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH( + T fovy, T aspect, T near, T far); + + /// Creates a matrix for a left-handed, symmetric perspective-view frustum. + /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. 
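The _LH/_RH and _ZO/_NO suffixes above select the handedness and the clip-space depth range independently, while the unsuffixed overloads dispatch on GLM_FORCE_LEFT_HANDED and GLM_FORCE_DEPTH_ZERO_TO_ONE. A minimal usage sketch, assuming the vendored glm directory is on the include path; the camera values are illustrative only:

// Illustrative only: picking the projection variant that matches the target API.
#include <glm/glm.hpp>
#include <glm/ext/matrix_clip_space.hpp>

int main()
{
	float const fovy   = glm::radians(60.0f);   // vertical field of view, in radians
	float const aspect = 16.0f / 9.0f;
	float const zNear  = 0.1f;
	float const zFar   = 100.0f;

	// Right-handed, depth in [-1, 1]: the OpenGL clip volume.
	glm::mat4 const projGL = glm::perspectiveRH_NO(fovy, aspect, zNear, zFar);

	// Right-handed, depth in [0, 1]: the Direct3D/Vulkan-style clip volume.
	glm::mat4 const projZO = glm::perspectiveRH_ZO(fovy, aspect, zNear, zFar);
	(void)projZO;

	// The unsuffixed overload follows GLM_FORCE_LEFT_HANDED / GLM_FORCE_DEPTH_ZERO_TO_ONE,
	// so with neither macro defined it matches the RH_NO variant.
	glm::mat4 const projDefault = glm::perspective(fovy, aspect, zNear, zFar);

	return (projDefault == projGL) ? 0 : 1;
}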
+	/// Creates a matrix for a left-handed, symmetric perspective-view frustum.
+	/// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+	/// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+	///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH(
+		T fovy, T aspect, T near, T far);
+
+	/// Creates a matrix for a symmetric perspective-view frustum based on the default handedness and default near and far clip planes definition.
+	/// To change the default handedness use GLM_FORCE_LEFT_HANDED. To change the default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
+	///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	/// @see gluPerspective man page
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspective(
+		T fovy, T aspect, T near, T far);
+
+	/// Builds a perspective projection matrix based on a field of view using right-handed coordinates.
+	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+	///
+	/// @param fov Expressed in radians.
+	/// @param width Width of the viewport
+	/// @param height Height of the viewport
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH_ZO(
+		T fov, T width, T height, T near, T far);
+
+	/// Builds a perspective projection matrix based on a field of view using right-handed coordinates.
+	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+	///
+	/// @param fov Expressed in radians.
+	/// @param width Width of the viewport
+	/// @param height Height of the viewport
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH_NO(
+		T fov, T width, T height, T near, T far);
+
+	/// Builds a perspective projection matrix based on a field of view using left-handed coordinates.
+	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+	///
+	/// @param fov Expressed in radians.
+	/// @param width Width of the viewport
+	/// @param height Height of the viewport
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH_ZO(
+		T fov, T width, T height, T near, T far);
+
+	/// Builds a perspective projection matrix based on a field of view using left-handed coordinates.
+	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+	///
+	/// @param fov Expressed in radians.
+	/// @param width Width of the viewport
+	/// @param height Height of the viewport
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH_NO(
+		T fov, T width, T height, T near, T far);
+
+	/// Builds a perspective projection matrix based on a field of view using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+	/// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+	///
+	/// @param fov Expressed in radians.
+	/// @param width Width of the viewport
+	/// @param height Height of the viewport
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovZO(
+		T fov, T width, T height, T near, T far);
+
+	/// Builds a perspective projection matrix based on a field of view using left-handed coordinates if GLM_FORCE_LEFT_HANDED is defined or right-handed coordinates otherwise.
+	/// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+	///
+	/// @param fov Expressed in radians.
+	/// @param width Width of the viewport
+	/// @param height Height of the viewport
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovNO(
+		T fov, T width, T height, T near, T far);
+
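The perspectiveFov* family takes the viewport dimensions instead of a precomputed aspect ratio; with fov interpreted as the vertical angle it describes the same symmetric frustum as perspective(fov, width / height, ...). A small sketch, assuming the vendored headers are on the include path; values are illustrative only:

// Illustrative only: building the projection straight from viewport dimensions.
#include <glm/glm.hpp>
#include <glm/ext/matrix_clip_space.hpp>

glm::mat4 makeProjection(float width, float height)
{
	float const fov   = glm::radians(60.0f); // vertical field of view, in radians
	float const zNear = 0.1f;
	float const zFar  = 100.0f;

	// Both calls describe the same symmetric frustum; the first avoids
	// computing the aspect ratio by hand.
	glm::mat4 const fromViewport = glm::perspectiveFov(fov, width, height, zNear, zFar);
	glm::mat4 const fromAspect   = glm::perspective(fov, width / height, zNear, zFar);

	(void)fromAspect; // kept only to show the correspondence
	return fromViewport;
}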
+	/// Builds a right-handed perspective projection matrix based on a field of view.
+	/// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+	/// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+	///
+	/// @param fov Expressed in radians.
+	/// @param width Width of the viewport
+	/// @param height Height of the viewport
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH(
+		T fov, T width, T height, T near, T far);
+
+	/// Builds a left-handed perspective projection matrix based on a field of view.
+	/// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition)
+	/// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition)
+	///
+	/// @param fov Expressed in radians.
+	/// @param width Width of the viewport
+	/// @param height Height of the viewport
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH(
+		T fov, T width, T height, T near, T far);
+
+	/// Builds a perspective projection matrix based on a field of view and the default handedness and default near and far clip planes definition.
+	/// To change the default handedness use GLM_FORCE_LEFT_HANDED. To change the default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE.
+	///
+	/// @param fov Expressed in radians.
+	/// @param width Width of the viewport
+	/// @param height Height of the viewport
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	/// @param far Specifies the distance from the viewer to the far clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFov(
+		T fov, T width, T height, T near, T far);
+
+	/// Creates a matrix for a left-handed, symmetric perspective-view frustum with the far plane at infinity.
+	///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspectiveLH(
+		T fovy, T aspect, T near);
+
+	/// Creates a matrix for a right-handed, symmetric perspective-view frustum with the far plane at infinity.
+	///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspectiveRH(
+		T fovy, T aspect, T near);
+
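The infinite-projection variants declared next drop the far plane entirely, which suits unbounded scenes; tweakedInfinitePerspective additionally biases depth by a small epsilon for hardware without depth clamping. A usage sketch, assuming the vendored headers are on the include path; the epsilon value is illustrative only:

// Illustrative only: projections with the far plane pushed to infinity.
#include <glm/glm.hpp>
#include <glm/ext/matrix_clip_space.hpp>

glm::mat4 infiniteProjection(float aspect)
{
	float const fovy  = glm::radians(60.0f);
	float const zNear = 0.1f;
	return glm::infinitePerspective(fovy, aspect, zNear);
}

glm::mat4 infiniteProjectionNoDepthClamp(float aspect)
{
	float const fovy  = glm::radians(60.0f);
	float const zNear = 0.1f;
	float const ep    = 1e-6f; // small depth bias; tune for the target depth-buffer precision
	return glm::tweakedInfinitePerspective(fovy, aspect, zNear, ep);
}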
+	/// Creates a matrix for a symmetric perspective-view frustum with the far plane at infinity, using the default handedness.
+	///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspective(
+		T fovy, T aspect, T near);
+
+	/// Creates a matrix for a symmetric perspective-view frustum with the far plane at infinity, for graphics hardware that doesn't support depth clamping.
+	///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> tweakedInfinitePerspective(
+		T fovy, T aspect, T near);
+
+	/// Creates a matrix for a symmetric perspective-view frustum with the far plane at infinity, for graphics hardware that doesn't support depth clamping.
+	///
+	/// @param fovy Specifies the field of view angle in the y direction. Expressed in radians.
+	/// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height).
+	/// @param near Specifies the distance from the viewer to the near clipping plane (always positive).
+	/// @param ep Epsilon
+	///
+	/// @tparam T A floating-point scalar type
+	template<typename T>
+	GLM_FUNC_DECL mat<4, 4, T, defaultp> tweakedInfinitePerspective(
+		T fovy, T aspect, T near, T ep);
+
+	/// @}
+}//namespace glm
+
+#include "matrix_clip_space.inl"
diff --git a/includes/glm/ext/matrix_clip_space.inl b/includes/glm/ext/matrix_clip_space.inl
new file mode 100644
index 0000000..27fb6a1
--- /dev/null
+++ b/includes/glm/ext/matrix_clip_space.inl
@@ -0,0 +1,595 @@
+namespace glm
+{
+	template<typename T>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> ortho(T left, T right, T bottom, T top)
+	{
+		mat<4, 4, T, defaultp> Result(static_cast<T>(1));
+		Result[0][0] = static_cast<T>(2) / (right - left);
+		Result[1][1] = static_cast<T>(2) / (top - bottom);
+		Result[2][2] = - static_cast<T>(1);
+		Result[3][0] = - (right + left) / (right - left);
+		Result[3][1] = - (top + bottom) / (top - bottom);
+		return Result;
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH_ZO(T left, T right, T bottom, T top, T zNear, T zFar)
+	{
+		mat<4, 4, T, defaultp> Result(1);
+		Result[0][0] = static_cast<T>(2) / (right - left);
+		Result[1][1] = static_cast<T>(2) / (top - bottom);
+		Result[2][2] = static_cast<T>(1) / (zFar - zNear);
+		Result[3][0] = - (right + left) / (right - left);
+		Result[3][1] = - (top + bottom) / (top - bottom);
+		Result[3][2] = - zNear / (zFar - zNear);
+		return Result;
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH_NO(T left, T right, T bottom, T top, T zNear, T zFar)
+	{
+		mat<4, 4, T, defaultp> Result(1);
+		Result[0][0] = static_cast<T>(2) / (right - left);
+		Result[1][1] = static_cast<T>(2) / (top - bottom);
+		Result[2][2] = static_cast<T>(2) / (zFar - zNear);
+		Result[3][0] = - (right + left) / (right - left);
+		Result[3][1] = - (top + bottom) / (top - bottom);
+		Result[3][2] = - (zFar + zNear) / (zFar - zNear);
+		return Result;
+	}
+
+	template<typename T>
+	GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH_ZO(T
left, T right, T bottom, T top, T zNear, T zFar) + { + mat<4, 4, T, defaultp> Result(1); + Result[0][0] = static_cast(2) / (right - left); + Result[1][1] = static_cast(2) / (top - bottom); + Result[2][2] = - static_cast(1) / (zFar - zNear); + Result[3][0] = - (right + left) / (right - left); + Result[3][1] = - (top + bottom) / (top - bottom); + Result[3][2] = - zNear / (zFar - zNear); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH_NO(T left, T right, T bottom, T top, T zNear, T zFar) + { + mat<4, 4, T, defaultp> Result(1); + Result[0][0] = static_cast(2) / (right - left); + Result[1][1] = static_cast(2) / (top - bottom); + Result[2][2] = - static_cast(2) / (zFar - zNear); + Result[3][0] = - (right + left) / (right - left); + Result[3][1] = - (top + bottom) / (top - bottom); + Result[3][2] = - (zFar + zNear) / (zFar - zNear); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoZO(T left, T right, T bottom, T top, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT + return orthoLH_ZO(left, right, bottom, top, zNear, zFar); +# else + return orthoRH_ZO(left, right, bottom, top, zNear, zFar); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoNO(T left, T right, T bottom, T top, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT + return orthoLH_NO(left, right, bottom, top, zNear, zFar); +# else + return orthoRH_NO(left, right, bottom, top, zNear, zFar); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH(T left, T right, T bottom, T top, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT + return orthoLH_ZO(left, right, bottom, top, zNear, zFar); +# else + return orthoLH_NO(left, right, bottom, top, zNear, zFar); +# endif + + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH(T left, T right, T bottom, T top, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT + return orthoRH_ZO(left, right, bottom, top, zNear, zFar); +# else + return orthoRH_NO(left, right, bottom, top, zNear, zFar); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> ortho(T left, T right, T bottom, T top, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO + return orthoLH_ZO(left, right, bottom, top, zNear, zFar); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO + return orthoLH_NO(left, right, bottom, top, zNear, zFar); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO + return orthoRH_ZO(left, right, bottom, top, zNear, zFar); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO + return orthoRH_NO(left, right, bottom, top, zNear, zFar); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH_ZO(T left, T right, T bottom, T top, T nearVal, T farVal) + { + mat<4, 4, T, defaultp> Result(0); + Result[0][0] = (static_cast(2) * nearVal) / (right - left); + Result[1][1] = (static_cast(2) * nearVal) / (top - bottom); + Result[2][0] = -(right + left) / (right - left); + Result[2][1] = -(top + bottom) / (top - bottom); + Result[2][2] = farVal / (farVal - nearVal); + Result[2][3] = static_cast(1); + Result[3][2] = -(farVal * nearVal) / (farVal - nearVal); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH_NO(T left, T right, T bottom, T top, T nearVal, T farVal) + { + mat<4, 4, T, defaultp> Result(0); + Result[0][0] = (static_cast(2) * nearVal) / (right - left); + 
Result[1][1] = (static_cast(2) * nearVal) / (top - bottom); + Result[2][0] = -(right + left) / (right - left); + Result[2][1] = -(top + bottom) / (top - bottom); + Result[2][2] = (farVal + nearVal) / (farVal - nearVal); + Result[2][3] = static_cast(1); + Result[3][2] = - (static_cast(2) * farVal * nearVal) / (farVal - nearVal); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH_ZO(T left, T right, T bottom, T top, T nearVal, T farVal) + { + mat<4, 4, T, defaultp> Result(0); + Result[0][0] = (static_cast(2) * nearVal) / (right - left); + Result[1][1] = (static_cast(2) * nearVal) / (top - bottom); + Result[2][0] = (right + left) / (right - left); + Result[2][1] = (top + bottom) / (top - bottom); + Result[2][2] = farVal / (nearVal - farVal); + Result[2][3] = static_cast(-1); + Result[3][2] = -(farVal * nearVal) / (farVal - nearVal); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH_NO(T left, T right, T bottom, T top, T nearVal, T farVal) + { + mat<4, 4, T, defaultp> Result(0); + Result[0][0] = (static_cast(2) * nearVal) / (right - left); + Result[1][1] = (static_cast(2) * nearVal) / (top - bottom); + Result[2][0] = (right + left) / (right - left); + Result[2][1] = (top + bottom) / (top - bottom); + Result[2][2] = - (farVal + nearVal) / (farVal - nearVal); + Result[2][3] = static_cast(-1); + Result[3][2] = - (static_cast(2) * farVal * nearVal) / (farVal - nearVal); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumZO(T left, T right, T bottom, T top, T nearVal, T farVal) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT + return frustumLH_ZO(left, right, bottom, top, nearVal, farVal); +# else + return frustumRH_ZO(left, right, bottom, top, nearVal, farVal); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumNO(T left, T right, T bottom, T top, T nearVal, T farVal) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT + return frustumLH_NO(left, right, bottom, top, nearVal, farVal); +# else + return frustumRH_NO(left, right, bottom, top, nearVal, farVal); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH(T left, T right, T bottom, T top, T nearVal, T farVal) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT + return frustumLH_ZO(left, right, bottom, top, nearVal, farVal); +# else + return frustumLH_NO(left, right, bottom, top, nearVal, farVal); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH(T left, T right, T bottom, T top, T nearVal, T farVal) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT + return frustumRH_ZO(left, right, bottom, top, nearVal, farVal); +# else + return frustumRH_NO(left, right, bottom, top, nearVal, farVal); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustum(T left, T right, T bottom, T top, T nearVal, T farVal) + { +# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO + return frustumLH_ZO(left, right, bottom, top, nearVal, farVal); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO + return frustumLH_NO(left, right, bottom, top, nearVal, farVal); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO + return frustumRH_ZO(left, right, bottom, top, nearVal, farVal); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO + return frustumRH_NO(left, right, bottom, top, nearVal, farVal); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH_ZO(T fovy, T aspect, T zNear, T 
zFar) + { + assert(abs(aspect - std::numeric_limits::epsilon()) > static_cast(0)); + + T const tanHalfFovy = tan(fovy / static_cast(2)); + + mat<4, 4, T, defaultp> Result(static_cast(0)); + Result[0][0] = static_cast(1) / (aspect * tanHalfFovy); + Result[1][1] = static_cast(1) / (tanHalfFovy); + Result[2][2] = zFar / (zNear - zFar); + Result[2][3] = - static_cast(1); + Result[3][2] = -(zFar * zNear) / (zFar - zNear); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH_NO(T fovy, T aspect, T zNear, T zFar) + { + assert(abs(aspect - std::numeric_limits::epsilon()) > static_cast(0)); + + T const tanHalfFovy = tan(fovy / static_cast(2)); + + mat<4, 4, T, defaultp> Result(static_cast(0)); + Result[0][0] = static_cast(1) / (aspect * tanHalfFovy); + Result[1][1] = static_cast(1) / (tanHalfFovy); + Result[2][2] = - (zFar + zNear) / (zFar - zNear); + Result[2][3] = - static_cast(1); + Result[3][2] = - (static_cast(2) * zFar * zNear) / (zFar - zNear); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH_ZO(T fovy, T aspect, T zNear, T zFar) + { + assert(abs(aspect - std::numeric_limits::epsilon()) > static_cast(0)); + + T const tanHalfFovy = tan(fovy / static_cast(2)); + + mat<4, 4, T, defaultp> Result(static_cast(0)); + Result[0][0] = static_cast(1) / (aspect * tanHalfFovy); + Result[1][1] = static_cast(1) / (tanHalfFovy); + Result[2][2] = zFar / (zFar - zNear); + Result[2][3] = static_cast(1); + Result[3][2] = -(zFar * zNear) / (zFar - zNear); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH_NO(T fovy, T aspect, T zNear, T zFar) + { + assert(abs(aspect - std::numeric_limits::epsilon()) > static_cast(0)); + + T const tanHalfFovy = tan(fovy / static_cast(2)); + + mat<4, 4, T, defaultp> Result(static_cast(0)); + Result[0][0] = static_cast(1) / (aspect * tanHalfFovy); + Result[1][1] = static_cast(1) / (tanHalfFovy); + Result[2][2] = (zFar + zNear) / (zFar - zNear); + Result[2][3] = static_cast(1); + Result[3][2] = - (static_cast(2) * zFar * zNear) / (zFar - zNear); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveZO(T fovy, T aspect, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT + return perspectiveLH_ZO(fovy, aspect, zNear, zFar); +# else + return perspectiveRH_ZO(fovy, aspect, zNear, zFar); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveNO(T fovy, T aspect, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT + return perspectiveLH_NO(fovy, aspect, zNear, zFar); +# else + return perspectiveRH_NO(fovy, aspect, zNear, zFar); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH(T fovy, T aspect, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT + return perspectiveLH_ZO(fovy, aspect, zNear, zFar); +# else + return perspectiveLH_NO(fovy, aspect, zNear, zFar); +# endif + + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH(T fovy, T aspect, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT + return perspectiveRH_ZO(fovy, aspect, zNear, zFar); +# else + return perspectiveRH_NO(fovy, aspect, zNear, zFar); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspective(T fovy, T aspect, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO + return perspectiveLH_ZO(fovy, aspect, zNear, zFar); +# elif 
GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO + return perspectiveLH_NO(fovy, aspect, zNear, zFar); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO + return perspectiveRH_ZO(fovy, aspect, zNear, zFar); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO + return perspectiveRH_NO(fovy, aspect, zNear, zFar); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH_ZO(T fov, T width, T height, T zNear, T zFar) + { + assert(width > static_cast(0)); + assert(height > static_cast(0)); + assert(fov > static_cast(0)); + + T const rad = fov; + T const h = glm::cos(static_cast(0.5) * rad) / glm::sin(static_cast(0.5) * rad); + T const w = h * height / width; ///todo max(width , Height) / min(width , Height)? + + mat<4, 4, T, defaultp> Result(static_cast(0)); + Result[0][0] = w; + Result[1][1] = h; + Result[2][2] = zFar / (zNear - zFar); + Result[2][3] = - static_cast(1); + Result[3][2] = -(zFar * zNear) / (zFar - zNear); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH_NO(T fov, T width, T height, T zNear, T zFar) + { + assert(width > static_cast(0)); + assert(height > static_cast(0)); + assert(fov > static_cast(0)); + + T const rad = fov; + T const h = glm::cos(static_cast(0.5) * rad) / glm::sin(static_cast(0.5) * rad); + T const w = h * height / width; ///todo max(width , Height) / min(width , Height)? + + mat<4, 4, T, defaultp> Result(static_cast(0)); + Result[0][0] = w; + Result[1][1] = h; + Result[2][2] = - (zFar + zNear) / (zFar - zNear); + Result[2][3] = - static_cast(1); + Result[3][2] = - (static_cast(2) * zFar * zNear) / (zFar - zNear); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH_ZO(T fov, T width, T height, T zNear, T zFar) + { + assert(width > static_cast(0)); + assert(height > static_cast(0)); + assert(fov > static_cast(0)); + + T const rad = fov; + T const h = glm::cos(static_cast(0.5) * rad) / glm::sin(static_cast(0.5) * rad); + T const w = h * height / width; ///todo max(width , Height) / min(width , Height)? + + mat<4, 4, T, defaultp> Result(static_cast(0)); + Result[0][0] = w; + Result[1][1] = h; + Result[2][2] = zFar / (zFar - zNear); + Result[2][3] = static_cast(1); + Result[3][2] = -(zFar * zNear) / (zFar - zNear); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH_NO(T fov, T width, T height, T zNear, T zFar) + { + assert(width > static_cast(0)); + assert(height > static_cast(0)); + assert(fov > static_cast(0)); + + T const rad = fov; + T const h = glm::cos(static_cast(0.5) * rad) / glm::sin(static_cast(0.5) * rad); + T const w = h * height / width; ///todo max(width , Height) / min(width , Height)? 
+ + mat<4, 4, T, defaultp> Result(static_cast(0)); + Result[0][0] = w; + Result[1][1] = h; + Result[2][2] = (zFar + zNear) / (zFar - zNear); + Result[2][3] = static_cast(1); + Result[3][2] = - (static_cast(2) * zFar * zNear) / (zFar - zNear); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovZO(T fov, T width, T height, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT + return perspectiveFovLH_ZO(fov, width, height, zNear, zFar); +# else + return perspectiveFovRH_ZO(fov, width, height, zNear, zFar); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovNO(T fov, T width, T height, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT + return perspectiveFovLH_NO(fov, width, height, zNear, zFar); +# else + return perspectiveFovRH_NO(fov, width, height, zNear, zFar); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH(T fov, T width, T height, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT + return perspectiveFovLH_ZO(fov, width, height, zNear, zFar); +# else + return perspectiveFovLH_NO(fov, width, height, zNear, zFar); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH(T fov, T width, T height, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT + return perspectiveFovRH_ZO(fov, width, height, zNear, zFar); +# else + return perspectiveFovRH_NO(fov, width, height, zNear, zFar); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFov(T fov, T width, T height, T zNear, T zFar) + { +# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO + return perspectiveFovLH_ZO(fov, width, height, zNear, zFar); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO + return perspectiveFovLH_NO(fov, width, height, zNear, zFar); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO + return perspectiveFovRH_ZO(fov, width, height, zNear, zFar); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO + return perspectiveFovRH_NO(fov, width, height, zNear, zFar); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveRH_NO(T fovy, T aspect, T zNear) + { + T const range = tan(fovy / static_cast(2)) * zNear; + T const left = -range * aspect; + T const right = range * aspect; + T const bottom = -range; + T const top = range; + + mat<4, 4, T, defaultp> Result(static_cast(0)); + Result[0][0] = (static_cast(2) * zNear) / (right - left); + Result[1][1] = (static_cast(2) * zNear) / (top - bottom); + Result[2][2] = - static_cast(1); + Result[2][3] = - static_cast(1); + Result[3][2] = - static_cast(2) * zNear; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveRH_ZO(T fovy, T aspect, T zNear) + { + T const range = tan(fovy / static_cast(2)) * zNear; + T const left = -range * aspect; + T const right = range * aspect; + T const bottom = -range; + T const top = range; + + mat<4, 4, T, defaultp> Result(static_cast(0)); + Result[0][0] = (static_cast(2) * zNear) / (right - left); + Result[1][1] = (static_cast(2) * zNear) / (top - bottom); + Result[2][2] = - static_cast(1); + Result[2][3] = - static_cast(1); + Result[3][2] = - zNear; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveLH_NO(T fovy, T aspect, T zNear) + { + T const range = tan(fovy / static_cast(2)) * zNear; + T const left = -range * aspect; + T const right 
= range * aspect; + T const bottom = -range; + T const top = range; + + mat<4, 4, T, defaultp> Result(T(0)); + Result[0][0] = (static_cast(2) * zNear) / (right - left); + Result[1][1] = (static_cast(2) * zNear) / (top - bottom); + Result[2][2] = static_cast(1); + Result[2][3] = static_cast(1); + Result[3][2] = - static_cast(2) * zNear; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveLH_ZO(T fovy, T aspect, T zNear) + { + T const range = tan(fovy / static_cast(2)) * zNear; + T const left = -range * aspect; + T const right = range * aspect; + T const bottom = -range; + T const top = range; + + mat<4, 4, T, defaultp> Result(T(0)); + Result[0][0] = (static_cast(2) * zNear) / (right - left); + Result[1][1] = (static_cast(2) * zNear) / (top - bottom); + Result[2][2] = static_cast(1); + Result[2][3] = static_cast(1); + Result[3][2] = - zNear; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspective(T fovy, T aspect, T zNear) + { +# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO + return infinitePerspectiveLH_ZO(fovy, aspect, zNear); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO + return infinitePerspectiveLH_NO(fovy, aspect, zNear); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO + return infinitePerspectiveRH_ZO(fovy, aspect, zNear); +# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO + return infinitePerspectiveRH_NO(fovy, aspect, zNear); +# endif + } + + // Infinite projection matrix: http://www.terathon.com/gdc07_lengyel.pdf + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> tweakedInfinitePerspective(T fovy, T aspect, T zNear, T ep) + { + T const range = tan(fovy / static_cast(2)) * zNear; + T const left = -range * aspect; + T const right = range * aspect; + T const bottom = -range; + T const top = range; + + mat<4, 4, T, defaultp> Result(static_cast(0)); + Result[0][0] = (static_cast(2) * zNear) / (right - left); + Result[1][1] = (static_cast(2) * zNear) / (top - bottom); + Result[2][2] = ep - static_cast(1); + Result[2][3] = static_cast(-1); + Result[3][2] = (ep - static_cast(2)) * zNear; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> tweakedInfinitePerspective(T fovy, T aspect, T zNear) + { + return tweakedInfinitePerspective(fovy, aspect, zNear, epsilon()); + } +}//namespace glm diff --git a/includes/glm/ext/matrix_common.hpp b/includes/glm/ext/matrix_common.hpp new file mode 100644 index 0000000..6bb3d06 --- /dev/null +++ b/includes/glm/ext/matrix_common.hpp @@ -0,0 +1,39 @@ +/// @ref ext_matrix_common +/// @file glm/ext/matrix_common.hpp +/// +/// @defgroup ext_matrix_common GLM_EXT_matrix_common +/// @ingroup ext +/// +/// Defines functions for common matrix operations. +/// +/// Include to use the features of this extension. 
+/// +/// @see ext_matrix_common + +#pragma once + +#include "../detail/qualifier.hpp" +#include "../detail/_fixes.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_common extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_common + /// @{ + + template + GLM_FUNC_DECL mat mix(mat const& x, mat const& y, mat const& a); + + template + GLM_FUNC_DECL mat mix(mat const& x, mat const& y, U a); + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat abs(mat const& x); + + /// @} +}//namespace glm + +#include "matrix_common.inl" diff --git a/includes/glm/ext/matrix_common.inl b/includes/glm/ext/matrix_common.inl new file mode 100644 index 0000000..1be4222 --- /dev/null +++ b/includes/glm/ext/matrix_common.inl @@ -0,0 +1,34 @@ +#include "../matrix.hpp" + +#include "_matrix_vectorize.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat mix(mat const& x, mat const& y, U a) + { + return mat(x) * (static_cast(1) - a) + mat(y) * a; + } + + template + GLM_FUNC_QUALIFIER mat mix(mat const& x, mat const& y, mat const& a) + { + return matrixCompMult(mat(x), static_cast(1) - a) + matrixCompMult(mat(y), a); + } + + template + struct compute_abs_matrix + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static mat call(mat const& x) + { + return detail::matrix_functor_1::call(abs, x); + } + }; + + template + GLM_FUNC_DECL GLM_CONSTEXPR mat abs(mat const& x) + { + return compute_abs_matrix::value>::call(x); + } + +}//namespace glm diff --git a/includes/glm/ext/matrix_double2x2.hpp b/includes/glm/ext/matrix_double2x2.hpp new file mode 100644 index 0000000..94dca54 --- /dev/null +++ b/includes/glm/ext/matrix_double2x2.hpp @@ -0,0 +1,23 @@ +/// @ref core +/// @file glm/ext/matrix_double2x2.hpp + +#pragma once +#include "../detail/type_mat2x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 2 columns of 2 components matrix of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<2, 2, double, defaultp> dmat2x2; + + /// 2 columns of 2 components matrix of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<2, 2, double, defaultp> dmat2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double2x2_precision.hpp b/includes/glm/ext/matrix_double2x2_precision.hpp new file mode 100644 index 0000000..9e2c174 --- /dev/null +++ b/includes/glm/ext/matrix_double2x2_precision.hpp @@ -0,0 +1,49 @@ +/// @ref core +/// @file glm/ext/matrix_double2x2_precision.hpp + +#pragma once +#include "../detail/type_mat2x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 2 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, double, lowp> lowp_dmat2; + + /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, double, mediump> mediump_dmat2; + + /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, double, highp> highp_dmat2; + + /// 2 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, double, lowp> lowp_dmat2x2; + + /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, double, mediump> mediump_dmat2x2; + + /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, double, highp> highp_dmat2x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double2x3.hpp b/includes/glm/ext/matrix_double2x3.hpp new file mode 100644 index 0000000..bfef87a --- /dev/null +++ b/includes/glm/ext/matrix_double2x3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_double2x3.hpp + +#pragma once +#include "../detail/type_mat2x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 2 columns of 3 components matrix of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<2, 3, double, defaultp> dmat2x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double2x3_precision.hpp b/includes/glm/ext/matrix_double2x3_precision.hpp new file mode 100644 index 0000000..098fb60 --- /dev/null +++ b/includes/glm/ext/matrix_double2x3_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_double2x3_precision.hpp + +#pragma once +#include "../detail/type_mat2x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 2 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 3, double, lowp> lowp_dmat2x3; + + /// 2 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 3, double, mediump> mediump_dmat2x3; + + /// 2 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 3, double, highp> highp_dmat2x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double2x4.hpp b/includes/glm/ext/matrix_double2x4.hpp new file mode 100644 index 0000000..499284b --- /dev/null +++ b/includes/glm/ext/matrix_double2x4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_double2x4.hpp + +#pragma once +#include "../detail/type_mat2x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 2 columns of 4 components matrix of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<2, 4, double, defaultp> dmat2x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double2x4_precision.hpp b/includes/glm/ext/matrix_double2x4_precision.hpp new file mode 100644 index 0000000..9b61ebc --- /dev/null +++ b/includes/glm/ext/matrix_double2x4_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_double2x4_precision.hpp + +#pragma once +#include "../detail/type_mat2x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 2 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 4, double, lowp> lowp_dmat2x4; + + /// 2 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 4, double, mediump> mediump_dmat2x4; + + /// 2 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 4, double, highp> highp_dmat2x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double3x2.hpp b/includes/glm/ext/matrix_double3x2.hpp new file mode 100644 index 0000000..dd23f36 --- /dev/null +++ b/includes/glm/ext/matrix_double3x2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_double3x2.hpp + +#pragma once +#include "../detail/type_mat3x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 3 columns of 2 components matrix of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<3, 2, double, defaultp> dmat3x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double3x2_precision.hpp b/includes/glm/ext/matrix_double3x2_precision.hpp new file mode 100644 index 0000000..068d9e9 --- /dev/null +++ b/includes/glm/ext/matrix_double3x2_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_double3x2_precision.hpp + +#pragma once +#include "../detail/type_mat3x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 3 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 2, double, lowp> lowp_dmat3x2; + + /// 3 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 2, double, mediump> mediump_dmat3x2; + + /// 3 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 2, double, highp> highp_dmat3x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double3x3.hpp b/includes/glm/ext/matrix_double3x3.hpp new file mode 100644 index 0000000..53572b7 --- /dev/null +++ b/includes/glm/ext/matrix_double3x3.hpp @@ -0,0 +1,23 @@ +/// @ref core +/// @file glm/ext/matrix_double3x3.hpp + +#pragma once +#include "../detail/type_mat3x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 3 columns of 3 components matrix of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<3, 3, double, defaultp> dmat3x3; + + /// 3 columns of 3 components matrix of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<3, 3, double, defaultp> dmat3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double3x3_precision.hpp b/includes/glm/ext/matrix_double3x3_precision.hpp new file mode 100644 index 0000000..8691e78 --- /dev/null +++ b/includes/glm/ext/matrix_double3x3_precision.hpp @@ -0,0 +1,49 @@ +/// @ref core +/// @file glm/ext/matrix_double3x3_precision.hpp + +#pragma once +#include "../detail/type_mat3x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 3 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, double, lowp> lowp_dmat3; + + /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, double, mediump> mediump_dmat3; + + /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, double, highp> highp_dmat3; + + /// 3 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, double, lowp> lowp_dmat3x3; + + /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, double, mediump> mediump_dmat3x3; + + /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, double, highp> highp_dmat3x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double3x4.hpp b/includes/glm/ext/matrix_double3x4.hpp new file mode 100644 index 0000000..c572d63 --- /dev/null +++ b/includes/glm/ext/matrix_double3x4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_double3x4.hpp + +#pragma once +#include "../detail/type_mat3x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 3 columns of 4 components matrix of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<3, 4, double, defaultp> dmat3x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double3x4_precision.hpp b/includes/glm/ext/matrix_double3x4_precision.hpp new file mode 100644 index 0000000..f040217 --- /dev/null +++ b/includes/glm/ext/matrix_double3x4_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_double3x4_precision.hpp + +#pragma once +#include "../detail/type_mat3x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 3 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 4, double, lowp> lowp_dmat3x4; + + /// 3 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 4, double, mediump> mediump_dmat3x4; + + /// 3 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 4, double, highp> highp_dmat3x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double4x2.hpp b/includes/glm/ext/matrix_double4x2.hpp new file mode 100644 index 0000000..9b229f4 --- /dev/null +++ b/includes/glm/ext/matrix_double4x2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_double4x2.hpp + +#pragma once +#include "../detail/type_mat4x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 4 columns of 2 components matrix of double-precision floating-point numbers. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<4, 2, double, defaultp> dmat4x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double4x2_precision.hpp b/includes/glm/ext/matrix_double4x2_precision.hpp new file mode 100644 index 0000000..6ad18ba --- /dev/null +++ b/includes/glm/ext/matrix_double4x2_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_double4x2_precision.hpp + +#pragma once +#include "../detail/type_mat4x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 4 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 2, double, lowp> lowp_dmat4x2; + + /// 4 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 2, double, mediump> mediump_dmat4x2; + + /// 4 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 2, double, highp> highp_dmat4x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double4x3.hpp b/includes/glm/ext/matrix_double4x3.hpp new file mode 100644 index 0000000..dca4cf9 --- /dev/null +++ b/includes/glm/ext/matrix_double4x3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_double4x3.hpp + +#pragma once +#include "../detail/type_mat4x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 4 columns of 3 components matrix of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<4, 3, double, defaultp> dmat4x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double4x3_precision.hpp b/includes/glm/ext/matrix_double4x3_precision.hpp new file mode 100644 index 0000000..f7371de --- /dev/null +++ b/includes/glm/ext/matrix_double4x3_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_double4x3_precision.hpp + +#pragma once +#include "../detail/type_mat4x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 4 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 3, double, lowp> lowp_dmat4x3; + + /// 4 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 3, double, mediump> mediump_dmat4x3; + + /// 4 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 3, double, highp> highp_dmat4x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double4x4.hpp b/includes/glm/ext/matrix_double4x4.hpp new file mode 100644 index 0000000..81e1bf6 --- /dev/null +++ b/includes/glm/ext/matrix_double4x4.hpp @@ -0,0 +1,23 @@ +/// @ref core +/// @file glm/ext/matrix_double4x4.hpp + +#pragma once +#include "../detail/type_mat4x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 4 columns of 4 components matrix of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<4, 4, double, defaultp> dmat4x4; + + /// 4 columns of 4 components matrix of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<4, 4, double, defaultp> dmat4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_double4x4_precision.hpp b/includes/glm/ext/matrix_double4x4_precision.hpp new file mode 100644 index 0000000..4c36a84 --- /dev/null +++ b/includes/glm/ext/matrix_double4x4_precision.hpp @@ -0,0 +1,49 @@ +/// @ref core +/// @file glm/ext/matrix_double4x4_precision.hpp + +#pragma once +#include "../detail/type_mat4x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 4 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, double, lowp> lowp_dmat4; + + /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, double, mediump> mediump_dmat4; + + /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, double, highp> highp_dmat4; + + /// 4 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, double, lowp> lowp_dmat4x4; + + /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, double, mediump> mediump_dmat4x4; + + /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, double, highp> highp_dmat4x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float2x2.hpp b/includes/glm/ext/matrix_float2x2.hpp new file mode 100644 index 0000000..53df921 --- /dev/null +++ b/includes/glm/ext/matrix_float2x2.hpp @@ -0,0 +1,23 @@ +/// @ref core +/// @file glm/ext/matrix_float2x2.hpp + +#pragma once +#include "../detail/type_mat2x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 2 columns of 2 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<2, 2, float, defaultp> mat2x2; + + /// 2 columns of 2 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<2, 2, float, defaultp> mat2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float2x2_precision.hpp b/includes/glm/ext/matrix_float2x2_precision.hpp new file mode 100644 index 0000000..898b6db --- /dev/null +++ b/includes/glm/ext/matrix_float2x2_precision.hpp @@ -0,0 +1,49 @@ +/// @ref core +/// @file glm/ext/matrix_float2x2_precision.hpp + +#pragma once +#include "../detail/type_mat2x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 2 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, float, lowp> lowp_mat2; + + /// 2 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, float, mediump> mediump_mat2; + + /// 2 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, float, highp> highp_mat2; + + /// 2 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, float, lowp> lowp_mat2x2; + + /// 2 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, float, mediump> mediump_mat2x2; + + /// 2 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 2, float, highp> highp_mat2x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float2x3.hpp b/includes/glm/ext/matrix_float2x3.hpp new file mode 100644 index 0000000..6f68822 --- /dev/null +++ b/includes/glm/ext/matrix_float2x3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_float2x3.hpp + +#pragma once +#include "../detail/type_mat2x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 2 columns of 3 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<2, 3, float, defaultp> mat2x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float2x3_precision.hpp b/includes/glm/ext/matrix_float2x3_precision.hpp new file mode 100644 index 0000000..50c1032 --- /dev/null +++ b/includes/glm/ext/matrix_float2x3_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_float2x3_precision.hpp + +#pragma once +#include "../detail/type_mat2x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 2 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 3, float, lowp> lowp_mat2x3; + + /// 2 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 3, float, mediump> mediump_mat2x3; + + /// 2 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 3, float, highp> highp_mat2x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float2x4.hpp b/includes/glm/ext/matrix_float2x4.hpp new file mode 100644 index 0000000..30f30de --- /dev/null +++ b/includes/glm/ext/matrix_float2x4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_float2x4.hpp + +#pragma once +#include "../detail/type_mat2x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 2 columns of 4 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<2, 4, float, defaultp> mat2x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float2x4_precision.hpp b/includes/glm/ext/matrix_float2x4_precision.hpp new file mode 100644 index 0000000..079d638 --- /dev/null +++ b/includes/glm/ext/matrix_float2x4_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_float2x4_precision.hpp + +#pragma once +#include "../detail/type_mat2x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 2 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 4, float, lowp> lowp_mat2x4; + + /// 2 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 4, float, mediump> mediump_mat2x4; + + /// 2 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<2, 4, float, highp> highp_mat2x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float3x2.hpp b/includes/glm/ext/matrix_float3x2.hpp new file mode 100644 index 0000000..280d0a3 --- /dev/null +++ b/includes/glm/ext/matrix_float3x2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_float3x2.hpp + +#pragma once +#include "../detail/type_mat3x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 3 columns of 2 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<3, 2, float, defaultp> mat3x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float3x2_precision.hpp b/includes/glm/ext/matrix_float3x2_precision.hpp new file mode 100644 index 0000000..8572c2a --- /dev/null +++ b/includes/glm/ext/matrix_float3x2_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_float3x2_precision.hpp + +#pragma once +#include "../detail/type_mat3x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 3 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 2, float, lowp> lowp_mat3x2; + + /// 3 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 2, float, mediump> mediump_mat3x2; + + /// 3 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 2, float, highp> highp_mat3x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float3x3.hpp b/includes/glm/ext/matrix_float3x3.hpp new file mode 100644 index 0000000..177d809 --- /dev/null +++ b/includes/glm/ext/matrix_float3x3.hpp @@ -0,0 +1,23 @@ +/// @ref core +/// @file glm/ext/matrix_float3x3.hpp + +#pragma once +#include "../detail/type_mat3x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 3 columns of 3 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<3, 3, float, defaultp> mat3x3; + + /// 3 columns of 3 components matrix of single-precision floating-point numbers. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<3, 3, float, defaultp> mat3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float3x3_precision.hpp b/includes/glm/ext/matrix_float3x3_precision.hpp new file mode 100644 index 0000000..8a900c1 --- /dev/null +++ b/includes/glm/ext/matrix_float3x3_precision.hpp @@ -0,0 +1,49 @@ +/// @ref core +/// @file glm/ext/matrix_float3x3_precision.hpp + +#pragma once +#include "../detail/type_mat3x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 3 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, float, lowp> lowp_mat3; + + /// 3 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, float, mediump> mediump_mat3; + + /// 3 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, float, highp> highp_mat3; + + /// 3 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, float, lowp> lowp_mat3x3; + + /// 3 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, float, mediump> mediump_mat3x3; + + /// 3 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 3, float, highp> highp_mat3x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float3x4.hpp b/includes/glm/ext/matrix_float3x4.hpp new file mode 100644 index 0000000..64b8459 --- /dev/null +++ b/includes/glm/ext/matrix_float3x4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_float3x4.hpp + +#pragma once +#include "../detail/type_mat3x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 3 columns of 4 components matrix of single-precision floating-point numbers. 
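For the single-precision core types just defined (mat3/mat3x3 and their qualified variants), a short hypothetical example; glm::vec3 is assumed from the core vector headers, and GLM follows the GLSL column-major convention:

    // mat3 is 3 columns of 3 components; operator[] returns a column.
    glm::mat3 M(1.0f);                               // identity
    M[2] = glm::vec3(2.0f, 0.0f, 0.0f);              // overwrite the third column
    glm::vec3 v = M * glm::vec3(0.0f, 0.0f, 1.0f);   // picks that column: (2, 0, 0)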
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<3, 4, float, defaultp> mat3x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float3x4_precision.hpp b/includes/glm/ext/matrix_float3x4_precision.hpp new file mode 100644 index 0000000..bc36bf1 --- /dev/null +++ b/includes/glm/ext/matrix_float3x4_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_float3x4_precision.hpp + +#pragma once +#include "../detail/type_mat3x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 3 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 4, float, lowp> lowp_mat3x4; + + /// 3 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 4, float, mediump> mediump_mat3x4; + + /// 3 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<3, 4, float, highp> highp_mat3x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float4x2.hpp b/includes/glm/ext/matrix_float4x2.hpp new file mode 100644 index 0000000..1ed5227 --- /dev/null +++ b/includes/glm/ext/matrix_float4x2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_float4x2.hpp + +#pragma once +#include "../detail/type_mat4x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 4 columns of 2 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<4, 2, float, defaultp> mat4x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float4x2_precision.hpp b/includes/glm/ext/matrix_float4x2_precision.hpp new file mode 100644 index 0000000..88fd069 --- /dev/null +++ b/includes/glm/ext/matrix_float4x2_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_float2x2_precision.hpp + +#pragma once +#include "../detail/type_mat2x2.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 4 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 2, float, lowp> lowp_mat4x2; + + /// 4 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 2, float, mediump> mediump_mat4x2; + + /// 4 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 2, float, highp> highp_mat4x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float4x3.hpp b/includes/glm/ext/matrix_float4x3.hpp new file mode 100644 index 0000000..5dbe765 --- /dev/null +++ b/includes/glm/ext/matrix_float4x3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/matrix_float4x3.hpp + +#pragma once +#include "../detail/type_mat4x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix + /// @{ + + /// 4 columns of 3 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<4, 3, float, defaultp> mat4x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float4x3_precision.hpp b/includes/glm/ext/matrix_float4x3_precision.hpp new file mode 100644 index 0000000..846ed4f --- /dev/null +++ b/includes/glm/ext/matrix_float4x3_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/matrix_float4x3_precision.hpp + +#pragma once +#include "../detail/type_mat4x3.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 4 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 3, float, lowp> lowp_mat4x3; + + /// 4 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 3, float, mediump> mediump_mat4x3; + + /// 4 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 3, float, highp> highp_mat4x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float4x4.hpp b/includes/glm/ext/matrix_float4x4.hpp new file mode 100644 index 0000000..5ba111d --- /dev/null +++ b/includes/glm/ext/matrix_float4x4.hpp @@ -0,0 +1,23 @@ +/// @ref core +/// @file glm/ext/matrix_float4x4.hpp + +#pragma once +#include "../detail/type_mat4x4.hpp" + +namespace glm +{ + /// @ingroup core_matrix + /// @{ + + /// 4 columns of 4 components matrix of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<4, 4, float, defaultp> mat4x4; + + /// 4 columns of 4 components matrix of single-precision floating-point numbers. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + typedef mat<4, 4, float, defaultp> mat4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_float4x4_precision.hpp b/includes/glm/ext/matrix_float4x4_precision.hpp new file mode 100644 index 0000000..597149b --- /dev/null +++ b/includes/glm/ext/matrix_float4x4_precision.hpp @@ -0,0 +1,49 @@ +/// @ref core +/// @file glm/ext/matrix_float4x4_precision.hpp + +#pragma once +#include "../detail/type_mat4x4.hpp" + +namespace glm +{ + /// @addtogroup core_matrix_precision + /// @{ + + /// 4 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, float, lowp> lowp_mat4; + + /// 4 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, float, mediump> mediump_mat4; + + /// 4 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, float, highp> highp_mat4; + + /// 4 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, float, lowp> lowp_mat4x4; + + /// 4 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, float, mediump> mediump_mat4x4; + + /// 4 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef mat<4, 4, float, highp> highp_mat4x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int2x2.hpp b/includes/glm/ext/matrix_int2x2.hpp new file mode 100644 index 0000000..c6aa068 --- /dev/null +++ b/includes/glm/ext/matrix_int2x2.hpp @@ -0,0 +1,38 @@ +/// @ref ext_matrix_int2x2 +/// @file glm/ext/matrix_int2x2.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int2x2 GLM_EXT_matrix_int2x2 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int2x2 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int2x2 + /// @{ + + /// Signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2 + typedef mat<2, 2, int, defaultp> imat2x2; + + /// Signed integer 2x2 matrix. 
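Likewise for the 4x4 single-precision types, a minimal sketch (glm::vec4 is assumed from the core headers; the values are arbitrary):

    // mat4 and mat4x4 name the same mat<4, 4, float, defaultp>, so the two
    // spellings are interchangeable; the precision variants only change Q.
    glm::mat4   I(1.0f);                                      // identity
    glm::mat4x4 T = I;                                        // same type, direct copy
    glm::vec4   p = T * glm::vec4(1.0f, 2.0f, 3.0f, 1.0f);    // identity leaves p unchanged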
+ /// + /// @see ext_matrix_int2x2 + typedef mat<2, 2, int, defaultp> imat2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int2x2_sized.hpp b/includes/glm/ext/matrix_int2x2_sized.hpp new file mode 100644 index 0000000..70c0c21 --- /dev/null +++ b/includes/glm/ext/matrix_int2x2_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_matrix_int2x2_sized +/// @file glm/ext/matrix_int2x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int2x2_sized GLM_EXT_matrix_int2x2_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x2.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int2x2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int2x2_sized + /// @{ + + /// 8 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int8, defaultp> i8mat2x2; + + /// 16 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int16, defaultp> i16mat2x2; + + /// 32 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int32, defaultp> i32mat2x2; + + /// 64 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int64, defaultp> i64mat2x2; + + + /// 8 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int8, defaultp> i8mat2; + + /// 16 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int16, defaultp> i16mat2; + + /// 32 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int32, defaultp> i32mat2; + + /// 64 bit signed integer 2x2 matrix. + /// + /// @see ext_matrix_int2x2_sized + typedef mat<2, 2, int64, defaultp> i64mat2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int2x3.hpp b/includes/glm/ext/matrix_int2x3.hpp new file mode 100644 index 0000000..aee415c --- /dev/null +++ b/includes/glm/ext/matrix_int2x3.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_int2x3 +/// @file glm/ext/matrix_int2x3.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int2x3 GLM_EXT_matrix_int2x3 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x3.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int2x3 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int2x3 + /// @{ + + /// Signed integer 2x3 matrix. + /// + /// @see ext_matrix_int2x3 + typedef mat<2, 3, int, defaultp> imat2x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int2x3_sized.hpp b/includes/glm/ext/matrix_int2x3_sized.hpp new file mode 100644 index 0000000..b5526fe --- /dev/null +++ b/includes/glm/ext/matrix_int2x3_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_int2x3_sized +/// @file glm/ext/matrix_int2x3_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int2x3_sized GLM_EXT_matrix_int2x3_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. 
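A quick illustration of the sized typedefs listed above (the element widths come from scalar_int_sized.hpp, which these headers already include; the snippet itself is hypothetical):

    // i8mat2 stores glm::int8 components, i64mat2x2 stores glm::int64, and so on;
    // the 2x2 shape is the same, only the per-component storage width changes.
    glm::i32mat2x2 A(1);                 // 32-bit signed, scalar fills the diagonal
    glm::i8mat2    B(glm::int8(3));      // 8-bit signed, same shape
    static_assert(sizeof(glm::i8mat2) < sizeof(glm::i64mat2),
                  "sized variants differ in storage");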
+ +#pragma once + +// Dependency: +#include "../mat2x3.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int2x3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int2x3_sized + /// @{ + + /// 8 bit signed integer 2x3 matrix. + /// + /// @see ext_matrix_int2x3_sized + typedef mat<2, 3, int8, defaultp> i8mat2x3; + + /// 16 bit signed integer 2x3 matrix. + /// + /// @see ext_matrix_int2x3_sized + typedef mat<2, 3, int16, defaultp> i16mat2x3; + + /// 32 bit signed integer 2x3 matrix. + /// + /// @see ext_matrix_int2x3_sized + typedef mat<2, 3, int32, defaultp> i32mat2x3; + + /// 64 bit signed integer 2x3 matrix. + /// + /// @see ext_matrix_int2x3_sized + typedef mat<2, 3, int64, defaultp> i64mat2x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int2x4.hpp b/includes/glm/ext/matrix_int2x4.hpp new file mode 100644 index 0000000..4f36331 --- /dev/null +++ b/includes/glm/ext/matrix_int2x4.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_int2x4 +/// @file glm/ext/matrix_int2x4.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int2x4 GLM_EXT_matrix_int2x4 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int2x4 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int2x4 + /// @{ + + /// Signed integer 2x4 matrix. + /// + /// @see ext_matrix_int2x4 + typedef mat<2, 4, int, defaultp> imat2x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int2x4_sized.hpp b/includes/glm/ext/matrix_int2x4_sized.hpp new file mode 100644 index 0000000..a66a5e7 --- /dev/null +++ b/includes/glm/ext/matrix_int2x4_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_int2x4_sized +/// @file glm/ext/matrix_int2x4_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int2x4_sized GLM_EXT_matrix_int2x4_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x4.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int2x4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int2x4_sized + /// @{ + + /// 8 bit signed integer 2x4 matrix. + /// + /// @see ext_matrix_int2x4_sized + typedef mat<2, 4, int8, defaultp> i8mat2x4; + + /// 16 bit signed integer 2x4 matrix. + /// + /// @see ext_matrix_int2x4_sized + typedef mat<2, 4, int16, defaultp> i16mat2x4; + + /// 32 bit signed integer 2x4 matrix. + /// + /// @see ext_matrix_int2x4_sized + typedef mat<2, 4, int32, defaultp> i32mat2x4; + + /// 64 bit signed integer 2x4 matrix. 
+ /// + /// @see ext_matrix_int2x4_sized + typedef mat<2, 4, int64, defaultp> i64mat2x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int3x2.hpp b/includes/glm/ext/matrix_int3x2.hpp new file mode 100644 index 0000000..3bd563b --- /dev/null +++ b/includes/glm/ext/matrix_int3x2.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_int3x2 +/// @file glm/ext/matrix_int3x2.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int3x2 GLM_EXT_matrix_int3x2 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int3x2 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int3x2 + /// @{ + + /// Signed integer 3x2 matrix. + /// + /// @see ext_matrix_int3x2 + typedef mat<3, 2, int, defaultp> imat3x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int3x2_sized.hpp b/includes/glm/ext/matrix_int3x2_sized.hpp new file mode 100644 index 0000000..7e34c52 --- /dev/null +++ b/includes/glm/ext/matrix_int3x2_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_int3x2_sized +/// @file glm/ext/matrix_int3x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int3x2_sized GLM_EXT_matrix_int3x2_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x2.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int3x2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int3x2_sized + /// @{ + + /// 8 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_int3x2_sized + typedef mat<3, 2, int8, defaultp> i8mat3x2; + + /// 16 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_int3x2_sized + typedef mat<3, 2, int16, defaultp> i16mat3x2; + + /// 32 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_int3x2_sized + typedef mat<3, 2, int32, defaultp> i32mat3x2; + + /// 64 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_int3x2_sized + typedef mat<3, 2, int64, defaultp> i64mat3x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int3x3.hpp b/includes/glm/ext/matrix_int3x3.hpp new file mode 100644 index 0000000..287488d --- /dev/null +++ b/includes/glm/ext/matrix_int3x3.hpp @@ -0,0 +1,38 @@ +/// @ref ext_matrix_int3x3 +/// @file glm/ext/matrix_int3x3.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int3x3 GLM_EXT_matrix_int3x3 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x3.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int3x3 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int3x3 + /// @{ + + /// Signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3 + typedef mat<3, 3, int, defaultp> imat3x3; + + /// Signed integer 3x3 matrix. 
+ /// + /// @see ext_matrix_int3x3 + typedef mat<3, 3, int, defaultp> imat3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int3x3_sized.hpp b/includes/glm/ext/matrix_int3x3_sized.hpp new file mode 100644 index 0000000..577e305 --- /dev/null +++ b/includes/glm/ext/matrix_int3x3_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_matrix_int3x3_sized +/// @file glm/ext/matrix_int3x3_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int3x3_sized GLM_EXT_matrix_int3x3_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x3.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int3x3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int3x3_sized + /// @{ + + /// 8 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int8, defaultp> i8mat3x3; + + /// 16 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int16, defaultp> i16mat3x3; + + /// 32 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int32, defaultp> i32mat3x3; + + /// 64 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int64, defaultp> i64mat3x3; + + + /// 8 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int8, defaultp> i8mat3; + + /// 16 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int16, defaultp> i16mat3; + + /// 32 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int32, defaultp> i32mat3; + + /// 64 bit signed integer 3x3 matrix. + /// + /// @see ext_matrix_int3x3_sized + typedef mat<3, 3, int64, defaultp> i64mat3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int3x4.hpp b/includes/glm/ext/matrix_int3x4.hpp new file mode 100644 index 0000000..08e534d --- /dev/null +++ b/includes/glm/ext/matrix_int3x4.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_int3x4 +/// @file glm/ext/matrix_int3x4.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int3x4 GLM_EXT_matrix_int3x4 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int3x4 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int3x4 + /// @{ + + /// Signed integer 3x4 matrix. + /// + /// @see ext_matrix_int3x4 + typedef mat<3, 4, int, defaultp> imat3x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int3x4_sized.hpp b/includes/glm/ext/matrix_int3x4_sized.hpp new file mode 100644 index 0000000..692c48c --- /dev/null +++ b/includes/glm/ext/matrix_int3x4_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_int3x4_sized +/// @file glm/ext/matrix_int3x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int3x4_sized GLM_EXT_matrix_int3x4_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. 
+ +#pragma once + +// Dependency: +#include "../mat3x4.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int3x4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int3x4_sized + /// @{ + + /// 8 bit signed integer 3x4 matrix. + /// + /// @see ext_matrix_int3x4_sized + typedef mat<3, 4, int8, defaultp> i8mat3x4; + + /// 16 bit signed integer 3x4 matrix. + /// + /// @see ext_matrix_int3x4_sized + typedef mat<3, 4, int16, defaultp> i16mat3x4; + + /// 32 bit signed integer 3x4 matrix. + /// + /// @see ext_matrix_int3x4_sized + typedef mat<3, 4, int32, defaultp> i32mat3x4; + + /// 64 bit signed integer 3x4 matrix. + /// + /// @see ext_matrix_int3x4_sized + typedef mat<3, 4, int64, defaultp> i64mat3x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int4x2.hpp b/includes/glm/ext/matrix_int4x2.hpp new file mode 100644 index 0000000..f756ef2 --- /dev/null +++ b/includes/glm/ext/matrix_int4x2.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_int4x2 +/// @file glm/ext/matrix_int4x2.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int4x2 GLM_EXT_matrix_int4x2 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int4x2 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int4x2 + /// @{ + + /// Signed integer 4x2 matrix. + /// + /// @see ext_matrix_int4x2 + typedef mat<4, 2, int, defaultp> imat4x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int4x2_sized.hpp b/includes/glm/ext/matrix_int4x2_sized.hpp new file mode 100644 index 0000000..63a99d6 --- /dev/null +++ b/includes/glm/ext/matrix_int4x2_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_int4x2_sized +/// @file glm/ext/matrix_int4x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int4x2_sized GLM_EXT_matrix_int4x2_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x2.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int4x2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int4x2_sized + /// @{ + + /// 8 bit signed integer 4x2 matrix. + /// + /// @see ext_matrix_int4x2_sized + typedef mat<4, 2, int8, defaultp> i8mat4x2; + + /// 16 bit signed integer 4x2 matrix. + /// + /// @see ext_matrix_int4x2_sized + typedef mat<4, 2, int16, defaultp> i16mat4x2; + + /// 32 bit signed integer 4x2 matrix. + /// + /// @see ext_matrix_int4x2_sized + typedef mat<4, 2, int32, defaultp> i32mat4x2; + + /// 64 bit signed integer 4x2 matrix. 
+ /// + /// @see ext_matrix_int4x2_sized + typedef mat<4, 2, int64, defaultp> i64mat4x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int4x3.hpp b/includes/glm/ext/matrix_int4x3.hpp new file mode 100644 index 0000000..d5d97a7 --- /dev/null +++ b/includes/glm/ext/matrix_int4x3.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_int4x3 +/// @file glm/ext/matrix_int4x3.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int4x3 GLM_EXT_matrix_int4x3 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x3.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int4x3 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int4x3 + /// @{ + + /// Signed integer 4x3 matrix. + /// + /// @see ext_matrix_int4x3 + typedef mat<4, 3, int, defaultp> imat4x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int4x3_sized.hpp b/includes/glm/ext/matrix_int4x3_sized.hpp new file mode 100644 index 0000000..55078fa --- /dev/null +++ b/includes/glm/ext/matrix_int4x3_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_int4x3_sized +/// @file glm/ext/matrix_int4x3_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int4x3_sized GLM_EXT_matrix_int4x3_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x3.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int4x3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int4x3_sized + /// @{ + + /// 8 bit signed integer 4x3 matrix. + /// + /// @see ext_matrix_int4x3_sized + typedef mat<4, 3, int8, defaultp> i8mat4x3; + + /// 16 bit signed integer 4x3 matrix. + /// + /// @see ext_matrix_int4x3_sized + typedef mat<4, 3, int16, defaultp> i16mat4x3; + + /// 32 bit signed integer 4x3 matrix. + /// + /// @see ext_matrix_int4x3_sized + typedef mat<4, 3, int32, defaultp> i32mat4x3; + + /// 64 bit signed integer 4x3 matrix. + /// + /// @see ext_matrix_int4x3_sized + typedef mat<4, 3, int64, defaultp> i64mat4x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int4x4.hpp b/includes/glm/ext/matrix_int4x4.hpp new file mode 100644 index 0000000..e17cff1 --- /dev/null +++ b/includes/glm/ext/matrix_int4x4.hpp @@ -0,0 +1,38 @@ +/// @ref ext_matrix_int4x4 +/// @file glm/ext/matrix_int4x4.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int4x4 GLM_EXT_matrix_int4x4 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int4x4 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int4x4 + /// @{ + + /// Signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4 + typedef mat<4, 4, int, defaultp> imat4x4; + + /// Signed integer 4x4 matrix. 
+ /// + /// @see ext_matrix_int4x4 + typedef mat<4, 4, int, defaultp> imat4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_int4x4_sized.hpp b/includes/glm/ext/matrix_int4x4_sized.hpp new file mode 100644 index 0000000..4a11203 --- /dev/null +++ b/includes/glm/ext/matrix_int4x4_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_matrix_int4x4_sized +/// @file glm/ext/matrix_int4x4_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_int4x4_sized GLM_EXT_matrix_int4x4_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x4.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_int4x4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_int4x4_sized + /// @{ + + /// 8 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int8, defaultp> i8mat4x4; + + /// 16 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int16, defaultp> i16mat4x4; + + /// 32 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int32, defaultp> i32mat4x4; + + /// 64 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int64, defaultp> i64mat4x4; + + + /// 8 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int8, defaultp> i8mat4; + + /// 16 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int16, defaultp> i16mat4; + + /// 32 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int32, defaultp> i32mat4; + + /// 64 bit signed integer 4x4 matrix. + /// + /// @see ext_matrix_int4x4_sized + typedef mat<4, 4, int64, defaultp> i64mat4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_integer.hpp b/includes/glm/ext/matrix_integer.hpp new file mode 100644 index 0000000..7d7dfc5 --- /dev/null +++ b/includes/glm/ext/matrix_integer.hpp @@ -0,0 +1,91 @@ +/// @ref ext_matrix_integer +/// @file glm/ext/matrix_integer.hpp +/// +/// @defgroup ext_matrix_integer GLM_EXT_matrix_integer +/// @ingroup ext +/// +/// Defines functions that generate common transformation matrices. +/// +/// The matrices generated by this extension use standard OpenGL fixed-function +/// conventions. For example, the lookAt function generates a transform from world +/// space into the specific eye space that the projective matrix functions +/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility +/// specifications defines the particular layout of this eye space. +/// +/// Include to use the features of this extension. +/// +/// @see ext_matrix_projection +/// @see ext_matrix_clip_space + +#pragma once + +// Dependencies +#include "../gtc/constants.hpp" +#include "../geometric.hpp" +#include "../trigonometric.hpp" +#include "../matrix.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_integer extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_integer + /// @{ + + /// Multiply matrix x by matrix y component-wise, i.e., + /// result[i][j] is the scalar product of x[i][j] and y[i][j]. 
+ /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point or signed integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL matrixCompMult man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL mat matrixCompMult(mat const& x, mat const& y); + + /// Treats the first parameter c as a column vector + /// and the second parameter r as a row vector + /// and does a linear algebraic matrix multiply c * r. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point or signed integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL outerProduct man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL typename detail::outerProduct_trait::type outerProduct(vec const& c, vec const& r); + + /// Returns the transposed matrix of x + /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point or signed integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL transpose man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL typename mat::transpose_type transpose(mat const& x); + + /// Return the determinant of a squared matrix. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point or signed integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL determinant man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL T determinant(mat const& m); + + /// @} +}//namespace glm + +#include "matrix_integer.inl" diff --git a/includes/glm/ext/matrix_integer.inl b/includes/glm/ext/matrix_integer.inl new file mode 100644 index 0000000..8b377ce --- /dev/null +++ b/includes/glm/ext/matrix_integer.inl @@ -0,0 +1,38 @@ +namespace glm{ +namespace detail +{ + template + struct compute_matrixCompMult_type { + GLM_FUNC_QUALIFIER static mat call(mat const& x, mat const& y) + { + return detail::compute_matrixCompMult::value>::call(x, y); + } + }; + + template + struct compute_outerProduct_type { + GLM_FUNC_QUALIFIER static typename detail::outerProduct_trait::type call(vec const& c, vec const& r) + { + return detail::compute_outerProduct::call(c, r); + } + }; + + template + struct compute_transpose_type + { + GLM_FUNC_QUALIFIER static mat call(mat const& m) + { + return detail::compute_transpose::value>::call(m); + } + }; + + template + struct compute_determinant_type{ + + GLM_FUNC_QUALIFIER static T call(mat const& m) + { + return detail::compute_determinant::value>::call(m); + } + }; +}//namespace detail +}//namespace glm diff --git a/includes/glm/ext/matrix_projection.hpp b/includes/glm/ext/matrix_projection.hpp new file mode 100644 index 0000000..51fd01b --- /dev/null +++ b/includes/glm/ext/matrix_projection.hpp @@ -0,0 +1,149 @@ +/// @ref ext_matrix_projection +/// @file glm/ext/matrix_projection.hpp +/// +/// @defgroup ext_matrix_projection GLM_EXT_matrix_projection +/// 
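As a rough sketch of how the GLM_EXT_matrix_integer functions declared above are meant to be called on integer matrices (assuming the headers from this patch under their usual <glm/ext/...> paths; the values are invented and the exact overload resolution is not verified here):

    #include <glm/ext/matrix_int2x2.hpp>
    #include <glm/ext/matrix_integer.hpp>

    // imat2 is column-major, so X below has columns (1, 2) and (3, 4).
    glm::imat2 X(1, 2, 3, 4);
    glm::imat2 Y(2);                                // 2 on the diagonal, 0 elsewhere
    glm::imat2 C  = glm::matrixCompMult(X, Y);      // element-wise: columns (2, 0) and (0, 8)
    glm::imat2 Xt = glm::transpose(X);              // columns (1, 3) and (2, 4)
    int        d  = glm::determinant(X);            // 1*4 - 3*2 == -2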
@ingroup ext +/// +/// Functions that generate common projection transformation matrices. +/// +/// The matrices generated by this extension use standard OpenGL fixed-function +/// conventions. For example, the lookAt function generates a transform from world +/// space into the specific eye space that the projective matrix functions +/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility +/// specifications defines the particular layout of this eye space. +/// +/// Include to use the features of this extension. +/// +/// @see ext_matrix_transform +/// @see ext_matrix_clip_space + +#pragma once + +// Dependencies +#include "../gtc/constants.hpp" +#include "../geometric.hpp" +#include "../trigonometric.hpp" +#include "../matrix.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_projection extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_projection + /// @{ + + /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @param obj Specify the object coordinates. + /// @param model Specifies the current modelview matrix + /// @param proj Specifies the current projection matrix + /// @param viewport Specifies the current viewport + /// @return Return the computed window coordinates. + /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. + /// + /// @see gluProject man page + template + GLM_FUNC_DECL vec<3, T, Q> projectZO( + vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); + + /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @param obj Specify the object coordinates. + /// @param model Specifies the current modelview matrix + /// @param proj Specifies the current projection matrix + /// @param viewport Specifies the current viewport + /// @return Return the computed window coordinates. + /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. + /// + /// @see gluProject man page + template + GLM_FUNC_DECL vec<3, T, Q> projectNO( + vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); + + /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates using default near and far clip planes definition. + /// To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. + /// + /// @param obj Specify the object coordinates. + /// @param model Specifies the current modelview matrix + /// @param proj Specifies the current projection matrix + /// @param viewport Specifies the current viewport + /// @return Return the computed window coordinates. + /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. 
+ /// + /// @see gluProject man page + template + GLM_FUNC_DECL vec<3, T, Q> project( + vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); + + /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) + /// + /// @param win Specify the window coordinates to be mapped. + /// @param model Specifies the modelview matrix + /// @param proj Specifies the projection matrix + /// @param viewport Specifies the viewport + /// @return Returns the computed object coordinates. + /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. + /// + /// @see gluUnProject man page + template + GLM_FUNC_DECL vec<3, T, Q> unProjectZO( + vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); + + /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates. + /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) + /// + /// @param win Specify the window coordinates to be mapped. + /// @param model Specifies the modelview matrix + /// @param proj Specifies the projection matrix + /// @param viewport Specifies the viewport + /// @return Returns the computed object coordinates. + /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. + /// + /// @see gluUnProject man page + template + GLM_FUNC_DECL vec<3, T, Q> unProjectNO( + vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); + + /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates using default near and far clip planes definition. + /// To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. + /// + /// @param win Specify the window coordinates to be mapped. + /// @param model Specifies the modelview matrix + /// @param proj Specifies the projection matrix + /// @param viewport Specifies the viewport + /// @return Returns the computed object coordinates. + /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. + /// + /// @see gluUnProject man page + template + GLM_FUNC_DECL vec<3, T, Q> unProject( + vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); + + /// Define a picking region + /// + /// @param center Specify the center of a picking region in window coordinates. + /// @param delta Specify the width and height, respectively, of the picking region in window coordinates. + /// @param viewport Rendering viewport + /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. + /// @tparam U Currently supported: Floating-point types and integer types. 
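The project/unProject family documented above follows the gluProject conventions, so a round trip should recover the original point; a self-contained sketch with identity matrices and an invented viewport:

    #include <glm/ext/matrix_projection.hpp>

    // project: object space -> window space against model, projection and viewport;
    // unProject applies the inverse mapping.
    glm::mat4 model(1.0f), proj(1.0f);                  // identities, for illustration only
    glm::vec4 viewport(0.0f, 0.0f, 800.0f, 600.0f);     // x, y, width, height
    glm::vec3 obj(0.25f, 0.5f, 0.0f);
    glm::vec3 win  = glm::project(obj, model, proj, viewport);
    glm::vec3 back = glm::unProject(win, model, proj, viewport);   // ~= obj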
+ /// + /// @see gluPickMatrix man page + template + GLM_FUNC_DECL mat<4, 4, T, Q> pickMatrix( + vec<2, T, Q> const& center, vec<2, T, Q> const& delta, vec<4, U, Q> const& viewport); + + /// @} +}//namespace glm + +#include "matrix_projection.inl" diff --git a/includes/glm/ext/matrix_projection.inl b/includes/glm/ext/matrix_projection.inl new file mode 100644 index 0000000..2f2c196 --- /dev/null +++ b/includes/glm/ext/matrix_projection.inl @@ -0,0 +1,106 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> projectZO(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) + { + vec<4, T, Q> tmp = vec<4, T, Q>(obj, static_cast(1)); + tmp = model * tmp; + tmp = proj * tmp; + + tmp /= tmp.w; + tmp.x = tmp.x * static_cast(0.5) + static_cast(0.5); + tmp.y = tmp.y * static_cast(0.5) + static_cast(0.5); + + tmp[0] = tmp[0] * T(viewport[2]) + T(viewport[0]); + tmp[1] = tmp[1] * T(viewport[3]) + T(viewport[1]); + + return vec<3, T, Q>(tmp); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> projectNO(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) + { + vec<4, T, Q> tmp = vec<4, T, Q>(obj, static_cast(1)); + tmp = model * tmp; + tmp = proj * tmp; + + tmp /= tmp.w; + tmp = tmp * static_cast(0.5) + static_cast(0.5); + tmp[0] = tmp[0] * T(viewport[2]) + T(viewport[0]); + tmp[1] = tmp[1] * T(viewport[3]) + T(viewport[1]); + + return vec<3, T, Q>(tmp); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> project(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT + return projectZO(obj, model, proj, viewport); +# else + return projectNO(obj, model, proj, viewport); +# endif + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectZO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) + { + mat<4, 4, T, Q> Inverse = inverse(proj * model); + + vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1)); + tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]); + tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]); + tmp.x = tmp.x * static_cast(2) - static_cast(1); + tmp.y = tmp.y * static_cast(2) - static_cast(1); + + vec<4, T, Q> obj = Inverse * tmp; + obj /= obj.w; + + return vec<3, T, Q>(obj); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectNO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) + { + mat<4, 4, T, Q> Inverse = inverse(proj * model); + + vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1)); + tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]); + tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]); + tmp = tmp * static_cast(2) - static_cast(1); + + vec<4, T, Q> obj = Inverse * tmp; + obj /= obj.w; + + return vec<3, T, Q>(obj); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> unProject(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT + return unProjectZO(win, model, proj, viewport); +# else + return unProjectNO(win, model, proj, viewport); +# endif + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> pickMatrix(vec<2, T, Q> const& center, vec<2, T, Q> const& delta, vec<4, U, Q> const& viewport) + { + assert(delta.x > static_cast(0) && delta.y > static_cast(0)); + mat<4, 4, T, Q> Result(static_cast(1)); + + 
if(!(delta.x > static_cast(0) && delta.y > static_cast(0))) + return Result; // Error + + vec<3, T, Q> Temp( + (static_cast(viewport[2]) - static_cast(2) * (center.x - static_cast(viewport[0]))) / delta.x, + (static_cast(viewport[3]) - static_cast(2) * (center.y - static_cast(viewport[1]))) / delta.y, + static_cast(0)); + + // Translate and scale the picked region to the entire window + Result = translate(Result, Temp); + return scale(Result, vec<3, T, Q>(static_cast(viewport[2]) / delta.x, static_cast(viewport[3]) / delta.y, static_cast(1))); + } +}//namespace glm diff --git a/includes/glm/ext/matrix_relational.hpp b/includes/glm/ext/matrix_relational.hpp new file mode 100644 index 0000000..20023ad --- /dev/null +++ b/includes/glm/ext/matrix_relational.hpp @@ -0,0 +1,132 @@ +/// @ref ext_matrix_relational +/// @file glm/ext/matrix_relational.hpp +/// +/// @defgroup ext_matrix_relational GLM_EXT_matrix_relational +/// @ingroup ext +/// +/// Exposes comparison functions for matrix types that take a user defined epsilon values. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_relational +/// @see ext_scalar_relational +/// @see ext_quaternion_relational + +#pragma once + +// Dependencies +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_relational extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_relational + /// @{ + + /// Perform a component-wise equal-to comparison of two matrices. + /// Return a boolean vector which components value is True if this expression is satisfied per column of the matrices. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(mat const& x, mat const& y); + + /// Perform a component-wise not-equal-to comparison of two matrices. + /// Return a boolean vector which components value is True if this expression is satisfied per column of the matrices. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y); + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is satisfied. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(mat const& x, mat const& y, T epsilon); + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is satisfied. 
+ /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(mat const& x, mat const& y, vec const& epsilon); + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is not satisfied. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, T epsilon); + + /// Returns the component-wise comparison of |x - y| >= epsilon. + /// True if this expression is not satisfied. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, vec const& epsilon); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is satisfied. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(mat const& x, mat const& y, int ULPs); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is satisfied. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(mat const& x, mat const& y, vec const& ULPs); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is not satisfied. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, int ULPs); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is not satisfied. 
+ /// + /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix + /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, vec const& ULPs); + + /// @} +}//namespace glm + +#include "matrix_relational.inl" diff --git a/includes/glm/ext/matrix_relational.inl b/includes/glm/ext/matrix_relational.inl new file mode 100644 index 0000000..9cd42b7 --- /dev/null +++ b/includes/glm/ext/matrix_relational.inl @@ -0,0 +1,88 @@ +/// @ref ext_vector_relational +/// @file glm/ext/vector_relational.inl + +// Dependency: +#include "../ext/vector_relational.hpp" +#include "../common.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b) + { + vec Result(true); + for(length_t i = 0; i < C; ++i) + Result[i] = all(equal(a[i], b[i])); + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, T Epsilon) + { + return equal(a, b, vec(Epsilon)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, vec const& Epsilon) + { + vec Result(true); + for(length_t i = 0; i < C; ++i) + Result[i] = all(equal(a[i], b[i], Epsilon[i])); + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b) + { + vec Result(true); + for(length_t i = 0; i < C; ++i) + Result[i] = any(notEqual(a[i], b[i])); + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b, T Epsilon) + { + return notEqual(a, b, vec(Epsilon)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b, vec const& Epsilon) + { + vec Result(true); + for(length_t i = 0; i < C; ++i) + Result[i] = any(notEqual(a[i], b[i], Epsilon[i])); + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, int MaxULPs) + { + return equal(a, b, vec(MaxULPs)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, vec const& MaxULPs) + { + vec Result(true); + for(length_t i = 0; i < C; ++i) + Result[i] = all(equal(a[i], b[i], MaxULPs[i])); + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b, int MaxULPs) + { + return notEqual(a, b, vec(MaxULPs)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b, vec const& MaxULPs) + { + vec Result(true); + for(length_t i = 0; i < C; ++i) + Result[i] = any(notEqual(a[i], b[i], MaxULPs[i])); + return Result; + } + +}//namespace glm diff --git a/includes/glm/ext/matrix_transform.hpp b/includes/glm/ext/matrix_transform.hpp new file mode 100644 index 0000000..52695b8 --- /dev/null +++ b/includes/glm/ext/matrix_transform.hpp @@ -0,0 +1,171 @@ +/// @ref ext_matrix_transform +/// @file glm/ext/matrix_transform.hpp +/// +/// @defgroup ext_matrix_transform GLM_EXT_matrix_transform +/// @ingroup ext +/// +/// Defines functions that generate common transformation matrices. +/// +/// The matrices generated by this extension use standard OpenGL fixed-function +/// conventions. For example, the lookAt function generates a transform from world +/// space into the specific eye space that the projective matrix functions +/// (perspective, ortho, etc) are designed to expect. 
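[Editor's note] A small sketch of how the matrix comparisons implemented in matrix_relational.inl above are typically folded into a single boolean; the matrices and tolerances are illustrative.

    glm::mat4 m0 = glm::mat4(1.0f);
    glm::mat4 m1 = glm::mat4(1.0f + 1e-6f);
    bool nearlyEqual = glm::all(glm::equal(m0, m1, 1e-5f)); // per-column |x - y| < epsilon, folded with all()
    bool ulpEqual    = glm::all(glm::equal(m0, m1, 4));     // per-column comparison within 4 ULPs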
The OpenGL compatibility +/// specifications defines the particular layout of this eye space. +/// +/// Include to use the features of this extension. +/// +/// @see ext_matrix_projection +/// @see ext_matrix_clip_space + +#pragma once + +// Dependencies +#include "../gtc/constants.hpp" +#include "../geometric.hpp" +#include "../trigonometric.hpp" +#include "../matrix.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_transform extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_transform + /// @{ + + /// Builds an identity matrix. + template + GLM_FUNC_DECL GLM_CONSTEXPR genType identity(); + + /// Builds a translation 4 * 4 matrix created from a vector of 3 components. + /// + /// @param m Input matrix multiplied by this translation matrix. + /// @param v Coordinates of a translation vector. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + /// + /// @code + /// #include + /// #include + /// ... + /// glm::mat4 m = glm::translate(glm::mat4(1.0f), glm::vec3(1.0f)); + /// // m[0][0] == 1.0f, m[0][1] == 0.0f, m[0][2] == 0.0f, m[0][3] == 0.0f + /// // m[1][0] == 0.0f, m[1][1] == 1.0f, m[1][2] == 0.0f, m[1][3] == 0.0f + /// // m[2][0] == 0.0f, m[2][1] == 0.0f, m[2][2] == 1.0f, m[2][3] == 0.0f + /// // m[3][0] == 1.0f, m[3][1] == 1.0f, m[3][2] == 1.0f, m[3][3] == 1.0f + /// @endcode + /// + /// @see - translate(mat<4, 4, T, Q> const& m, T x, T y, T z) + /// @see - translate(vec<3, T, Q> const& v) + /// @see glTranslate man page + template + GLM_FUNC_DECL GLM_CONSTEXPR mat<4, 4, T, Q> translate( + mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v); + + /// Builds a rotation 4 * 4 matrix created from an axis vector and an angle. + /// + /// @param m Input matrix multiplied by this rotation matrix. + /// @param angle Rotation angle expressed in radians. + /// @param axis Rotation axis, recommended to be normalized. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + /// + /// @see - rotate(mat<4, 4, T, Q> const& m, T angle, T x, T y, T z) + /// @see - rotate(T angle, vec<3, T, Q> const& v) + /// @see glRotate man page + template + GLM_FUNC_DECL mat<4, 4, T, Q> rotate( + mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& axis); + + /// Builds a scale 4 * 4 matrix created from 3 scalars. + /// + /// @param m Input matrix multiplied by this scale matrix. + /// @param v Ratio of scaling for each axis. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + /// + /// @see - scale(mat<4, 4, T, Q> const& m, T x, T y, T z) + /// @see - scale(vec<3, T, Q> const& v) + /// @see glScale man page + template + GLM_FUNC_DECL mat<4, 4, T, Q> scale( + mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v); + + /// Builds a scale 4 * 4 matrix created from point referent 3 shearers. + /// + /// @param m Input matrix multiplied by this shear matrix. + /// @param p Point of shearing as reference. + /// @param l_x Ratio of matrix.x projection in YZ plane relative to the y-axis/z-axis. + /// @param l_y Ratio of matrix.y projection in XZ plane relative to the x-axis/z-axis. + /// @param l_z Ratio of matrix.z projection in XY plane relative to the x-axis/y-axis. 
+ /// + /// as example: + /// [1 , l_xy, l_xz, -(l_xy+l_xz) * p_x] [x] T + /// [x`, y`, z`, w`] = [x`, y`, z`, w`] * [l_yx, 1 , l_yz, -(l_yx+l_yz) * p_y] [y] + /// [l_zx, l_zy, 1 , -(l_zx+l_zy) * p_z] [z] + /// [0 , 0 , 0 , 1 ] [w] + /// + /// @tparam T A floating-point shear type + /// @tparam Q A value from qualifier enum + /// + /// @see - shear(mat<4, 4, T, Q> const& m, T x, T y, T z) + /// @see - shear(vec<3, T, Q> const& p) + /// @see - shear(vec<2, T, Q> const& l_x) + /// @see - shear(vec<2, T, Q> const& l_y) + /// @see - shear(vec<2, T, Q> const& l_z) + /// @see no resource... + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shear( + mat<4, 4, T, Q> const &m, vec<3, T, Q> const& p, vec<2, T, Q> const &l_x, vec<2, T, Q> const &l_y, vec<2, T, Q> const &l_z); + + /// Build a right handed look at view matrix. + /// + /// @param eye Position of the camera + /// @param center Position where the camera is looking at + /// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1) + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + /// + /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) + template + GLM_FUNC_DECL mat<4, 4, T, Q> lookAtRH( + vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up); + + /// Build a left handed look at view matrix. + /// + /// @param eye Position of the camera + /// @param center Position where the camera is looking at + /// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1) + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + /// + /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) + template + GLM_FUNC_DECL mat<4, 4, T, Q> lookAtLH( + vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up); + + /// Build a look at view matrix based on the default handedness. + /// + /// @param eye Position of the camera + /// @param center Position where the camera is looking at + /// @param up Normalized up vector, how the camera is oriented. 
Typically (0, 0, 1) + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + /// + /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) + /// @see gluLookAt man page + template + GLM_FUNC_DECL mat<4, 4, T, Q> lookAt( + vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up); + + /// @} +}//namespace glm + +#include "matrix_transform.inl" diff --git a/includes/glm/ext/matrix_transform.inl b/includes/glm/ext/matrix_transform.inl new file mode 100644 index 0000000..029ef0f --- /dev/null +++ b/includes/glm/ext/matrix_transform.inl @@ -0,0 +1,207 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType identity() + { + return detail::init_gentype::GENTYPE>::identity(); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q> translate(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v) + { + mat<4, 4, T, Q> Result(m); + Result[3] = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate(mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& v) + { + T const a = angle; + T const c = cos(a); + T const s = sin(a); + + vec<3, T, Q> axis(normalize(v)); + vec<3, T, Q> temp((T(1) - c) * axis); + + mat<4, 4, T, Q> Rotate; + Rotate[0][0] = c + temp[0] * axis[0]; + Rotate[0][1] = temp[0] * axis[1] + s * axis[2]; + Rotate[0][2] = temp[0] * axis[2] - s * axis[1]; + + Rotate[1][0] = temp[1] * axis[0] - s * axis[2]; + Rotate[1][1] = c + temp[1] * axis[1]; + Rotate[1][2] = temp[1] * axis[2] + s * axis[0]; + + Rotate[2][0] = temp[2] * axis[0] + s * axis[1]; + Rotate[2][1] = temp[2] * axis[1] - s * axis[0]; + Rotate[2][2] = c + temp[2] * axis[2]; + + mat<4, 4, T, Q> Result; + Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2]; + Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2]; + Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2]; + Result[3] = m[3]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate_slow(mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& v) + { + T const a = angle; + T const c = cos(a); + T const s = sin(a); + mat<4, 4, T, Q> Result; + + vec<3, T, Q> axis = normalize(v); + + Result[0][0] = c + (static_cast(1) - c) * axis.x * axis.x; + Result[0][1] = (static_cast(1) - c) * axis.x * axis.y + s * axis.z; + Result[0][2] = (static_cast(1) - c) * axis.x * axis.z - s * axis.y; + Result[0][3] = static_cast(0); + + Result[1][0] = (static_cast(1) - c) * axis.y * axis.x - s * axis.z; + Result[1][1] = c + (static_cast(1) - c) * axis.y * axis.y; + Result[1][2] = (static_cast(1) - c) * axis.y * axis.z + s * axis.x; + Result[1][3] = static_cast(0); + + Result[2][0] = (static_cast(1) - c) * axis.z * axis.x + s * axis.y; + Result[2][1] = (static_cast(1) - c) * axis.z * axis.y - s * axis.x; + Result[2][2] = c + (static_cast(1) - c) * axis.z * axis.z; + Result[2][3] = static_cast(0); + + Result[3] = vec<4, T, Q>(0, 0, 0, 1); + return m * Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v) + { + mat<4, 4, T, Q> Result; + Result[0] = m[0] * v[0]; + Result[1] = m[1] * v[1]; + Result[2] = m[2] * v[2]; + Result[3] = m[3]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale_slow(mat<4, 4, T, Q> const& m, 
vec<3, T, Q> const& v) + { + mat<4, 4, T, Q> Result(T(1)); + Result[0][0] = v.x; + Result[1][1] = v.y; + Result[2][2] = v.z; + return m * Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shear(mat<4, 4, T, Q> const &m, vec<3, T, Q> const& p, vec<2, T, Q> const &l_x, vec<2, T, Q> const &l_y, vec<2, T, Q> const &l_z) + { + T const lambda_xy = l_x[0]; + T const lambda_xz = l_x[1]; + T const lambda_yx = l_y[0]; + T const lambda_yz = l_y[1]; + T const lambda_zx = l_z[0]; + T const lambda_zy = l_z[1]; + + vec<3, T, Q> point_lambda = vec<3, T, Q>( + (lambda_xy + lambda_xz), (lambda_yx + lambda_yz), (lambda_zx + lambda_zy) + ); + + mat<4, 4, T, Q> Shear = mat<4, 4, T, Q>( + 1 , lambda_yx , lambda_zx , 0, + lambda_xy , 1 , lambda_zy , 0, + lambda_xz , lambda_yz , 1 , 0, + -point_lambda[0] * p[0], -point_lambda[1] * p[1], -point_lambda[2] * p[2], 1 + ); + + mat<4, 4, T, Q> Result; + Result[0] = m[0] * Shear[0][0] + m[1] * Shear[0][1] + m[2] * Shear[0][2] + m[3] * Shear[0][3]; + Result[1] = m[0] * Shear[1][0] + m[1] * Shear[1][1] + m[2] * Shear[1][2] + m[3] * Shear[1][3]; + Result[2] = m[0] * Shear[2][0] + m[1] * Shear[2][1] + m[2] * Shear[2][2] + m[3] * Shear[2][3]; + Result[3] = m[0] * Shear[3][0] + m[1] * Shear[3][1] + m[2] * Shear[3][2] + m[3] * Shear[3][3]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shear_slow(mat<4, 4, T, Q> const &m, vec<3, T, Q> const& p, vec<2, T, Q> const &l_x, vec<2, T, Q> const &l_y, vec<2, T, Q> const &l_z) + { + T const lambda_xy = static_cast(l_x[0]); + T const lambda_xz = static_cast(l_x[1]); + T const lambda_yx = static_cast(l_y[0]); + T const lambda_yz = static_cast(l_y[1]); + T const lambda_zx = static_cast(l_z[0]); + T const lambda_zy = static_cast(l_z[1]); + + vec<3, T, Q> point_lambda = vec<3, T, Q>( + static_cast(lambda_xy + lambda_xz), + static_cast(lambda_yx + lambda_yz), + static_cast(lambda_zx + lambda_zy) + ); + + mat<4, 4, T, Q> Shear = mat<4, 4, T, Q>( + 1 , lambda_yx , lambda_zx , 0, + lambda_xy , 1 , lambda_zy , 0, + lambda_xz , lambda_yz , 1 , 0, + -point_lambda[0] * p[0], -point_lambda[1] * p[1], -point_lambda[2] * p[2], 1 + ); + return m * Shear; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAtRH(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up) + { + vec<3, T, Q> const f(normalize(center - eye)); + vec<3, T, Q> const s(normalize(cross(f, up))); + vec<3, T, Q> const u(cross(s, f)); + + mat<4, 4, T, Q> Result(1); + Result[0][0] = s.x; + Result[1][0] = s.y; + Result[2][0] = s.z; + Result[0][1] = u.x; + Result[1][1] = u.y; + Result[2][1] = u.z; + Result[0][2] =-f.x; + Result[1][2] =-f.y; + Result[2][2] =-f.z; + Result[3][0] =-dot(s, eye); + Result[3][1] =-dot(u, eye); + Result[3][2] = dot(f, eye); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAtLH(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up) + { + vec<3, T, Q> const f(normalize(center - eye)); + vec<3, T, Q> const s(normalize(cross(up, f))); + vec<3, T, Q> const u(cross(f, s)); + + mat<4, 4, T, Q> Result(1); + Result[0][0] = s.x; + Result[1][0] = s.y; + Result[2][0] = s.z; + Result[0][1] = u.x; + Result[1][1] = u.y; + Result[2][1] = u.z; + Result[0][2] = f.x; + Result[1][2] = f.y; + Result[2][2] = f.z; + Result[3][0] = -dot(s, eye); + Result[3][1] = -dot(u, eye); + Result[3][2] = -dot(f, eye); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAt(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up) + { +# if 
(GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT) + return lookAtLH(eye, center, up); +# else + return lookAtRH(eye, center, up); +# endif + } +}//namespace glm diff --git a/includes/glm/ext/matrix_uint2x2.hpp b/includes/glm/ext/matrix_uint2x2.hpp new file mode 100644 index 0000000..034771a --- /dev/null +++ b/includes/glm/ext/matrix_uint2x2.hpp @@ -0,0 +1,38 @@ +/// @ref ext_matrix_uint2x2 +/// @file glm/ext/matrix_uint2x2.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint2x2 GLM_EXT_matrix_uint2x2 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint2x2 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint2x2 + /// @{ + + /// Unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2 + typedef mat<2, 2, uint, defaultp> umat2x2; + + /// Unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2 + typedef mat<2, 2, uint, defaultp> umat2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint2x2_sized.hpp b/includes/glm/ext/matrix_uint2x2_sized.hpp new file mode 100644 index 0000000..4555324 --- /dev/null +++ b/includes/glm/ext/matrix_uint2x2_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_matrix_uint2x2_sized +/// @file glm/ext/matrix_uint2x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint2x2_sized GLM_EXT_matrix_uint2x2_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x2.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint2x2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint2x2_sized + /// @{ + + /// 8 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint8, defaultp> u8mat2x2; + + /// 16 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint16, defaultp> u16mat2x2; + + /// 32 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint32, defaultp> u32mat2x2; + + /// 64 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint64, defaultp> u64mat2x2; + + + /// 8 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint8, defaultp> u8mat2; + + /// 16 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint16, defaultp> u16mat2; + + /// 32 bit unsigned integer 2x2 matrix. + /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint32, defaultp> u32mat2; + + /// 64 bit unsigned integer 2x2 matrix. 
+ /// + /// @see ext_matrix_uint2x2_sized + typedef mat<2, 2, uint64, defaultp> u64mat2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint2x3.hpp b/includes/glm/ext/matrix_uint2x3.hpp new file mode 100644 index 0000000..f496c53 --- /dev/null +++ b/includes/glm/ext/matrix_uint2x3.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_uint2x3 +/// @file glm/ext/matrix_uint2x3.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint2x3 GLM_EXT_matrix_uint2x3 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x3.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint2x3 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint2x3 + /// @{ + + /// Unsigned integer 2x3 matrix. + /// + /// @see ext_matrix_uint2x3 + typedef mat<2, 3, uint, defaultp> umat2x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint2x3_sized.hpp b/includes/glm/ext/matrix_uint2x3_sized.hpp new file mode 100644 index 0000000..db7939c --- /dev/null +++ b/includes/glm/ext/matrix_uint2x3_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_uint2x3_sized +/// @file glm/ext/matrix_uint2x3_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint2x3_sized GLM_EXT_matrix_uint2x3_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x3.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint2x3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint2x3_sized + /// @{ + + /// 8 bit unsigned integer 2x3 matrix. + /// + /// @see ext_matrix_uint2x3_sized + typedef mat<2, 3, uint8, defaultp> u8mat2x3; + + /// 16 bit unsigned integer 2x3 matrix. + /// + /// @see ext_matrix_uint2x3_sized + typedef mat<2, 3, uint16, defaultp> u16mat2x3; + + /// 32 bit unsigned integer 2x3 matrix. + /// + /// @see ext_matrix_uint2x3_sized + typedef mat<2, 3, uint32, defaultp> u32mat2x3; + + /// 64 bit unsigned integer 2x3 matrix. + /// + /// @see ext_matrix_uint2x3_sized + typedef mat<2, 3, uint64, defaultp> u64mat2x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint2x4.hpp b/includes/glm/ext/matrix_uint2x4.hpp new file mode 100644 index 0000000..0f99350 --- /dev/null +++ b/includes/glm/ext/matrix_uint2x4.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_uint2x4 +/// @file glm/ext/matrix_uint2x4.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint2x4 GLM_EXT_matrix_int2x4 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint2x4 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint2x4 + /// @{ + + /// Unsigned integer 2x4 matrix. 
+ /// + /// @see ext_matrix_uint2x4 + typedef mat<2, 4, uint, defaultp> umat2x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint2x4_sized.hpp b/includes/glm/ext/matrix_uint2x4_sized.hpp new file mode 100644 index 0000000..5c55547 --- /dev/null +++ b/includes/glm/ext/matrix_uint2x4_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_uint2x4_sized +/// @file glm/ext/matrix_uint2x4_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint2x4_sized GLM_EXT_matrix_uint2x4_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x4.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint2x4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint2x4_sized + /// @{ + + /// 8 bit unsigned integer 2x4 matrix. + /// + /// @see ext_matrix_uint2x4_sized + typedef mat<2, 4, uint8, defaultp> u8mat2x4; + + /// 16 bit unsigned integer 2x4 matrix. + /// + /// @see ext_matrix_uint2x4_sized + typedef mat<2, 4, uint16, defaultp> u16mat2x4; + + /// 32 bit unsigned integer 2x4 matrix. + /// + /// @see ext_matrix_uint2x4_sized + typedef mat<2, 4, uint32, defaultp> u32mat2x4; + + /// 64 bit unsigned integer 2x4 matrix. + /// + /// @see ext_matrix_uint2x4_sized + typedef mat<2, 4, uint64, defaultp> u64mat2x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint3x2.hpp b/includes/glm/ext/matrix_uint3x2.hpp new file mode 100644 index 0000000..55a9bed --- /dev/null +++ b/includes/glm/ext/matrix_uint3x2.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_uint3x2 +/// @file glm/ext/matrix_uint3x2.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint3x2 GLM_EXT_matrix_uint3x2 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint3x2 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint3x2 + /// @{ + + /// Unsigned integer 3x2 matrix. + /// + /// @see ext_matrix_uint3x2 + typedef mat<3, 2, uint, defaultp> umat3x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint3x2_sized.hpp b/includes/glm/ext/matrix_uint3x2_sized.hpp new file mode 100644 index 0000000..c81af8f --- /dev/null +++ b/includes/glm/ext/matrix_uint3x2_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_uint3x2_sized +/// @file glm/ext/matrix_uint3x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint3x2_sized GLM_EXT_matrix_uint3x2_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x2.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint3x2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint3x2_sized + /// @{ + + /// 8 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_uint3x2_sized + typedef mat<3, 2, uint8, defaultp> u8mat3x2; + + /// 16 bit signed integer 3x2 matrix. 
+ /// + /// @see ext_matrix_uint3x2_sized + typedef mat<3, 2, uint16, defaultp> u16mat3x2; + + /// 32 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_uint3x2_sized + typedef mat<3, 2, uint32, defaultp> u32mat3x2; + + /// 64 bit signed integer 3x2 matrix. + /// + /// @see ext_matrix_uint3x2_sized + typedef mat<3, 2, uint64, defaultp> u64mat3x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint3x3.hpp b/includes/glm/ext/matrix_uint3x3.hpp new file mode 100644 index 0000000..1004c0d --- /dev/null +++ b/includes/glm/ext/matrix_uint3x3.hpp @@ -0,0 +1,38 @@ +/// @ref ext_matrix_uint3x3 +/// @file glm/ext/matrix_uint3x3.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint3x3 GLM_EXT_matrix_uint3x3 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x3.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint3x3 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint3x3 + /// @{ + + /// Unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3 + typedef mat<3, 3, uint, defaultp> umat3x3; + + /// Unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3 + typedef mat<3, 3, uint, defaultp> umat3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint3x3_sized.hpp b/includes/glm/ext/matrix_uint3x3_sized.hpp new file mode 100644 index 0000000..41a8be7 --- /dev/null +++ b/includes/glm/ext/matrix_uint3x3_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_matrix_uint3x3_sized +/// @file glm/ext/matrix_uint3x3_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint3x3_sized GLM_EXT_matrix_uint3x3_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x3.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint3x3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint3x3_sized + /// @{ + + /// 8 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint8, defaultp> u8mat3x3; + + /// 16 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint16, defaultp> u16mat3x3; + + /// 32 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint32, defaultp> u32mat3x3; + + /// 64 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint64, defaultp> u64mat3x3; + + + /// 8 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint8, defaultp> u8mat3; + + /// 16 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint16, defaultp> u16mat3; + + /// 32 bit unsigned integer 3x3 matrix. + /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint32, defaultp> u32mat3; + + /// 64 bit unsigned integer 3x3 matrix. 
+ /// + /// @see ext_matrix_uint3x3_sized + typedef mat<3, 3, uint64, defaultp> u64mat3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint3x4.hpp b/includes/glm/ext/matrix_uint3x4.hpp new file mode 100644 index 0000000..c6dd78c --- /dev/null +++ b/includes/glm/ext/matrix_uint3x4.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_uint3x4 +/// @file glm/ext/matrix_uint3x4.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint3x4 GLM_EXT_matrix_uint3x4 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint3x4 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint3x4 + /// @{ + + /// Signed integer 3x4 matrix. + /// + /// @see ext_matrix_uint3x4 + typedef mat<3, 4, uint, defaultp> umat3x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint3x4_sized.hpp b/includes/glm/ext/matrix_uint3x4_sized.hpp new file mode 100644 index 0000000..2ce28ad --- /dev/null +++ b/includes/glm/ext/matrix_uint3x4_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_uint3x4_sized +/// @file glm/ext/matrix_uint3x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint3x4_sized GLM_EXT_matrix_uint3x4_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat3x4.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint3x4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint3x4_sized + /// @{ + + /// 8 bit unsigned integer 3x4 matrix. + /// + /// @see ext_matrix_uint3x4_sized + typedef mat<3, 4, uint8, defaultp> u8mat3x4; + + /// 16 bit unsigned integer 3x4 matrix. + /// + /// @see ext_matrix_uint3x4_sized + typedef mat<3, 4, uint16, defaultp> u16mat3x4; + + /// 32 bit unsigned integer 3x4 matrix. + /// + /// @see ext_matrix_uint3x4_sized + typedef mat<3, 4, uint32, defaultp> u32mat3x4; + + /// 64 bit unsigned integer 3x4 matrix. + /// + /// @see ext_matrix_uint3x4_sized + typedef mat<3, 4, uint64, defaultp> u64mat3x4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint4x2.hpp b/includes/glm/ext/matrix_uint4x2.hpp new file mode 100644 index 0000000..0446f57 --- /dev/null +++ b/includes/glm/ext/matrix_uint4x2.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_uint4x2 +/// @file glm/ext/matrix_uint4x2.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint4x2 GLM_EXT_matrix_uint4x2 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x2.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint4x2 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint4x2 + /// @{ + + /// Unsigned integer 4x2 matrix. 
+ /// + /// @see ext_matrix_uint4x2 + typedef mat<4, 2, uint, defaultp> umat4x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint4x2_sized.hpp b/includes/glm/ext/matrix_uint4x2_sized.hpp new file mode 100644 index 0000000..57a66bf --- /dev/null +++ b/includes/glm/ext/matrix_uint4x2_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_uint4x2_sized +/// @file glm/ext/matrix_uint4x2_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint4x2_sized GLM_EXT_matrix_uint4x2_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x2.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint4x2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint4x2_sized + /// @{ + + /// 8 bit unsigned integer 4x2 matrix. + /// + /// @see ext_matrix_uint4x2_sized + typedef mat<4, 2, uint8, defaultp> u8mat4x2; + + /// 16 bit unsigned integer 4x2 matrix. + /// + /// @see ext_matrix_uint4x2_sized + typedef mat<4, 2, uint16, defaultp> u16mat4x2; + + /// 32 bit unsigned integer 4x2 matrix. + /// + /// @see ext_matrix_uint4x2_sized + typedef mat<4, 2, uint32, defaultp> u32mat4x2; + + /// 64 bit unsigned integer 4x2 matrix. + /// + /// @see ext_matrix_uint4x2_sized + typedef mat<4, 2, uint64, defaultp> u64mat4x2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint4x3.hpp b/includes/glm/ext/matrix_uint4x3.hpp new file mode 100644 index 0000000..54c24e4 --- /dev/null +++ b/includes/glm/ext/matrix_uint4x3.hpp @@ -0,0 +1,33 @@ +/// @ref ext_matrix_uint4x3 +/// @file glm/ext/matrix_uint4x3.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint4x3 GLM_EXT_matrix_uint4x3 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x3.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint4x3 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint4x3 + /// @{ + + /// Unsigned integer 4x3 matrix. + /// + /// @see ext_matrix_uint4x3 + typedef mat<4, 3, uint, defaultp> umat4x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint4x3_sized.hpp b/includes/glm/ext/matrix_uint4x3_sized.hpp new file mode 100644 index 0000000..2e61124 --- /dev/null +++ b/includes/glm/ext/matrix_uint4x3_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_matrix_uint4x3_sized +/// @file glm/ext/matrix_uint4x3_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint4x3_sized GLM_EXT_matrix_uint4x3_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x3.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint4x3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint4x3_sized + /// @{ + + /// 8 bit unsigned integer 4x3 matrix. + /// + /// @see ext_matrix_uint4x3_sized + typedef mat<4, 3, uint8, defaultp> u8mat4x3; + + /// 16 bit unsigned integer 4x3 matrix. 
+ /// + /// @see ext_matrix_uint4x3_sized + typedef mat<4, 3, uint16, defaultp> u16mat4x3; + + /// 32 bit unsigned integer 4x3 matrix. + /// + /// @see ext_matrix_uint4x3_sized + typedef mat<4, 3, uint32, defaultp> u32mat4x3; + + /// 64 bit unsigned integer 4x3 matrix. + /// + /// @see ext_matrix_uint4x3_sized + typedef mat<4, 3, uint64, defaultp> u64mat4x3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint4x4.hpp b/includes/glm/ext/matrix_uint4x4.hpp new file mode 100644 index 0000000..5cc8455 --- /dev/null +++ b/includes/glm/ext/matrix_uint4x4.hpp @@ -0,0 +1,38 @@ +/// @ref ext_matrix_uint4x4 +/// @file glm/ext/matrix_uint4x4.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint4x4 GLM_EXT_matrix_uint4x4 +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint4x4 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint4x4 + /// @{ + + /// Unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4 + typedef mat<4, 4, uint, defaultp> umat4x4; + + /// Unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4 + typedef mat<4, 4, uint, defaultp> umat4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/matrix_uint4x4_sized.hpp b/includes/glm/ext/matrix_uint4x4_sized.hpp new file mode 100644 index 0000000..bb10bd2 --- /dev/null +++ b/includes/glm/ext/matrix_uint4x4_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_matrix_uint4x4_sized +/// @file glm/ext/matrix_uint4x4_sized.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_matrix_uint4x4_sized GLM_EXT_matrix_uint4x4_sized +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat4x4.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_matrix_uint4x4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_matrix_uint4x4_sized + /// @{ + + /// 8 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint8, defaultp> u8mat4x4; + + /// 16 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint16, defaultp> u16mat4x4; + + /// 32 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint32, defaultp> u32mat4x4; + + /// 64 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint64, defaultp> u64mat4x4; + + + /// 8 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint8, defaultp> u8mat4; + + /// 16 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint16, defaultp> u16mat4; + + /// 32 bit unsigned integer 4x4 matrix. + /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint32, defaultp> u32mat4; + + /// 64 bit unsigned integer 4x4 matrix. 
+ /// + /// @see ext_matrix_uint4x4_sized + typedef mat<4, 4, uint64, defaultp> u64mat4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/quaternion_common.hpp b/includes/glm/ext/quaternion_common.hpp new file mode 100644 index 0000000..f738692 --- /dev/null +++ b/includes/glm/ext/quaternion_common.hpp @@ -0,0 +1,135 @@ +/// @ref ext_quaternion_common +/// @file glm/ext/quaternion_common.hpp +/// +/// @defgroup ext_quaternion_common GLM_EXT_quaternion_common +/// @ingroup ext +/// +/// Provides common functions for quaternion types +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_common +/// @see ext_vector_common +/// @see ext_quaternion_float +/// @see ext_quaternion_double +/// @see ext_quaternion_exponential +/// @see ext_quaternion_geometric +/// @see ext_quaternion_relational +/// @see ext_quaternion_trigonometric +/// @see ext_quaternion_transform + +#pragma once + +// Dependency: +#include "../ext/scalar_constants.hpp" +#include "../ext/quaternion_geometric.hpp" +#include "../common.hpp" +#include "../trigonometric.hpp" +#include "../exponential.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_common extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_common + /// @{ + + /// Spherical linear interpolation of two quaternions. + /// The interpolation is oriented and the rotation is performed at constant speed. + /// For short path spherical linear interpolation, use the slerp function. + /// + /// @param x A quaternion + /// @param y A quaternion + /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1]. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + /// + /// @see - slerp(qua const& x, qua const& y, T const& a) + template + GLM_FUNC_DECL qua mix(qua const& x, qua const& y, T a); + + /// Linear interpolation of two quaternions. + /// The interpolation is oriented. + /// + /// @param x A quaternion + /// @param y A quaternion + /// @param a Interpolation factor. The interpolation is defined in the range [0, 1]. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR qua lerp(qua const& x, qua const& y, T a); + + /// Spherical linear interpolation of two quaternions. + /// The interpolation always take the short path and the rotation is performed at constant speed. + /// + /// @param x A quaternion + /// @param y A quaternion + /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1]. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL qua slerp(qua const& x, qua const& y, T a); + + /// Spherical linear interpolation of two quaternions with multiple spins over rotation axis. + /// The interpolation always take the short path when the spin count is positive and long path + /// when count is negative. Rotation is performed at constant speed. + /// + /// @param x A quaternion + /// @param y A quaternion + /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1]. + /// @param k Additional spin count. If Value is negative interpolation will be on "long" path. 
+ /// + /// @tparam T A floating-point scalar type + /// @tparam S An integer scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL qua slerp(qua const& x, qua const& y, T a, S k); + + /// Returns the q conjugate. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR qua conjugate(qua const& q); + + /// Returns the q inverse. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR qua inverse(qua const& q); + + /// Returns true if x holds a NaN (not a number) + /// representation in the underlying implementation's set of + /// floating point representations. Returns false otherwise, + /// including for implementations with no NaN + /// representations. + /// + /// /!\ When using compiler fast math, this function may fail. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL vec<4, bool, Q> isnan(qua const& x); + + /// Returns true if x holds a positive infinity or negative + /// infinity representation in the underlying implementation's + /// set of floating point representations. Returns false + /// otherwise, including for implementations with no infinity + /// representations. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL vec<4, bool, Q> isinf(qua const& x); + + /// @} +} //namespace glm + +#include "quaternion_common.inl" diff --git a/includes/glm/ext/quaternion_common.inl b/includes/glm/ext/quaternion_common.inl new file mode 100644 index 0000000..ad171f9 --- /dev/null +++ b/includes/glm/ext/quaternion_common.inl @@ -0,0 +1,144 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER qua mix(qua const& x, qua const& y, T a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'mix' only accept floating-point inputs"); + + T const cosTheta = dot(x, y); + + // Perform a linear interpolation when cosTheta is close to 1 to avoid side effect of sin(angle) becoming a zero denominator + if(cosTheta > static_cast(1) - epsilon()) + { + // Linear interpolation + return qua::wxyz( + mix(x.w, y.w, a), + mix(x.x, y.x, a), + mix(x.y, y.y, a), + mix(x.z, y.z, a)); + } + else + { + // Essential Mathematics, page 467 + T angle = acos(cosTheta); + return (sin((static_cast(1) - a) * angle) * x + sin(a * angle) * y) / sin(angle); + } + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua lerp(qua const& x, qua const& y, T a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'lerp' only accept floating-point inputs"); + + // Lerp is only defined in [0, 1] + assert(a >= static_cast(0)); + assert(a <= static_cast(1)); + + return x * (static_cast(1) - a) + (y * a); + } + + template + GLM_FUNC_QUALIFIER qua slerp(qua const& x, qua const& y, T a) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'slerp' only accept floating-point inputs"); + + qua z = y; + + T cosTheta = dot(x, y); + + // If cosTheta < 0, the interpolation will take the long way around the sphere. + // To fix this, one quat must be negated. 
+ if(cosTheta < static_cast(0)) + { + z = -y; + cosTheta = -cosTheta; + } + + // Perform a linear interpolation when cosTheta is close to 1 to avoid side effect of sin(angle) becoming a zero denominator + if(cosTheta > static_cast(1) - epsilon()) + { + // Linear interpolation + return qua::wxyz( + mix(x.w, z.w, a), + mix(x.x, z.x, a), + mix(x.y, z.y, a), + mix(x.z, z.z, a)); + } + else + { + // Essential Mathematics, page 467 + T angle = acos(cosTheta); + return (sin((static_cast(1) - a) * angle) * x + sin(a * angle) * z) / sin(angle); + } + } + + template + GLM_FUNC_QUALIFIER qua slerp(qua const& x, qua const& y, T a, S k) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'slerp' only accept floating-point inputs"); + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'slerp' only accept integer for spin count"); + + qua z = y; + + T cosTheta = dot(x, y); + + // If cosTheta < 0, the interpolation will take the long way around the sphere. + // To fix this, one quat must be negated. + if (cosTheta < static_cast(0)) + { + z = -y; + cosTheta = -cosTheta; + } + + // Perform a linear interpolation when cosTheta is close to 1 to avoid side effect of sin(angle) becoming a zero denominator + if (cosTheta > static_cast(1) - epsilon()) + { + // Linear interpolation + return qua::wxyz( + mix(x.w, z.w, a), + mix(x.x, z.x, a), + mix(x.y, z.y, a), + mix(x.z, z.z, a)); + } + else + { + // Graphics Gems III, page 96 + T angle = acos(cosTheta); + T phi = angle + static_cast(k) * glm::pi(); + return (sin(angle - a * phi)* x + sin(a * phi) * z) / sin(angle); + } + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua conjugate(qua const& q) + { + return qua::wxyz(q.w, -q.x, -q.y, -q.z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua inverse(qua const& q) + { + return conjugate(q) / dot(q, q); + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> isnan(qua const& q) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isnan' only accept floating-point inputs"); + + return vec<4, bool, Q>(isnan(q.x), isnan(q.y), isnan(q.z), isnan(q.w)); + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> isinf(qua const& q) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isinf' only accept floating-point inputs"); + + return vec<4, bool, Q>(isinf(q.x), isinf(q.y), isinf(q.z), isinf(q.w)); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "quaternion_common_simd.inl" +#endif + diff --git a/includes/glm/ext/quaternion_common_simd.inl b/includes/glm/ext/quaternion_common_simd.inl new file mode 100644 index 0000000..ddfc8a4 --- /dev/null +++ b/includes/glm/ext/quaternion_common_simd.inl @@ -0,0 +1,18 @@ +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +namespace glm{ +namespace detail +{ + template + struct compute_dot, float, true> + { + static GLM_FUNC_QUALIFIER float call(qua const& x, qua const& y) + { + return _mm_cvtss_f32(glm_vec1_dot(x.data, y.data)); + } + }; +}//namespace detail +}//namespace glm + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT + diff --git a/includes/glm/ext/quaternion_double.hpp b/includes/glm/ext/quaternion_double.hpp new file mode 100644 index 0000000..63b24de --- /dev/null +++ b/includes/glm/ext/quaternion_double.hpp @@ -0,0 +1,39 @@ +/// @ref ext_quaternion_double +/// @file glm/ext/quaternion_double.hpp +/// +/// @defgroup ext_quaternion_double GLM_EXT_quaternion_double +/// @ingroup ext +/// +/// Exposes double-precision floating point quaternion type. 
+/// +/// Include to use the features of this extension. +/// +/// @see ext_quaternion_float +/// @see ext_quaternion_double_precision +/// @see ext_quaternion_common +/// @see ext_quaternion_exponential +/// @see ext_quaternion_geometric +/// @see ext_quaternion_relational +/// @see ext_quaternion_transform +/// @see ext_quaternion_trigonometric + +#pragma once + +// Dependency: +#include "../detail/type_quat.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_double extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_double + /// @{ + + /// Quaternion of double-precision floating-point numbers. + typedef qua dquat; + + /// @} +} //namespace glm + diff --git a/includes/glm/ext/quaternion_double_precision.hpp b/includes/glm/ext/quaternion_double_precision.hpp new file mode 100644 index 0000000..8aa24a1 --- /dev/null +++ b/includes/glm/ext/quaternion_double_precision.hpp @@ -0,0 +1,42 @@ +/// @ref ext_quaternion_double_precision +/// @file glm/ext/quaternion_double_precision.hpp +/// +/// @defgroup ext_quaternion_double_precision GLM_EXT_quaternion_double_precision +/// @ingroup ext +/// +/// Exposes double-precision floating point quaternion type with various precision in term of ULPs. +/// +/// Include to use the features of this extension. + +#pragma once + +// Dependency: +#include "../detail/type_quat.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_double_precision extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_double_precision + /// @{ + + /// Quaternion of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see ext_quaternion_double_precision + typedef qua lowp_dquat; + + /// Quaternion of medium double-qualifier floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see ext_quaternion_double_precision + typedef qua mediump_dquat; + + /// Quaternion of high double-qualifier floating-point numbers using high precision arithmetic in term of ULPs. + /// + /// @see ext_quaternion_double_precision + typedef qua highp_dquat; + + /// @} +} //namespace glm + diff --git a/includes/glm/ext/quaternion_exponential.hpp b/includes/glm/ext/quaternion_exponential.hpp new file mode 100644 index 0000000..affe297 --- /dev/null +++ b/includes/glm/ext/quaternion_exponential.hpp @@ -0,0 +1,63 @@ +/// @ref ext_quaternion_exponential +/// @file glm/ext/quaternion_exponential.hpp +/// +/// @defgroup ext_quaternion_exponential GLM_EXT_quaternion_exponential +/// @ingroup ext +/// +/// Provides exponential functions for quaternion types +/// +/// Include to use the features of this extension. +/// +/// @see core_exponential +/// @see ext_quaternion_float +/// @see ext_quaternion_double + +#pragma once + +// Dependency: +#include "../common.hpp" +#include "../trigonometric.hpp" +#include "../geometric.hpp" +#include "../ext/scalar_constants.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_exponential extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_transform + /// @{ + + /// Returns a exponential of a quaternion. 
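+	/// As implemented in quaternion_exponential.inl, the scalar part of q is
+	/// ignored: for a pure quaternion (0, v) the result is
+	/// (cos|v|, sin|v| * v / |v|), and a near-zero v yields the identity quaternion.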
+ /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL qua exp(qua const& q); + + /// Returns a logarithm of a quaternion + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL qua log(qua const& q); + + /// Returns a quaternion raised to a power. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL qua pow(qua const& q, T y); + + /// Returns the square root of a quaternion + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL qua sqrt(qua const& q); + + /// @} +} //namespace glm + +#include "quaternion_exponential.inl" diff --git a/includes/glm/ext/quaternion_exponential.inl b/includes/glm/ext/quaternion_exponential.inl new file mode 100644 index 0000000..8a9d774 --- /dev/null +++ b/includes/glm/ext/quaternion_exponential.inl @@ -0,0 +1,89 @@ +#include "scalar_constants.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER qua exp(qua const& q) + { + vec<3, T, Q> u(q.x, q.y, q.z); + T const Angle = glm::length(u); + if (Angle < epsilon()) + return qua(); + + vec<3, T, Q> const v(u / Angle); + return qua(cos(Angle), sin(Angle) * v); + } + + template + GLM_FUNC_QUALIFIER qua log(qua const& q) + { + vec<3, T, Q> u(q.x, q.y, q.z); + T Vec3Len = length(u); + + if (Vec3Len < epsilon()) + { + if(q.w > static_cast(0)) + return qua::wxyz(log(q.w), static_cast(0), static_cast(0), static_cast(0)); + else if(q.w < static_cast(0)) + return qua::wxyz(log(-q.w), pi(), static_cast(0), static_cast(0)); + else + return qua::wxyz(std::numeric_limits::infinity(), std::numeric_limits::infinity(), std::numeric_limits::infinity(), std::numeric_limits::infinity()); + } + else + { + T t = atan(Vec3Len, T(q.w)) / Vec3Len; + T QuatLen2 = Vec3Len * Vec3Len + q.w * q.w; + return qua::wxyz(static_cast(0.5) * log(QuatLen2), t * q.x, t * q.y, t * q.z); + } + } + + template + GLM_FUNC_QUALIFIER qua pow(qua const& x, T y) + { + //Raising to the power of 0 should yield 1 + //Needed to prevent a division by 0 error later on + if(y > -epsilon() && y < epsilon()) + return qua::wxyz(1,0,0,0); + + //To deal with non-unit quaternions + T magnitude = sqrt(x.x * x.x + x.y * x.y + x.z * x.z + x.w *x.w); + + T Angle; + if(abs(x.w / magnitude) > cos_one_over_two()) + { + //Scalar component is close to 1; using it to recover angle would lose precision + //Instead, we use the non-scalar components since sin() is accurate around 0 + + //Prevent a division by 0 error later on + T VectorMagnitude = x.x * x.x + x.y * x.y + x.z * x.z; + //Despite the compiler might say, we actually want to compare + //VectorMagnitude to 0. here; we could use denorm_int() compiling a + //project with unsafe maths optimizations might make the comparison + //always false, even when VectorMagnitude is 0. 
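+			//std::numeric_limits<T>::min() is the smallest positive normal value, so this
+			//branch treats a denormal-or-zero vector part as having no usable rotation
+			//axis and falls back to an ordinary real-valued pow on the scalar part.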
+ if (VectorMagnitude < std::numeric_limits::min()) { + //Equivalent to raising a real number to a power + return qua::wxyz(pow(x.w, y), 0, 0, 0); + } + + Angle = asin(sqrt(VectorMagnitude) / magnitude); + } + else + { + //Scalar component is small, shouldn't cause loss of precision + Angle = acos(x.w / magnitude); + } + + T NewAngle = Angle * y; + T Div = sin(NewAngle) / sin(Angle); + T Mag = pow(magnitude, y - static_cast(1)); + return qua::wxyz(cos(NewAngle) * magnitude * Mag, x.x * Div * Mag, x.y * Div * Mag, x.z * Div * Mag); + } + + template + GLM_FUNC_QUALIFIER qua sqrt(qua const& x) + { + return pow(x, static_cast(0.5)); + } +}//namespace glm + + diff --git a/includes/glm/ext/quaternion_float.hpp b/includes/glm/ext/quaternion_float.hpp new file mode 100644 index 0000000..ca42a60 --- /dev/null +++ b/includes/glm/ext/quaternion_float.hpp @@ -0,0 +1,39 @@ +/// @ref ext_quaternion_float +/// @file glm/ext/quaternion_float.hpp +/// +/// @defgroup ext_quaternion_float GLM_EXT_quaternion_float +/// @ingroup ext +/// +/// Exposes single-precision floating point quaternion type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_quaternion_double +/// @see ext_quaternion_float_precision +/// @see ext_quaternion_common +/// @see ext_quaternion_exponential +/// @see ext_quaternion_geometric +/// @see ext_quaternion_relational +/// @see ext_quaternion_transform +/// @see ext_quaternion_trigonometric + +#pragma once + +// Dependency: +#include "../detail/type_quat.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_float extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_float + /// @{ + + /// Quaternion of single-precision floating-point numbers. + typedef qua quat; + + /// @} +} //namespace glm + diff --git a/includes/glm/ext/quaternion_float_precision.hpp b/includes/glm/ext/quaternion_float_precision.hpp new file mode 100644 index 0000000..f9e4f5c --- /dev/null +++ b/includes/glm/ext/quaternion_float_precision.hpp @@ -0,0 +1,36 @@ +/// @ref ext_quaternion_float_precision +/// @file glm/ext/quaternion_float_precision.hpp +/// +/// @defgroup ext_quaternion_float_precision GLM_EXT_quaternion_float_precision +/// @ingroup ext +/// +/// Exposes single-precision floating point quaternion type with various precision in term of ULPs. +/// +/// Include to use the features of this extension. + +#pragma once + +// Dependency: +#include "../detail/type_quat.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_float_precision extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_float_precision + /// @{ + + /// Quaternion of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef qua lowp_quat; + + /// Quaternion of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef qua mediump_quat; + + /// Quaternion of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
+ typedef qua highp_quat; + + /// @} +} //namespace glm + diff --git a/includes/glm/ext/quaternion_geometric.hpp b/includes/glm/ext/quaternion_geometric.hpp new file mode 100644 index 0000000..6a2403f --- /dev/null +++ b/includes/glm/ext/quaternion_geometric.hpp @@ -0,0 +1,70 @@ +/// @ref ext_quaternion_geometric +/// @file glm/ext/quaternion_geometric.hpp +/// +/// @defgroup ext_quaternion_geometric GLM_EXT_quaternion_geometric +/// @ingroup ext +/// +/// Provides geometric functions for quaternion types +/// +/// Include to use the features of this extension. +/// +/// @see core_func_geometric +/// @see ext_quaternion_float +/// @see ext_quaternion_double + +#pragma once + +// Dependency: +#include "../geometric.hpp" +#include "../exponential.hpp" +#include "../ext/vector_relational.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_geometric extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_geometric + /// @{ + + /// Returns the norm of a quaternions + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_quaternion_geometric + template + GLM_FUNC_DECL T length(qua const& q); + + /// Returns the normalized quaternion. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_quaternion_geometric + template + GLM_FUNC_DECL qua normalize(qua const& q); + + /// Returns dot product of q1 and q2, i.e., q1[0] * q2[0] + q1[1] * q2[1] + ... + /// + /// @tparam T Floating-point scalar types. + /// @tparam Q Value from qualifier enum + /// + /// @see ext_quaternion_geometric + template + GLM_FUNC_DECL GLM_CONSTEXPR T dot(qua const& x, qua const& y); + + /// Compute a cross product. 
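+	/// For quaternions this is the Hamilton product q1 * q2 (see the expansion
+	/// in quaternion_geometric.inl below).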
+ /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_quaternion_geometric + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua cross(qua const& q1, qua const& q2); + + /// @} +} //namespace glm + +#include "quaternion_geometric.inl" diff --git a/includes/glm/ext/quaternion_geometric.inl b/includes/glm/ext/quaternion_geometric.inl new file mode 100644 index 0000000..88dc4d6 --- /dev/null +++ b/includes/glm/ext/quaternion_geometric.inl @@ -0,0 +1,36 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T dot(qua const& x, qua const& y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'dot' accepts only floating-point inputs"); + return detail::compute_dot, T, detail::is_aligned::value>::call(x, y); + } + + template + GLM_FUNC_QUALIFIER T length(qua const& q) + { + return glm::sqrt(dot(q, q)); + } + + template + GLM_FUNC_QUALIFIER qua normalize(qua const& q) + { + T len = length(q); + if(len <= static_cast(0)) // Problem + return qua::wxyz(static_cast(1), static_cast(0), static_cast(0), static_cast(0)); + T oneOverLen = static_cast(1) / len; + return qua::wxyz(q.w * oneOverLen, q.x * oneOverLen, q.y * oneOverLen, q.z * oneOverLen); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua cross(qua const& q1, qua const& q2) + { + return qua::wxyz( + q1.w * q2.w - q1.x * q2.x - q1.y * q2.y - q1.z * q2.z, + q1.w * q2.x + q1.x * q2.w + q1.y * q2.z - q1.z * q2.y, + q1.w * q2.y + q1.y * q2.w + q1.z * q2.x - q1.x * q2.z, + q1.w * q2.z + q1.z * q2.w + q1.x * q2.y - q1.y * q2.x); + } +}//namespace glm + diff --git a/includes/glm/ext/quaternion_relational.hpp b/includes/glm/ext/quaternion_relational.hpp new file mode 100644 index 0000000..7aa121d --- /dev/null +++ b/includes/glm/ext/quaternion_relational.hpp @@ -0,0 +1,62 @@ +/// @ref ext_quaternion_relational +/// @file glm/ext/quaternion_relational.hpp +/// +/// @defgroup ext_quaternion_relational GLM_EXT_quaternion_relational +/// @ingroup ext +/// +/// Exposes comparison functions for quaternion types that take a user defined epsilon values. +/// +/// Include to use the features of this extension. +/// +/// @see core_vector_relational +/// @see ext_vector_relational +/// @see ext_matrix_relational +/// @see ext_quaternion_float +/// @see ext_quaternion_double + +#pragma once + +// Dependency: +#include "../vector_relational.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_relational extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_relational + /// @{ + + /// Returns the component-wise comparison of result x == y. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL vec<4, bool, Q> equal(qua const& x, qua const& y); + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL vec<4, bool, Q> equal(qua const& x, qua const& y, T epsilon); + + /// Returns the component-wise comparison of result x != y. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL vec<4, bool, Q> notEqual(qua const& x, qua const& y); + + /// Returns the component-wise comparison of |x - y| >= epsilon. 
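+	/// Note that q and -q represent the same rotation yet still compare as
+	/// different here; an illustrative tolerant check is
+	/// all(equal(x, y, eps)) || all(equal(x, -y, eps)).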
+ /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL vec<4, bool, Q> notEqual(qua const& x, qua const& y, T epsilon); + + /// @} +} //namespace glm + +#include "quaternion_relational.inl" diff --git a/includes/glm/ext/quaternion_relational.inl b/includes/glm/ext/quaternion_relational.inl new file mode 100644 index 0000000..b1713e9 --- /dev/null +++ b/includes/glm/ext/quaternion_relational.inl @@ -0,0 +1,35 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> equal(qua const& x, qua const& y) + { + vec<4, bool, Q> Result; + for(length_t i = 0; i < x.length(); ++i) + Result[i] = x[i] == y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> equal(qua const& x, qua const& y, T epsilon) + { + vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w); + return lessThan(abs(v), vec<4, T, Q>(epsilon)); + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> notEqual(qua const& x, qua const& y) + { + vec<4, bool, Q> Result; + for(length_t i = 0; i < x.length(); ++i) + Result[i] = x[i] != y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> notEqual(qua const& x, qua const& y, T epsilon) + { + vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w); + return greaterThanEqual(abs(v), vec<4, T, Q>(epsilon)); + } +}//namespace glm + diff --git a/includes/glm/ext/quaternion_transform.hpp b/includes/glm/ext/quaternion_transform.hpp new file mode 100644 index 0000000..a9cc5c2 --- /dev/null +++ b/includes/glm/ext/quaternion_transform.hpp @@ -0,0 +1,47 @@ +/// @ref ext_quaternion_transform +/// @file glm/ext/quaternion_transform.hpp +/// +/// @defgroup ext_quaternion_transform GLM_EXT_quaternion_transform +/// @ingroup ext +/// +/// Provides transformation functions for quaternion types +/// +/// Include to use the features of this extension. +/// +/// @see ext_quaternion_float +/// @see ext_quaternion_double +/// @see ext_quaternion_exponential +/// @see ext_quaternion_geometric +/// @see ext_quaternion_relational +/// @see ext_quaternion_trigonometric + +#pragma once + +// Dependency: +#include "../common.hpp" +#include "../trigonometric.hpp" +#include "../geometric.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_transform extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_transform + /// @{ + + /// Rotates a quaternion from a vector of 3 components axis and an angle. + /// + /// @param q Source orientation + /// @param angle Angle expressed in radians. 
+ /// @param axis Axis of the rotation + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL qua rotate(qua const& q, T const& angle, vec<3, T, Q> const& axis); + /// @} +} //namespace glm + +#include "quaternion_transform.inl" diff --git a/includes/glm/ext/quaternion_transform.inl b/includes/glm/ext/quaternion_transform.inl new file mode 100644 index 0000000..7e773fb --- /dev/null +++ b/includes/glm/ext/quaternion_transform.inl @@ -0,0 +1,24 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER qua rotate(qua const& q, T const& angle, vec<3, T, Q> const& v) + { + vec<3, T, Q> Tmp = v; + + // Axis of rotation must be normalised + T len = glm::length(Tmp); + if(abs(len - static_cast(1)) > static_cast(0.001)) + { + T oneOverLen = static_cast(1) / len; + Tmp.x *= oneOverLen; + Tmp.y *= oneOverLen; + Tmp.z *= oneOverLen; + } + + T const AngleRad(angle); + T const Sin = sin(AngleRad * static_cast(0.5)); + + return q * qua::wxyz(cos(AngleRad * static_cast(0.5)), Tmp.x * Sin, Tmp.y * Sin, Tmp.z * Sin); + } +}//namespace glm + diff --git a/includes/glm/ext/quaternion_trigonometric.hpp b/includes/glm/ext/quaternion_trigonometric.hpp new file mode 100644 index 0000000..574a704 --- /dev/null +++ b/includes/glm/ext/quaternion_trigonometric.hpp @@ -0,0 +1,65 @@ +/// @ref ext_quaternion_trigonometric +/// @file glm/ext/quaternion_trigonometric.hpp +/// +/// @defgroup ext_quaternion_trigonometric GLM_EXT_quaternion_trigonometric +/// @ingroup ext +/// +/// Provides trigonometric functions for quaternion types +/// +/// Include to use the features of this extension. +/// +/// @see ext_quaternion_float +/// @see ext_quaternion_double +/// @see ext_quaternion_exponential +/// @see ext_quaternion_geometric +/// @see ext_quaternion_relational +/// @see ext_quaternion_transform + +#pragma once + +// Dependency: +#include "../trigonometric.hpp" +#include "../exponential.hpp" +#include "scalar_constants.hpp" +#include "vector_relational.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_quaternion_trigonometric extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_quaternion_trigonometric + /// @{ + + /// Returns the quaternion rotation angle. + /// + /// @param x A normalized quaternion. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL T angle(qua const& x); + + /// Returns the q rotation axis. + /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL vec<3, T, Q> axis(qua const& x); + + /// Build a quaternion from an angle and a normalized axis. + /// + /// @param angle Angle expressed in radians. + /// @param axis Axis of the quaternion, must be normalized. 
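+	/// Usage sketch (illustrative): angleAxis(glm::radians(90.0f), glm::vec3(0.0f, 0.0f, 1.0f))
+	/// builds a rotation of 90 degrees about +Z.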
+ /// + /// @tparam T A floating-point scalar type + /// @tparam Q A value from qualifier enum + template + GLM_FUNC_DECL qua angleAxis(T const& angle, vec<3, T, Q> const& axis); + + /// @} +} //namespace glm + +#include "quaternion_trigonometric.inl" diff --git a/includes/glm/ext/quaternion_trigonometric.inl b/includes/glm/ext/quaternion_trigonometric.inl new file mode 100644 index 0000000..896449a --- /dev/null +++ b/includes/glm/ext/quaternion_trigonometric.inl @@ -0,0 +1,37 @@ +#include "scalar_constants.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER T angle(qua const& x) + { + if (abs(x.w) > cos_one_over_two()) + { + T const a = asin(sqrt(x.x * x.x + x.y * x.y + x.z * x.z)) * static_cast(2); + if(x.w < static_cast(0)) + return pi() * static_cast(2) - a; + return a; + } + + return acos(x.w) * static_cast(2); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> axis(qua const& x) + { + T const tmp1 = static_cast(1) - x.w * x.w; + if(tmp1 <= static_cast(0)) + return vec<3, T, Q>(0, 0, 1); + T const tmp2 = static_cast(1) / sqrt(tmp1); + return vec<3, T, Q>(x.x * tmp2, x.y * tmp2, x.z * tmp2); + } + + template + GLM_FUNC_QUALIFIER qua angleAxis(T const& angle, vec<3, T, Q> const& v) + { + T const a(angle); + T const s = glm::sin(a * static_cast(0.5)); + + return qua(glm::cos(a * static_cast(0.5)), v * s); + } +}//namespace glm diff --git a/includes/glm/ext/scalar_common.hpp b/includes/glm/ext/scalar_common.hpp new file mode 100644 index 0000000..df04b6b --- /dev/null +++ b/includes/glm/ext/scalar_common.hpp @@ -0,0 +1,181 @@ +/// @ref ext_scalar_common +/// @file glm/ext/scalar_common.hpp +/// +/// @defgroup ext_scalar_common GLM_EXT_scalar_common +/// @ingroup ext +/// +/// Exposes min and max functions for 3 to 4 scalar parameters. +/// +/// Include to use the features of this extension. +/// +/// @see core_func_common +/// @see ext_vector_common + +#pragma once + +// Dependency: +#include "../common.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_scalar_common extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_scalar_common + /// @{ + + /// Returns the minimum component-wise values of 3 inputs + /// + /// @tparam T A floating-point scalar type. + /// + /// @see ext_scalar_common + template + GLM_FUNC_DECL T min(T a, T b, T c); + + /// Returns the minimum component-wise values of 4 inputs + /// + /// @tparam T A floating-point scalar type. + /// + /// @see ext_scalar_common + template + GLM_FUNC_DECL T min(T a, T b, T c, T d); + + /// Returns the maximum component-wise values of 3 inputs + /// + /// @tparam T A floating-point scalar type. + /// + /// @see ext_scalar_common + template + GLM_FUNC_DECL T max(T a, T b, T c); + + /// Returns the maximum component-wise values of 4 inputs + /// + /// @tparam T A floating-point scalar type. + /// + /// @see ext_scalar_common + template + GLM_FUNC_DECL T max(T a, T b, T c, T d); + + /// Returns the minimum component-wise values of 2 inputs. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam T A floating-point scalar type. + /// + /// @see std::fmin documentation + /// @see ext_scalar_common + template + GLM_FUNC_DECL T fmin(T a, T b); + + /// Returns the minimum component-wise values of 3 inputs. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam T A floating-point scalar type. 
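+	/// For the three- and four-argument overloads, NaN arguments are simply
+	/// dropped from the comparison (see scalar_common.inl).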
+ /// + /// @see std::fmin documentation + /// @see ext_scalar_common + template + GLM_FUNC_DECL T fmin(T a, T b, T c); + + /// Returns the minimum component-wise values of 4 inputs. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam T A floating-point scalar type. + /// + /// @see std::fmin documentation + /// @see ext_scalar_common + template + GLM_FUNC_DECL T fmin(T a, T b, T c, T d); + + /// Returns the maximum component-wise values of 2 inputs. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam T A floating-point scalar type. + /// + /// @see std::fmax documentation + /// @see ext_scalar_common + template + GLM_FUNC_DECL T fmax(T a, T b); + + /// Returns the maximum component-wise values of 3 inputs. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam T A floating-point scalar type. + /// + /// @see std::fmax documentation + /// @see ext_scalar_common + template + GLM_FUNC_DECL T fmax(T a, T b, T C); + + /// Returns the maximum component-wise values of 4 inputs. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam T A floating-point scalar type. + /// + /// @see std::fmax documentation + /// @see ext_scalar_common + template + GLM_FUNC_DECL T fmax(T a, T b, T C, T D); + + /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam genType Floating-point scalar types. + /// + /// @see ext_scalar_common + template + GLM_FUNC_DECL genType fclamp(genType x, genType minVal, genType maxVal); + + /// Simulate GL_CLAMP OpenGL wrap mode + /// + /// @tparam genType Floating-point scalar types. + /// + /// @see ext_scalar_common extension. + template + GLM_FUNC_DECL genType clamp(genType const& Texcoord); + + /// Simulate GL_REPEAT OpenGL wrap mode + /// + /// @tparam genType Floating-point scalar types. + /// + /// @see ext_scalar_common extension. + template + GLM_FUNC_DECL genType repeat(genType const& Texcoord); + + /// Simulate GL_MIRRORED_REPEAT OpenGL wrap mode + /// + /// @tparam genType Floating-point scalar types. + /// + /// @see ext_scalar_common extension. + template + GLM_FUNC_DECL genType mirrorClamp(genType const& Texcoord); + + /// Simulate GL_MIRROR_REPEAT OpenGL wrap mode + /// + /// @tparam genType Floating-point scalar types. + /// + /// @see ext_scalar_common extension. + template + GLM_FUNC_DECL genType mirrorRepeat(genType const& Texcoord); + + /// Returns a value equal to the nearest integer to x. + /// The fraction 0.5 will round in a direction chosen by the + /// implementation, presumably the direction that is fastest. + /// + /// @param x The values of the argument must be greater or equal to zero. + /// @tparam genType floating point scalar types. + /// + /// @see GLSL round man page + /// @see ext_scalar_common extension. + template + GLM_FUNC_DECL int iround(genType const& x); + + /// Returns a value equal to the nearest integer to x. + /// The fraction 0.5 will round in a direction chosen by the + /// implementation, presumably the direction that is fastest. + /// + /// @param x The values of the argument must be greater or equal to zero. + /// @tparam genType floating point scalar types. + /// + /// @see GLSL round man page + /// @see ext_scalar_common extension. 
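+	/// As implemented in scalar_common.inl, the result is static_cast<uint>(x + 0.5),
+	/// which is why negative inputs are rejected by an assert rather than rounded.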
+ template + GLM_FUNC_DECL uint uround(genType const& x); + + /// @} +}//namespace glm + +#include "scalar_common.inl" diff --git a/includes/glm/ext/scalar_common.inl b/includes/glm/ext/scalar_common.inl new file mode 100644 index 0000000..3d09fef --- /dev/null +++ b/includes/glm/ext/scalar_common.inl @@ -0,0 +1,170 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER T min(T a, T b, T c) + { + return glm::min(glm::min(a, b), c); + } + + template + GLM_FUNC_QUALIFIER T min(T a, T b, T c, T d) + { + return glm::min(glm::min(a, b), glm::min(c, d)); + } + + template + GLM_FUNC_QUALIFIER T max(T a, T b, T c) + { + return glm::max(glm::max(a, b), c); + } + + template + GLM_FUNC_QUALIFIER T max(T a, T b, T c, T d) + { + return glm::max(glm::max(a, b), glm::max(c, d)); + } + +# if GLM_HAS_CXX11_STL + using std::fmin; +# else + template + GLM_FUNC_QUALIFIER T fmin(T a, T b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fmin' only accept floating-point input"); + + if (isnan(a)) + return b; + return min(a, b); + } +# endif + + template + GLM_FUNC_QUALIFIER T fmin(T a, T b, T c) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fmin' only accept floating-point input"); + + if (isnan(a)) + return fmin(b, c); + if (isnan(b)) + return fmin(a, c); + if (isnan(c)) + return min(a, b); + return min(a, b, c); + } + + template + GLM_FUNC_QUALIFIER T fmin(T a, T b, T c, T d) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fmin' only accept floating-point input"); + + if (isnan(a)) + return fmin(b, c, d); + if (isnan(b)) + return min(a, fmin(c, d)); + if (isnan(c)) + return fmin(min(a, b), d); + if (isnan(d)) + return min(a, b, c); + return min(a, b, c, d); + } + + +# if GLM_HAS_CXX11_STL + using std::fmax; +# else + template + GLM_FUNC_QUALIFIER T fmax(T a, T b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fmax' only accept floating-point input"); + + if (isnan(a)) + return b; + return max(a, b); + } +# endif + + template + GLM_FUNC_QUALIFIER T fmax(T a, T b, T c) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fmax' only accept floating-point input"); + + if (isnan(a)) + return fmax(b, c); + if (isnan(b)) + return fmax(a, c); + if (isnan(c)) + return max(a, b); + return max(a, b, c); + } + + template + GLM_FUNC_QUALIFIER T fmax(T a, T b, T c, T d) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fmax' only accept floating-point input"); + + if (isnan(a)) + return fmax(b, c, d); + if (isnan(b)) + return max(a, fmax(c, d)); + if (isnan(c)) + return fmax(max(a, b), d); + if (isnan(d)) + return max(a, b, c); + return max(a, b, c, d); + } + + // fclamp + template + GLM_FUNC_QUALIFIER genType fclamp(genType x, genType minVal, genType maxVal) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fclamp' only accept floating-point or integer inputs"); + return fmin(fmax(x, minVal), maxVal); + } + + template + GLM_FUNC_QUALIFIER genType clamp(genType const& Texcoord) + { + return glm::clamp(Texcoord, static_cast(0), static_cast(1)); + } + + template + GLM_FUNC_QUALIFIER genType repeat(genType const& Texcoord) + { + return glm::fract(Texcoord); + } + + template + GLM_FUNC_QUALIFIER genType mirrorClamp(genType const& Texcoord) + { + return glm::fract(glm::abs(Texcoord)); + } + + template + GLM_FUNC_QUALIFIER genType 
mirrorRepeat(genType const& Texcoord) + { + genType const Abs = glm::abs(Texcoord); + genType const Clamp = glm::mod(glm::floor(Abs), static_cast(2)); + genType const Floor = glm::floor(Abs); + genType const Rest = Abs - Floor; + genType const Mirror = Clamp + Rest; + return mix(Rest, static_cast(1) - Rest, Mirror >= static_cast(1)); + } + + template + GLM_FUNC_QUALIFIER int iround(genType const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'iround' only accept floating-point inputs"); + assert(static_cast(0.0) <= x); + + return static_cast(x + static_cast(0.5)); + } + + template + GLM_FUNC_QUALIFIER uint uround(genType const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'uround' only accept floating-point inputs"); + assert(static_cast(0.0) <= x); + + return static_cast(x + static_cast(0.5)); + } +}//namespace glm diff --git a/includes/glm/ext/scalar_constants.hpp b/includes/glm/ext/scalar_constants.hpp new file mode 100644 index 0000000..74e210d --- /dev/null +++ b/includes/glm/ext/scalar_constants.hpp @@ -0,0 +1,40 @@ +/// @ref ext_scalar_constants +/// @file glm/ext/scalar_constants.hpp +/// +/// @defgroup ext_scalar_constants GLM_EXT_scalar_constants +/// @ingroup ext +/// +/// Provides a list of constants and precomputed useful values. +/// +/// Include to use the features of this extension. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_scalar_constants extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_scalar_constants + /// @{ + + /// Return the epsilon constant for floating point types. + template + GLM_FUNC_DECL GLM_CONSTEXPR genType epsilon(); + + /// Return the pi constant for floating point types. + template + GLM_FUNC_DECL GLM_CONSTEXPR genType pi(); + + /// Return the value of cos(1 / 2) for floating point types. + template + GLM_FUNC_DECL GLM_CONSTEXPR genType cos_one_over_two(); + + /// @} +} //namespace glm + +#include "scalar_constants.inl" diff --git a/includes/glm/ext/scalar_constants.inl b/includes/glm/ext/scalar_constants.inl new file mode 100644 index 0000000..b928e51 --- /dev/null +++ b/includes/glm/ext/scalar_constants.inl @@ -0,0 +1,24 @@ +#include + +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType epsilon() + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'epsilon' only accepts floating-point inputs"); + return std::numeric_limits::epsilon(); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType pi() + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'pi' only accepts floating-point inputs"); + return static_cast(3.14159265358979323846264338327950288); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType cos_one_over_two() + { + return genType(0.877582561890372716130286068203503191); + } +} //namespace glm diff --git a/includes/glm/ext/scalar_int_sized.hpp b/includes/glm/ext/scalar_int_sized.hpp new file mode 100644 index 0000000..8e9c511 --- /dev/null +++ b/includes/glm/ext/scalar_int_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_scalar_int_sized +/// @file glm/ext/scalar_int_sized.hpp +/// +/// @defgroup ext_scalar_int_sized GLM_EXT_scalar_int_sized +/// @ingroup ext +/// +/// Exposes sized signed integer scalar types. +/// +/// Include to use the features of this extension. 
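+/// int8, int16 and int32 below map to the corresponding <cstdint> types when
+/// GLM_HAS_EXTENDED_INTEGER_TYPE is available, and to plain signed char,
+/// short and int otherwise.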
+/// +/// @see ext_scalar_uint_sized + +#pragma once + +#include "../detail/setup.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_scalar_int_sized extension included") +#endif + +namespace glm{ +namespace detail +{ +# if GLM_HAS_EXTENDED_INTEGER_TYPE + typedef std::int8_t int8; + typedef std::int16_t int16; + typedef std::int32_t int32; +# else + typedef signed char int8; + typedef signed short int16; + typedef signed int int32; +#endif// + + template<> + struct is_int + { + enum test {value = ~0}; + }; + + template<> + struct is_int + { + enum test {value = ~0}; + }; + + template<> + struct is_int + { + enum test {value = ~0}; + }; +}//namespace detail + + + /// @addtogroup ext_scalar_int_sized + /// @{ + + /// 8 bit signed integer type. + typedef detail::int8 int8; + + /// 16 bit signed integer type. + typedef detail::int16 int16; + + /// 32 bit signed integer type. + typedef detail::int32 int32; + + /// 64 bit signed integer type. + typedef detail::int64 int64; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/scalar_integer.hpp b/includes/glm/ext/scalar_integer.hpp new file mode 100644 index 0000000..a2ca8a2 --- /dev/null +++ b/includes/glm/ext/scalar_integer.hpp @@ -0,0 +1,92 @@ +/// @ref ext_scalar_integer +/// @file glm/ext/scalar_integer.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_scalar_integer GLM_EXT_scalar_integer +/// @ingroup ext +/// +/// Include to use the features of this extension. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/_vectorize.hpp" +#include "../detail/type_float.hpp" +#include "../vector_relational.hpp" +#include "../common.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_scalar_integer extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_scalar_integer + /// @{ + + /// Return true if the value is a power of two number. + /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL bool isPowerOfTwo(genIUType v); + + /// Return the power of two number which value is just higher the input value, + /// round up to a power of two. + /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL genIUType nextPowerOfTwo(genIUType v); + + /// Return the power of two number which value is just lower the input value, + /// round down to a power of two. + /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL genIUType prevPowerOfTwo(genIUType v); + + /// Return true if the 'Value' is a multiple of 'Multiple'. + /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL bool isMultiple(genIUType v, genIUType Multiple); + + /// Higher multiple number of Source. + /// + /// @tparam genIUType Integer scalar or vector types. + /// + /// @param v Source value to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL genIUType nextMultiple(genIUType v, genIUType Multiple); + + /// Lower multiple number of Source. + /// + /// @tparam genIUType Integer scalar or vector types. + /// + /// @param v Source value to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL genIUType prevMultiple(genIUType v, genIUType Multiple); + + /// Returns the bit number of the Nth significant bit set to + /// 1 in the binary representation of value. 
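+	/// Bits are numbered from the least significant bit, starting at 0; for
+	/// example (illustrative), findNSB(0b10110, 2) returns 2, the position of
+	/// the second set bit.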
+ /// If value bitcount is less than the Nth significant bit, -1 will be returned. + /// + /// @tparam genIUType Signed or unsigned integer scalar types. + /// + /// @see ext_scalar_integer + template + GLM_FUNC_DECL int findNSB(genIUType x, int significantBitCount); + + /// @} +} //namespace glm + +#include "scalar_integer.inl" diff --git a/includes/glm/ext/scalar_integer.inl b/includes/glm/ext/scalar_integer.inl new file mode 100644 index 0000000..d416197 --- /dev/null +++ b/includes/glm/ext/scalar_integer.inl @@ -0,0 +1,243 @@ +#include "../integer.hpp" + +namespace glm{ +namespace detail +{ + template + struct compute_ceilShift + { + GLM_FUNC_QUALIFIER static vec call(vec const& v, T) + { + return v; + } + }; + + template + struct compute_ceilShift + { + GLM_FUNC_QUALIFIER static vec call(vec const& v, T Shift) + { + return v | (v >> Shift); + } + }; + + template + struct compute_ceilPowerOfTwo + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + GLM_STATIC_ASSERT(!std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'ceilPowerOfTwo' only accept integer scalar or vector inputs"); + + vec const Sign(sign(x)); + + vec v(abs(x)); + + v = v - static_cast(1); + v = v | (v >> static_cast(1)); + v = v | (v >> static_cast(2)); + v = v | (v >> static_cast(4)); + v = compute_ceilShift= 2>::call(v, 8); + v = compute_ceilShift= 4>::call(v, 16); + v = compute_ceilShift= 8>::call(v, 32); + return (v + static_cast(1)) * Sign; + } + }; + + template + struct compute_ceilPowerOfTwo + { + GLM_FUNC_QUALIFIER static vec call(vec const& x) + { + GLM_STATIC_ASSERT(!std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'ceilPowerOfTwo' only accept integer scalar or vector inputs"); + + vec v(x); + + v = v - static_cast(1); + v = v | (v >> static_cast(1)); + v = v | (v >> static_cast(2)); + v = v | (v >> static_cast(4)); + v = compute_ceilShift= 2>::call(v, 8); + v = compute_ceilShift= 4>::call(v, 16); + v = compute_ceilShift= 8>::call(v, 32); + return v + static_cast(1); + } + }; + + template + struct compute_ceilMultiple{}; + + template<> + struct compute_ceilMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + if(Source > genType(0)) + return Source + (Multiple - std::fmod(Source, Multiple)); + else + return Source + std::fmod(-Source, Multiple); + } + }; + + template<> + struct compute_ceilMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + genType Tmp = Source - genType(1); + return Tmp + (Multiple - (Tmp % Multiple)); + } + }; + + template<> + struct compute_ceilMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + assert(Multiple > genType(0)); + if(Source > genType(0)) + { + genType Tmp = Source - genType(1); + return Tmp + (Multiple - (Tmp % Multiple)); + } + else + return Source + (-Source % Multiple); + } + }; + + template + struct compute_floorMultiple{}; + + template<> + struct compute_floorMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + if(Source >= genType(0)) + return Source - std::fmod(Source, Multiple); + else + return Source - std::fmod(Source, Multiple) - Multiple; + } + }; + + template<> + struct compute_floorMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + if(Source >= genType(0)) + return Source - Source % Multiple; + else + { + genType Tmp = Source + genType(1); + return Tmp - Tmp % 
Multiple - Multiple; + } + } + }; + + template<> + struct compute_floorMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + if(Source >= genType(0)) + return Source - Source % Multiple; + else + { + genType Tmp = Source + genType(1); + return Tmp - Tmp % Multiple - Multiple; + } + } + }; +}//namespace detail + + template + GLM_FUNC_QUALIFIER bool isPowerOfTwo(genIUType Value) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'isPowerOfTwo' only accept integer inputs"); + + genIUType const Result = glm::abs(Value); + return !(Result & (Result - 1)); + } + + template + GLM_FUNC_QUALIFIER genIUType nextPowerOfTwo(genIUType value) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'nextPowerOfTwo' only accept integer inputs"); + + return detail::compute_ceilPowerOfTwo<1, genIUType, defaultp, std::numeric_limits::is_signed>::call(vec<1, genIUType, defaultp>(value)).x; + } + + template + GLM_FUNC_QUALIFIER genIUType prevPowerOfTwo(genIUType value) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'prevPowerOfTwo' only accept integer inputs"); + + return isPowerOfTwo(value) ? value : static_cast(static_cast(1) << static_cast(findMSB(value))); + } + + template + GLM_FUNC_QUALIFIER bool isMultiple(genIUType Value, genIUType Multiple) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'isMultiple' only accept integer inputs"); + + return isMultiple(vec<1, genIUType>(Value), vec<1, genIUType>(Multiple)).x; + } + + template + GLM_FUNC_QUALIFIER genIUType nextMultiple(genIUType Source, genIUType Multiple) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'nextMultiple' only accept integer inputs"); + + return detail::compute_ceilMultiple::is_iec559, std::numeric_limits::is_signed>::call(Source, Multiple); + } + + template + GLM_FUNC_QUALIFIER genIUType prevMultiple(genIUType Source, genIUType Multiple) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'prevMultiple' only accept integer inputs"); + + return detail::compute_floorMultiple::is_iec559, std::numeric_limits::is_signed>::call(Source, Multiple); + } + + template + GLM_FUNC_QUALIFIER int findNSB(genIUType x, int significantBitCount) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findNSB' only accept integer inputs"); + + if(bitCount(x) < significantBitCount) + return -1; + + genIUType const One = static_cast(1); + int bitPos = 0; + + genIUType key = x; + int nBitCount = significantBitCount; + int Step = sizeof(x) * 8 / 2; + while (key > One) + { + genIUType Mask = static_cast((One << Step) - One); + genIUType currentKey = key & Mask; + int currentBitCount = bitCount(currentKey); + if (nBitCount > currentBitCount) + { + nBitCount -= currentBitCount; + bitPos += Step; + key >>= static_cast(Step); + } + else + { + key = key & Mask; + } + + Step >>= 1; + } + + return static_cast(bitPos); + } +}//namespace glm diff --git a/includes/glm/ext/scalar_packing.hpp b/includes/glm/ext/scalar_packing.hpp new file mode 100644 index 0000000..18b85b7 --- /dev/null +++ b/includes/glm/ext/scalar_packing.hpp @@ -0,0 +1,32 @@ +/// @ref ext_scalar_packing +/// @file glm/ext/scalar_packing.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_scalar_packing GLM_EXT_scalar_packing +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// This extension provides a set of function to convert scalar values to packed +/// formats. 
+ +#pragma once + +// Dependency: +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_scalar_packing extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_scalar_packing + /// @{ + + + /// @} +}// namespace glm + +#include "scalar_packing.inl" diff --git a/includes/glm/ext/scalar_packing.inl b/includes/glm/ext/scalar_packing.inl new file mode 100644 index 0000000..e69de29 diff --git a/includes/glm/ext/scalar_reciprocal.hpp b/includes/glm/ext/scalar_reciprocal.hpp new file mode 100644 index 0000000..1c7b81d --- /dev/null +++ b/includes/glm/ext/scalar_reciprocal.hpp @@ -0,0 +1,135 @@ +/// @ref ext_scalar_reciprocal +/// @file glm/ext/scalar_reciprocal.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_scalar_reciprocal GLM_EXT_scalar_reciprocal +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Define secant, cosecant and cotangent functions. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_scalar_reciprocal extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_scalar_reciprocal + /// @{ + + /// Secant function. + /// hypotenuse / adjacent or 1 / cos(x) + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_scalar_reciprocal + template + GLM_FUNC_DECL genType sec(genType angle); + + /// Cosecant function. + /// hypotenuse / opposite or 1 / sin(x) + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_scalar_reciprocal + template + GLM_FUNC_DECL genType csc(genType angle); + + /// Cotangent function. + /// adjacent / opposite or 1 / tan(x) + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_scalar_reciprocal + template + GLM_FUNC_DECL genType cot(genType angle); + + /// Inverse secant function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_scalar_reciprocal + template + GLM_FUNC_DECL genType asec(genType x); + + /// Inverse cosecant function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_scalar_reciprocal + template + GLM_FUNC_DECL genType acsc(genType x); + + /// Inverse cotangent function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_scalar_reciprocal + template + GLM_FUNC_DECL genType acot(genType x); + + /// Secant hyperbolic function. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_scalar_reciprocal + template + GLM_FUNC_DECL genType sech(genType angle); + + /// Cosecant hyperbolic function. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_scalar_reciprocal + template + GLM_FUNC_DECL genType csch(genType angle); + + /// Cotangent hyperbolic function. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_scalar_reciprocal + template + GLM_FUNC_DECL genType coth(genType angle); + + /// Inverse secant hyperbolic function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. 
+ /// + /// @see ext_scalar_reciprocal + template + GLM_FUNC_DECL genType asech(genType x); + + /// Inverse cosecant hyperbolic function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_scalar_reciprocal + template + GLM_FUNC_DECL genType acsch(genType x); + + /// Inverse cotangent hyperbolic function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_scalar_reciprocal + template + GLM_FUNC_DECL genType acoth(genType x); + + /// @} +}//namespace glm + +#include "scalar_reciprocal.inl" diff --git a/includes/glm/ext/scalar_reciprocal.inl b/includes/glm/ext/scalar_reciprocal.inl new file mode 100644 index 0000000..0cd5f87 --- /dev/null +++ b/includes/glm/ext/scalar_reciprocal.inl @@ -0,0 +1,107 @@ +/// @ref ext_scalar_reciprocal + +#include "../trigonometric.hpp" +#include + +namespace glm +{ + // sec + template + GLM_FUNC_QUALIFIER genType sec(genType angle) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'sec' only accept floating-point values"); + return genType(1) / glm::cos(angle); + } + + // csc + template + GLM_FUNC_QUALIFIER genType csc(genType angle) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'csc' only accept floating-point values"); + return genType(1) / glm::sin(angle); + } + + // cot + template + GLM_FUNC_QUALIFIER genType cot(genType angle) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'cot' only accept floating-point values"); + + genType const pi_over_2 = genType(3.1415926535897932384626433832795 / 2.0); + return glm::tan(pi_over_2 - angle); + } + + // asec + template + GLM_FUNC_QUALIFIER genType asec(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'asec' only accept floating-point values"); + return acos(genType(1) / x); + } + + // acsc + template + GLM_FUNC_QUALIFIER genType acsc(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acsc' only accept floating-point values"); + return asin(genType(1) / x); + } + + // acot + template + GLM_FUNC_QUALIFIER genType acot(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acot' only accept floating-point values"); + + genType const pi_over_2 = genType(3.1415926535897932384626433832795 / 2.0); + return pi_over_2 - atan(x); + } + + // sech + template + GLM_FUNC_QUALIFIER genType sech(genType angle) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'sech' only accept floating-point values"); + return genType(1) / glm::cosh(angle); + } + + // csch + template + GLM_FUNC_QUALIFIER genType csch(genType angle) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'csch' only accept floating-point values"); + return genType(1) / glm::sinh(angle); + } + + // coth + template + GLM_FUNC_QUALIFIER genType coth(genType angle) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'coth' only accept floating-point values"); + return glm::cosh(angle) / glm::sinh(angle); + } + + // asech + template + GLM_FUNC_QUALIFIER genType asech(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'asech' only accept floating-point values"); + 
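+		// asech is real-valued only for 0 < x <= 1; for x > 1 or x < 0 the argument
+		// of acosh drops below 1 and the result is NaN.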
return acosh(genType(1) / x); + } + + // acsch + template + GLM_FUNC_QUALIFIER genType acsch(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acsch' only accept floating-point values"); + return asinh(genType(1) / x); + } + + // acoth + template + GLM_FUNC_QUALIFIER genType acoth(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acoth' only accept floating-point values"); + return atanh(genType(1) / x); + } +}//namespace glm diff --git a/includes/glm/ext/scalar_relational.hpp b/includes/glm/ext/scalar_relational.hpp new file mode 100644 index 0000000..e84df17 --- /dev/null +++ b/includes/glm/ext/scalar_relational.hpp @@ -0,0 +1,68 @@ +/// @ref ext_scalar_relational +/// @file glm/ext/scalar_relational.hpp +/// +/// @defgroup ext_scalar_relational GLM_EXT_scalar_relational +/// @ingroup ext +/// +/// Exposes comparison functions for scalar types that take a user defined epsilon values. +/// +/// Include to use the features of this extension. +/// +/// @see core_vector_relational +/// @see ext_vector_relational +/// @see ext_matrix_relational + +#pragma once + +// Dependencies +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_scalar_relational extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_scalar_relational + /// @{ + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is satisfied. + /// + /// @tparam genType Floating-point or integer scalar types + template + GLM_FUNC_DECL GLM_CONSTEXPR bool equal(genType const& x, genType const& y, genType const& epsilon); + + /// Returns the component-wise comparison of |x - y| >= epsilon. + /// True if this expression is not satisfied. + /// + /// @tparam genType Floating-point or integer scalar types + template + GLM_FUNC_DECL GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, genType const& epsilon); + + /// Returns the component-wise comparison between two scalars in term of ULPs. + /// True if this expression is satisfied. + /// + /// @param x First operand. + /// @param y Second operand. + /// @param ULPs Maximum difference in ULPs between the two operators to consider them equal. + /// + /// @tparam genType Floating-point or integer scalar types + template + GLM_FUNC_DECL GLM_CONSTEXPR bool equal(genType const& x, genType const& y, int ULPs); + + /// Returns the component-wise comparison between two scalars in term of ULPs. + /// True if this expression is not satisfied. + /// + /// @param x First operand. + /// @param y Second operand. + /// @param ULPs Maximum difference in ULPs between the two operators to consider them not equal. 
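+	/// Implemented in scalar_relational.inl as !equal(x, y, ULPs).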
+ /// + /// @tparam genType Floating-point or integer scalar types + template + GLM_FUNC_DECL GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, int ULPs); + + /// @} +}//namespace glm + +#include "scalar_relational.inl" diff --git a/includes/glm/ext/scalar_relational.inl b/includes/glm/ext/scalar_relational.inl new file mode 100644 index 0000000..c85583e --- /dev/null +++ b/includes/glm/ext/scalar_relational.inl @@ -0,0 +1,40 @@ +#include "../common.hpp" +#include "../ext/scalar_int_sized.hpp" +#include "../ext/scalar_uint_sized.hpp" +#include "../detail/type_float.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool equal(genType const& x, genType const& y, genType const& epsilon) + { + return abs(x - y) <= epsilon; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, genType const& epsilon) + { + return abs(x - y) > epsilon; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool equal(genType const& x, genType const& y, int MaxULPs) + { + detail::float_t const a(x); + detail::float_t const b(y); + + // Different signs means they do not match. + if(a.negative() != b.negative()) + return false; + + // Find the difference in ULPs. + typename detail::float_t::int_type const DiffULPs = abs(a.i - b.i); + return DiffULPs <= MaxULPs; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, int ULPs) + { + return !equal(x, y, ULPs); + } +}//namespace glm diff --git a/includes/glm/ext/scalar_uint_sized.hpp b/includes/glm/ext/scalar_uint_sized.hpp new file mode 100644 index 0000000..fd5267f --- /dev/null +++ b/includes/glm/ext/scalar_uint_sized.hpp @@ -0,0 +1,70 @@ +/// @ref ext_scalar_uint_sized +/// @file glm/ext/scalar_uint_sized.hpp +/// +/// @defgroup ext_scalar_uint_sized GLM_EXT_scalar_uint_sized +/// @ingroup ext +/// +/// Exposes sized unsigned integer scalar types. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_int_sized + +#pragma once + +#include "../detail/setup.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_scalar_uint_sized extension included") +#endif + +namespace glm{ +namespace detail +{ +# if GLM_HAS_EXTENDED_INTEGER_TYPE + typedef std::uint8_t uint8; + typedef std::uint16_t uint16; + typedef std::uint32_t uint32; +# else + typedef unsigned char uint8; + typedef unsigned short uint16; + typedef unsigned int uint32; +#endif + + template<> + struct is_int + { + enum test {value = ~0}; + }; + + template<> + struct is_int + { + enum test {value = ~0}; + }; + + template<> + struct is_int + { + enum test {value = ~0}; + }; +}//namespace detail + + + /// @addtogroup ext_scalar_uint_sized + /// @{ + + /// 8 bit unsigned integer type. + typedef detail::uint8 uint8; + + /// 16 bit unsigned integer type. + typedef detail::uint16 uint16; + + /// 32 bit unsigned integer type. + typedef detail::uint32 uint32; + + /// 64 bit unsigned integer type. + typedef detail::uint64 uint64; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/scalar_ulp.hpp b/includes/glm/ext/scalar_ulp.hpp new file mode 100644 index 0000000..6344d95 --- /dev/null +++ b/includes/glm/ext/scalar_ulp.hpp @@ -0,0 +1,77 @@ +/// @ref ext_scalar_ulp +/// @file glm/ext/scalar_ulp.hpp +/// +/// @defgroup ext_scalar_ulp GLM_EXT_scalar_ulp +/// @ingroup ext +/// +/// Allow the measurement of the accuracy of a function against a reference +/// implementation. 
This extension works on floating-point data and provide results +/// in ULP. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_ulp +/// @see ext_scalar_relational + +#pragma once + +// Dependencies +#include "../ext/scalar_int_sized.hpp" +#include "../common.hpp" +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_scalar_ulp extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_scalar_ulp + /// @{ + + /// Return the next ULP value(s) after the input value(s). + /// + /// @tparam genType A floating-point scalar type. + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL genType nextFloat(genType x); + + /// Return the previous ULP value(s) before the input value(s). + /// + /// @tparam genType A floating-point scalar type. + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL genType prevFloat(genType x); + + /// Return the value(s) ULP distance after the input value(s). + /// + /// @tparam genType A floating-point scalar type. + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL genType nextFloat(genType x, int ULPs); + + /// Return the value(s) ULP distance before the input value(s). + /// + /// @tparam genType A floating-point scalar type. + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL genType prevFloat(genType x, int ULPs); + + /// Return the distance in the number of ULP between 2 single-precision floating-point scalars. + /// + /// @see ext_scalar_ulp + GLM_FUNC_DECL int floatDistance(float x, float y); + + /// Return the distance in the number of ULP between 2 double-precision floating-point scalars. + /// + /// @see ext_scalar_ulp + GLM_FUNC_DECL int64 floatDistance(double x, double y); + + /// @} +}//namespace glm + +#include "scalar_ulp.inl" diff --git a/includes/glm/ext/scalar_ulp.inl b/includes/glm/ext/scalar_ulp.inl new file mode 100644 index 0000000..716528d --- /dev/null +++ b/includes/glm/ext/scalar_ulp.inl @@ -0,0 +1,291 @@ +/// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. +/// +/// Developed at SunPro, a Sun Microsystems, Inc. business. +/// Permission to use, copy, modify, and distribute this +/// software is freely granted, provided that this notice +/// is preserved. + +#include "../detail/type_float.hpp" +#include "../ext/scalar_constants.hpp" +#include +#include + +#if GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable : 4127) +# pragma warning(disable : 4365) // '=': signed/unsigned mismatch +#elif GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wsign-conversion" +# pragma clang diagnostic ignored "-Wpadded" +#endif + +typedef union +{ + float value; + /* FIXME: Assumes 32 bit int. 
*/ + unsigned int word; +} ieee_float_shape_type; + +typedef union +{ + double value; + struct + { + int lsw; + int msw; + } parts; +} ieee_double_shape_type; + +#define GLM_EXTRACT_WORDS(ix0,ix1,d) \ + do { \ + ieee_double_shape_type ew_u; \ + ew_u.value = (d); \ + (ix0) = ew_u.parts.msw; \ + (ix1) = ew_u.parts.lsw; \ + } while (0) + +#define GLM_GET_FLOAT_WORD(i,d) \ + do { \ + ieee_float_shape_type gf_u; \ + gf_u.value = (d); \ + (i) = static_cast(gf_u.word); \ + } while (0) + +#define GLM_SET_FLOAT_WORD(d,i) \ + do { \ + ieee_float_shape_type sf_u; \ + sf_u.word = static_cast(i); \ + (d) = sf_u.value; \ + } while (0) + +#define GLM_INSERT_WORDS(d,ix0,ix1) \ + do { \ + ieee_double_shape_type iw_u; \ + iw_u.parts.msw = (ix0); \ + iw_u.parts.lsw = (ix1); \ + (d) = iw_u.value; \ + } while (0) + +namespace glm{ +namespace detail +{ + GLM_FUNC_QUALIFIER float nextafterf(float x, float y) + { + volatile float t; + int hx, hy, ix, iy; + + GLM_GET_FLOAT_WORD(hx, x); + GLM_GET_FLOAT_WORD(hy, y); + ix = hx & 0x7fffffff; // |x| + iy = hy & 0x7fffffff; // |y| + + if((ix > 0x7f800000) || // x is nan + (iy > 0x7f800000)) // y is nan + return x + y; + if(abs(y - x) <= epsilon()) + return y; // x=y, return y + if(ix == 0) + { // x == 0 + GLM_SET_FLOAT_WORD(x, (hy & 0x80000000) | 1);// return +-minsubnormal + t = x * x; + if(abs(t - x) <= epsilon()) + return t; + else + return x; // raise underflow flag + } + if(hx >= 0) + { // x > 0 + if(hx > hy) // x > y, x -= ulp + hx -= 1; + else // x < y, x += ulp + hx += 1; + } + else + { // x < 0 + if(hy >= 0 || hx > hy) // x < y, x -= ulp + hx -= 1; + else // x > y, x += ulp + hx += 1; + } + hy = hx & 0x7f800000; + if(hy >= 0x7f800000) + return x + x; // overflow + if(hy < 0x00800000) // underflow + { + t = x * x; + if(abs(t - x) > epsilon()) + { // raise underflow flag + GLM_SET_FLOAT_WORD(y, hx); + return y; + } + } + GLM_SET_FLOAT_WORD(x, hx); + return x; + } + + GLM_FUNC_QUALIFIER double nextafter(double x, double y) + { + volatile double t; + int hx, hy, ix, iy; + unsigned int lx, ly; + + GLM_EXTRACT_WORDS(hx, lx, x); + GLM_EXTRACT_WORDS(hy, ly, y); + ix = hx & 0x7fffffff; // |x| + iy = hy & 0x7fffffff; // |y| + + if(((ix >= 0x7ff00000) && ((ix - 0x7ff00000) | lx) != 0) || // x is nan + ((iy >= 0x7ff00000) && ((iy - 0x7ff00000) | ly) != 0)) // y is nan + return x + y; + if(abs(y - x) <= epsilon()) + return y; // x=y, return y + if((ix | lx) == 0) + { // x == 0 + GLM_INSERT_WORDS(x, hy & 0x80000000, 1); // return +-minsubnormal + t = x * x; + if(abs(t - x) <= epsilon()) + return t; + else + return x; // raise underflow flag + } + if(hx >= 0) { // x > 0 + if(hx > hy || ((hx == hy) && (lx > ly))) { // x > y, x -= ulp + if(lx == 0) hx -= 1; + lx -= 1; + } + else { // x < y, x += ulp + lx += 1; + if(lx == 0) hx += 1; + } + } + else { // x < 0 + if(hy >= 0 || hx > hy || ((hx == hy) && (lx > ly))){// x < y, x -= ulp + if(lx == 0) hx -= 1; + lx -= 1; + } + else { // x > y, x += ulp + lx += 1; + if(lx == 0) hx += 1; + } + } + hy = hx & 0x7ff00000; + if(hy >= 0x7ff00000) + return x + x; // overflow + if(hy < 0x00100000) + { // underflow + t = x * x; + if(abs(t - x) > epsilon()) + { // raise underflow flag + GLM_INSERT_WORDS(y, hx, lx); + return y; + } + } + GLM_INSERT_WORDS(x, hx, lx); + return x; + } +}//namespace detail +}//namespace glm + +#if GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +#elif GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +#endif + +namespace glm +{ + template<> + GLM_FUNC_QUALIFIER float nextFloat(float x) + { +# if 
GLM_HAS_CXX11_STL + return std::nextafter(x, std::numeric_limits::max()); +# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) + return detail::nextafterf(x, FLT_MAX); +# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) + return __builtin_nextafterf(x, FLT_MAX); +# else + return nextafterf(x, FLT_MAX); +# endif + } + + template<> + GLM_FUNC_QUALIFIER double nextFloat(double x) + { +# if GLM_HAS_CXX11_STL + return std::nextafter(x, std::numeric_limits::max()); +# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) + return detail::nextafter(x, std::numeric_limits::max()); +# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) + return __builtin_nextafter(x, DBL_MAX); +# else + return nextafter(x, DBL_MAX); +# endif + } + + template + GLM_FUNC_QUALIFIER T nextFloat(T x, int ULPs) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'next_float' only accept floating-point input"); + assert(ULPs >= 0); + + T temp = x; + for(int i = 0; i < ULPs; ++i) + temp = nextFloat(temp); + return temp; + } + + GLM_FUNC_QUALIFIER float prevFloat(float x) + { +# if GLM_HAS_CXX11_STL + return std::nextafter(x, std::numeric_limits::min()); +# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) + return detail::nextafterf(x, FLT_MIN); +# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) + return __builtin_nextafterf(x, FLT_MIN); +# else + return nextafterf(x, FLT_MIN); +# endif + } + + GLM_FUNC_QUALIFIER double prevFloat(double x) + { +# if GLM_HAS_CXX11_STL + return std::nextafter(x, std::numeric_limits::min()); +# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) + return _nextafter(x, DBL_MIN); +# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) + return __builtin_nextafter(x, DBL_MIN); +# else + return nextafter(x, DBL_MIN); +# endif + } + + template + GLM_FUNC_QUALIFIER T prevFloat(T x, int ULPs) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'prev_float' only accept floating-point input"); + assert(ULPs >= 0); + + T temp = x; + for(int i = 0; i < ULPs; ++i) + temp = prevFloat(temp); + return temp; + } + + GLM_FUNC_QUALIFIER int floatDistance(float x, float y) + { + detail::float_t const a(x); + detail::float_t const b(y); + + return abs(a.i - b.i); + } + + GLM_FUNC_QUALIFIER int64 floatDistance(double x, double y) + { + detail::float_t const a(x); + detail::float_t const b(y); + + return abs(a.i - b.i); + } +}//namespace glm diff --git a/includes/glm/ext/vector_bool1.hpp b/includes/glm/ext/vector_bool1.hpp new file mode 100644 index 0000000..002c320 --- /dev/null +++ b/includes/glm/ext/vector_bool1.hpp @@ -0,0 +1,30 @@ +/// @ref ext_vector_bool1 +/// @file glm/ext/vector_bool1.hpp +/// +/// @defgroup ext_vector_bool1 GLM_EXT_vector_bool1 +/// @ingroup ext +/// +/// Exposes bvec1 vector type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_bool1_precision extension. + +#pragma once + +#include "../detail/type_vec1.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_bool1 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_bool1 + /// @{ + + /// 1 components vector of boolean. 
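A short sketch of the GLM_EXT_scalar_ulp entry points declared above (nextFloat, prevFloat, floatDistance). Illustrative only; the input values are arbitrary and merely show the call shapes.

#include <glm/ext/scalar_ulp.hpp>
#include <cstdio>

int main()
{
    float const x = 1.0f;

    float const up    = glm::nextFloat(x);     // smallest float strictly greater than x
    float const down  = glm::prevFloat(x);     // largest float strictly smaller than x
    float const up3   = glm::nextFloat(x, 3);  // step three representable values upward

    // floatDistance counts how many representable floats separate two values.
    int const d = glm::floatDistance(down, up);  // expected to be 2 here

    std::printf("%.9g %.9g %.9g, distance = %d\n", up, down, up3, d);
    return 0;
}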
+ typedef vec<1, bool, defaultp> bvec1; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_bool1_precision.hpp b/includes/glm/ext/vector_bool1_precision.hpp new file mode 100644 index 0000000..e62d3cf --- /dev/null +++ b/includes/glm/ext/vector_bool1_precision.hpp @@ -0,0 +1,34 @@ +/// @ref ext_vector_bool1_precision +/// @file glm/ext/vector_bool1_precision.hpp +/// +/// @defgroup ext_vector_bool1_precision GLM_EXT_vector_bool1_precision +/// @ingroup ext +/// +/// Exposes highp_bvec1, mediump_bvec1 and lowp_bvec1 types. +/// +/// Include to use the features of this extension. + +#pragma once + +#include "../detail/type_vec1.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_bool1_precision extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_bool1_precision + /// @{ + + /// 1 component vector of bool values. + typedef vec<1, bool, highp> highp_bvec1; + + /// 1 component vector of bool values. + typedef vec<1, bool, mediump> mediump_bvec1; + + /// 1 component vector of bool values. + typedef vec<1, bool, lowp> lowp_bvec1; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_bool2.hpp b/includes/glm/ext/vector_bool2.hpp new file mode 100644 index 0000000..52288b7 --- /dev/null +++ b/includes/glm/ext/vector_bool2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_bool2.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 2 components vector of boolean. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<2, bool, defaultp> bvec2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_bool2_precision.hpp b/includes/glm/ext/vector_bool2_precision.hpp new file mode 100644 index 0000000..4370933 --- /dev/null +++ b/includes/glm/ext/vector_bool2_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/vector_bool2_precision.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 2 components vector of high qualifier bool numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, bool, highp> highp_bvec2; + + /// 2 components vector of medium qualifier bool numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, bool, mediump> mediump_bvec2; + + /// 2 components vector of low qualifier bool numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, bool, lowp> lowp_bvec2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_bool3.hpp b/includes/glm/ext/vector_bool3.hpp new file mode 100644 index 0000000..90a0b7e --- /dev/null +++ b/includes/glm/ext/vector_bool3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_bool3.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 3 components vector of boolean. 
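The boolean vector types above are usually produced by the core relational functions rather than constructed by hand. A minimal sketch of a point-in-box test, assuming the core glm/glm.hpp header that this patch also vendors; the coordinates are made up for illustration.

#include <glm/glm.hpp>
#include <cstdio>

int main()
{
    glm::vec3 const hitPoint(0.5f, 2.0f, -1.0f);
    glm::vec3 const boxMin(-1.0f), boxMax(1.0f);

    // Component-wise comparisons produce bvec3 values.
    glm::bvec3 const aboveMin = glm::greaterThanEqual(hitPoint, boxMin);
    glm::bvec3 const belowMax = glm::lessThanEqual(hitPoint, boxMax);

    // all() / any() reduce a boolean vector to a single bool.
    bool const inside = glm::all(aboveMin) && glm::all(belowMax);

    std::printf("inside box: %s\n", inside ? "yes" : "no");
    return 0;
}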
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<3, bool, defaultp> bvec3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_bool3_precision.hpp b/includes/glm/ext/vector_bool3_precision.hpp new file mode 100644 index 0000000..89cd2d3 --- /dev/null +++ b/includes/glm/ext/vector_bool3_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/vector_bool3_precision.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 3 components vector of high qualifier bool numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, bool, highp> highp_bvec3; + + /// 3 components vector of medium qualifier bool numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, bool, mediump> mediump_bvec3; + + /// 3 components vector of low qualifier bool numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, bool, lowp> lowp_bvec3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_bool4.hpp b/includes/glm/ext/vector_bool4.hpp new file mode 100644 index 0000000..18aa71b --- /dev/null +++ b/includes/glm/ext/vector_bool4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_bool4.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 4 components vector of boolean. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<4, bool, defaultp> bvec4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_bool4_precision.hpp b/includes/glm/ext/vector_bool4_precision.hpp new file mode 100644 index 0000000..79786e5 --- /dev/null +++ b/includes/glm/ext/vector_bool4_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/vector_bool4_precision.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 4 components vector of high qualifier bool numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, bool, highp> highp_bvec4; + + /// 4 components vector of medium qualifier bool numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, bool, mediump> mediump_bvec4; + + /// 4 components vector of low qualifier bool numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, bool, lowp> lowp_bvec4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_common.hpp b/includes/glm/ext/vector_common.hpp new file mode 100644 index 0000000..c0a2858 --- /dev/null +++ b/includes/glm/ext/vector_common.hpp @@ -0,0 +1,228 @@ +/// @ref ext_vector_common +/// @file glm/ext/vector_common.hpp +/// +/// @defgroup ext_vector_common GLM_EXT_vector_common +/// @ingroup ext +/// +/// Exposes min and max functions for 3 to 4 vector parameters. +/// +/// Include to use the features of this extension. 
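The same boolean vectors can also drive per-component selection through the core glm::mix overload that takes a bvec selector. A small illustrative sketch with arbitrary values, assuming the GLSL-style bool-selector overload of mix from the core library.

#include <glm/glm.hpp>
#include <cstdio>

int main()
{
    glm::vec4 const a(1.0f, 2.0f, 3.0f, 4.0f);
    glm::vec4 const b(10.0f, 20.0f, 30.0f, 40.0f);
    glm::bvec4 const pickB(true, false, true, false);

    // Where the selector is true the component of b is taken, otherwise the one of a.
    glm::vec4 const selected = glm::mix(a, b, pickB);  // (10, 2, 30, 4)

    std::printf("%f %f %f %f\n", selected.x, selected.y, selected.z, selected.w);
    return 0;
}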
+/// +/// @see core_common +/// @see ext_scalar_common + +#pragma once + +// Dependency: +#include "../ext/scalar_common.hpp" +#include "../common.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_common extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_common + /// @{ + + /// Return the minimum component-wise values of 3 inputs + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec min(vec const& a, vec const& b, vec const& c); + + /// Return the minimum component-wise values of 4 inputs + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec min(vec const& a, vec const& b, vec const& c, vec const& d); + + /// Return the maximum component-wise values of 3 inputs + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec max(vec const& x, vec const& y, vec const& z); + + /// Return the maximum component-wise values of 4 inputs + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec max( vec const& x, vec const& y, vec const& z, vec const& w); + + /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see std::fmin documentation + template + GLM_FUNC_DECL vec fmin(vec const& x, T y); + + /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see std::fmin documentation + template + GLM_FUNC_DECL vec fmin(vec const& x, vec const& y); + + /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see std::fmin documentation + template + GLM_FUNC_DECL vec fmin(vec const& a, vec const& b, vec const& c); + + /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see std::fmin documentation + template + GLM_FUNC_DECL vec fmin(vec const& a, vec const& b, vec const& c, vec const& d); + + /// Returns y if x < y; otherwise, it returns x. 
If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see std::fmax documentation + template + GLM_FUNC_DECL vec fmax(vec const& a, T b); + + /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see std::fmax documentation + template + GLM_FUNC_DECL vec fmax(vec const& a, vec const& b); + + /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see std::fmax documentation + template + GLM_FUNC_DECL vec fmax(vec const& a, vec const& b, vec const& c); + + /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see std::fmax documentation + template + GLM_FUNC_DECL vec fmax(vec const& a, vec const& b, vec const& c, vec const& d); + + /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_common + template + GLM_FUNC_DECL vec fclamp(vec const& x, T minVal, T maxVal); + + /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_common + template + GLM_FUNC_DECL vec fclamp(vec const& x, vec const& minVal, vec const& maxVal); + + /// Simulate GL_CLAMP OpenGL wrap mode + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_common extension. + template + GLM_FUNC_DECL vec clamp(vec const& Texcoord); + + /// Simulate GL_REPEAT OpenGL wrap mode + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_common extension. + template + GLM_FUNC_DECL vec repeat(vec const& Texcoord); + + /// Simulate GL_MIRRORED_REPEAT OpenGL wrap mode + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_common extension. 
+ template + GLM_FUNC_DECL vec mirrorClamp(vec const& Texcoord); + + /// Simulate GL_MIRROR_REPEAT OpenGL wrap mode + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_common extension. + template + GLM_FUNC_DECL vec mirrorRepeat(vec const& Texcoord); + + /// Returns a value equal to the nearest integer to x. + /// The fraction 0.5 will round in a direction chosen by the + /// implementation, presumably the direction that is fastest. + /// + /// @param x The values of the argument must be greater or equal to zero. + /// @tparam T floating point scalar types. + /// + /// @see GLSL round man page + /// @see ext_vector_common extension. + template + GLM_FUNC_DECL vec iround(vec const& x); + + /// Returns a value equal to the nearest integer to x. + /// The fraction 0.5 will round in a direction chosen by the + /// implementation, presumably the direction that is fastest. + /// + /// @param x The values of the argument must be greater or equal to zero. + /// @tparam T floating point scalar types. + /// + /// @see GLSL round man page + /// @see ext_vector_common extension. + template + GLM_FUNC_DECL vec uround(vec const& x); + + /// @} +}//namespace glm + +#include "vector_common.inl" diff --git a/includes/glm/ext/vector_common.inl b/includes/glm/ext/vector_common.inl new file mode 100644 index 0000000..67817fc --- /dev/null +++ b/includes/glm/ext/vector_common.inl @@ -0,0 +1,147 @@ +#include "../detail/_vectorize.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& x, vec const& y, vec const& z) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'min' only accept floating-point or integer inputs"); + return glm::min(glm::min(x, y), z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& x, vec const& y, vec const& z, vec const& w) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'min' only accept floating-point or integer inputs"); + return glm::min(glm::min(x, y), glm::min(z, w)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& x, vec const& y, vec const& z) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'max' only accept floating-point or integer inputs"); + return glm::max(glm::max(x, y), z); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& x, vec const& y, vec const& z, vec const& w) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'max' only accept floating-point or integer inputs"); + return glm::max(glm::max(x, y), glm::max(z, w)); + } + + template + GLM_FUNC_QUALIFIER vec fmin(vec const& a, T b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point inputs"); + return detail::functor2::call(fmin, a, vec(b)); + } + + template + GLM_FUNC_QUALIFIER vec fmin(vec const& a, vec const& b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point inputs"); + return detail::functor2::call(fmin, a, b); + } + + template + GLM_FUNC_QUALIFIER vec fmin(vec const& a, vec const& b, vec const& c) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point inputs"); + return fmin(fmin(a, b), c); + } + + template + GLM_FUNC_QUALIFIER vec fmin(vec const& a, vec const& b, vec const& c, 
vec const& d) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point inputs"); + return fmin(fmin(a, b), fmin(c, d)); + } + + template + GLM_FUNC_QUALIFIER vec fmax(vec const& a, T b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point inputs"); + return detail::functor2::call(fmax, a, vec(b)); + } + + template + GLM_FUNC_QUALIFIER vec fmax(vec const& a, vec const& b) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point inputs"); + return detail::functor2::call(fmax, a, b); + } + + template + GLM_FUNC_QUALIFIER vec fmax(vec const& a, vec const& b, vec const& c) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point inputs"); + return fmax(fmax(a, b), c); + } + + template + GLM_FUNC_QUALIFIER vec fmax(vec const& a, vec const& b, vec const& c, vec const& d) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point inputs"); + return fmax(fmax(a, b), fmax(c, d)); + } + + template + GLM_FUNC_QUALIFIER vec fclamp(vec const& x, T minVal, T maxVal) + { + return fmin(fmax(x, vec(minVal)), vec(maxVal)); + } + + template + GLM_FUNC_QUALIFIER vec fclamp(vec const& x, vec const& minVal, vec const& maxVal) + { + return fmin(fmax(x, minVal), maxVal); + } + + template + GLM_FUNC_QUALIFIER vec clamp(vec const& Texcoord) + { + return glm::clamp(Texcoord, vec(0), vec(1)); + } + + template + GLM_FUNC_QUALIFIER vec repeat(vec const& Texcoord) + { + return glm::fract(Texcoord); + } + + template + GLM_FUNC_QUALIFIER vec mirrorClamp(vec const& Texcoord) + { + return glm::fract(glm::abs(Texcoord)); + } + + template + GLM_FUNC_QUALIFIER vec mirrorRepeat(vec const& Texcoord) + { + vec const Abs = glm::abs(Texcoord); + vec const Clamp = glm::mod(glm::floor(Abs), vec(2)); + vec const Floor = glm::floor(Abs); + vec const Rest = Abs - Floor; + vec const Mirror = Clamp + Rest; + return mix(Rest, vec(1) - Rest, glm::greaterThanEqual(Mirror, vec(1))); + } + + template + GLM_FUNC_QUALIFIER vec iround(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'iround' only accept floating-point inputs"); + assert(all(lessThanEqual(vec(0), x))); + + return vec(x + static_cast(0.5)); + } + + template + GLM_FUNC_QUALIFIER vec uround(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'uround' only accept floating-point inputs"); + assert(all(lessThanEqual(vec(0), x))); + + return vec(x + static_cast(0.5)); + } +}//namespace glm diff --git a/includes/glm/ext/vector_double1.hpp b/includes/glm/ext/vector_double1.hpp new file mode 100644 index 0000000..3882667 --- /dev/null +++ b/includes/glm/ext/vector_double1.hpp @@ -0,0 +1,31 @@ +/// @ref ext_vector_double1 +/// @file glm/ext/vector_double1.hpp +/// +/// @defgroup ext_vector_double1 GLM_EXT_vector_double1 +/// @ingroup ext +/// +/// Exposes double-precision floating point vector type with one component. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_double1_precision extension. +/// @see ext_vector_float1 extension. + +#pragma once + +#include "../detail/type_vec1.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_double1 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_double1 + /// @{ + + /// 1 components vector of double-precision floating-point numbers. 
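Usage sketch of the GLM_EXT_vector_common helpers implemented above: three-argument min/max, the NaN-aware fclamp, and the GL-style texture-coordinate wrap helpers. Illustrative only; the input values are arbitrary.

#include <glm/glm.hpp>
#include <glm/ext/vector_common.hpp>
#include <cstdio>

int main()
{
    glm::vec3 const a(0.2f, 0.8f, 1.4f);
    glm::vec3 const b(0.5f, 0.1f, 1.0f);
    glm::vec3 const c(0.3f, 0.9f, 0.7f);

    // Three-argument component-wise minimum / maximum.
    glm::vec3 const lo = glm::min(a, b, c);
    glm::vec3 const hi = glm::max(a, b, c);

    // Clamp into [0, 1] using the fmin/fmax family, which ignores NaN operands.
    glm::vec3 const clamped = glm::fclamp(a, 0.0f, 1.0f);

    // Wrap helpers mirroring the OpenGL wrap modes.
    glm::vec2 const uv(1.25f, -0.25f);
    glm::vec2 const repeated = glm::repeat(uv);        // GL_REPEAT
    glm::vec2 const mirrored = glm::mirrorRepeat(uv);  // GL_MIRRORED_REPEAT

    std::printf("%f %f %f %f %f\n", lo.x, hi.x, clamped.x, repeated.x, mirrored.y);
    return 0;
}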
+ typedef vec<1, double, defaultp> dvec1; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_double1_precision.hpp b/includes/glm/ext/vector_double1_precision.hpp new file mode 100644 index 0000000..1d47195 --- /dev/null +++ b/includes/glm/ext/vector_double1_precision.hpp @@ -0,0 +1,36 @@ +/// @ref ext_vector_double1_precision +/// @file glm/ext/vector_double1_precision.hpp +/// +/// @defgroup ext_vector_double1_precision GLM_EXT_vector_double1_precision +/// @ingroup ext +/// +/// Exposes highp_dvec1, mediump_dvec1 and lowp_dvec1 types. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_double1 + +#pragma once + +#include "../detail/type_vec1.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_double1_precision extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_double1_precision + /// @{ + + /// 1 component vector of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<1, double, highp> highp_dvec1; + + /// 1 component vector of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<1, double, mediump> mediump_dvec1; + + /// 1 component vector of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<1, double, lowp> lowp_dvec1; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_double2.hpp b/includes/glm/ext/vector_double2.hpp new file mode 100644 index 0000000..60e3577 --- /dev/null +++ b/includes/glm/ext/vector_double2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_double2.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 2 components vector of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<2, double, defaultp> dvec2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_double2_precision.hpp b/includes/glm/ext/vector_double2_precision.hpp new file mode 100644 index 0000000..fa53940 --- /dev/null +++ b/includes/glm/ext/vector_double2_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/vector_double2_precision.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 2 components vector of high double-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, double, highp> highp_dvec2; + + /// 2 components vector of medium double-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, double, mediump> mediump_dvec2; + + /// 2 components vector of low double-qualifier floating-point numbers. 
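A hedged sketch of where the double-precision dvec types can help: accumulating many small increments in double, then narrowing to vec3 afterwards (for example before uploading to a GPU buffer). The step count and values are illustrative and not taken from the renderer.

#include <glm/glm.hpp>
#include <cstdio>

int main()
{
    // Accumulate many small steps in double precision...
    glm::dvec3 position(0.0);
    glm::dvec3 const velocity(1e-7, 2e-7, 3e-7);
    for (int i = 0; i < 1000000; ++i)
        position += velocity;

    // ...then narrow to single precision for consumers that expect vec3.
    glm::vec3 const gpuPosition(position);

    std::printf("%f %f %f\n", gpuPosition.x, gpuPosition.y, gpuPosition.z);
    return 0;
}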
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, double, lowp> lowp_dvec2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_double3.hpp b/includes/glm/ext/vector_double3.hpp new file mode 100644 index 0000000..6dfe4c6 --- /dev/null +++ b/includes/glm/ext/vector_double3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_double3.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 3 components vector of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<3, double, defaultp> dvec3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_double3_precision.hpp b/includes/glm/ext/vector_double3_precision.hpp new file mode 100644 index 0000000..a8cfa37 --- /dev/null +++ b/includes/glm/ext/vector_double3_precision.hpp @@ -0,0 +1,34 @@ +/// @ref core +/// @file glm/ext/vector_double3_precision.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 3 components vector of high double-qualifier floating-point numbers. + /// There is no guarantee on the actual qualifier. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, double, highp> highp_dvec3; + + /// 3 components vector of medium double-qualifier floating-point numbers. + /// There is no guarantee on the actual qualifier. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, double, mediump> mediump_dvec3; + + /// 3 components vector of low double-qualifier floating-point numbers. + /// There is no guarantee on the actual qualifier. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, double, lowp> lowp_dvec3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_double4.hpp b/includes/glm/ext/vector_double4.hpp new file mode 100644 index 0000000..87f225f --- /dev/null +++ b/includes/glm/ext/vector_double4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_double4.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 4 components vector of double-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<4, double, defaultp> dvec4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_double4_precision.hpp b/includes/glm/ext/vector_double4_precision.hpp new file mode 100644 index 0000000..09cafa1 --- /dev/null +++ b/includes/glm/ext/vector_double4_precision.hpp @@ -0,0 +1,35 @@ +/// @ref core +/// @file glm/ext/vector_double4_precision.hpp + +#pragma once +#include "../detail/setup.hpp" +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 4 components vector of high double-qualifier floating-point numbers. + /// There is no guarantee on the actual qualifier. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, double, highp> highp_dvec4; + + /// 4 components vector of medium double-qualifier floating-point numbers. + /// There is no guarantee on the actual qualifier. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, double, mediump> mediump_dvec4; + + /// 4 components vector of low double-qualifier floating-point numbers. + /// There is no guarantee on the actual qualifier. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, double, lowp> lowp_dvec4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_float1.hpp b/includes/glm/ext/vector_float1.hpp new file mode 100644 index 0000000..28acc2c --- /dev/null +++ b/includes/glm/ext/vector_float1.hpp @@ -0,0 +1,31 @@ +/// @ref ext_vector_float1 +/// @file glm/ext/vector_float1.hpp +/// +/// @defgroup ext_vector_float1 GLM_EXT_vector_float1 +/// @ingroup ext +/// +/// Exposes single-precision floating point vector type with one component. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_float1_precision extension. +/// @see ext_vector_double1 extension. + +#pragma once + +#include "../detail/type_vec1.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_float1 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_float1 + /// @{ + + /// 1 components vector of single-precision floating-point numbers. + typedef vec<1, float, defaultp> vec1; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_float1_precision.hpp b/includes/glm/ext/vector_float1_precision.hpp new file mode 100644 index 0000000..6e8dad8 --- /dev/null +++ b/includes/glm/ext/vector_float1_precision.hpp @@ -0,0 +1,36 @@ +/// @ref ext_vector_float1_precision +/// @file glm/ext/vector_float1_precision.hpp +/// +/// @defgroup ext_vector_float1_precision GLM_EXT_vector_float1_precision +/// @ingroup ext +/// +/// Exposes highp_vec1, mediump_vec1 and lowp_vec1 types. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_float1 extension. + +#pragma once + +#include "../detail/type_vec1.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_float1_precision extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_float1_precision + /// @{ + + /// 1 component vector of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<1, float, highp> highp_vec1; + + /// 1 component vector of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<1, float, mediump> mediump_vec1; + + /// 1 component vector of single-precision floating-point numbers using low precision arithmetic in term of ULPs. 
+ typedef vec<1, float, lowp> lowp_vec1; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_float2.hpp b/includes/glm/ext/vector_float2.hpp new file mode 100644 index 0000000..d31545d --- /dev/null +++ b/includes/glm/ext/vector_float2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_float2.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 2 components vector of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<2, float, defaultp> vec2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_float2_precision.hpp b/includes/glm/ext/vector_float2_precision.hpp new file mode 100644 index 0000000..23c0820 --- /dev/null +++ b/includes/glm/ext/vector_float2_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/vector_float2_precision.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 2 components vector of high single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, float, highp> highp_vec2; + + /// 2 components vector of medium single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, float, mediump> mediump_vec2; + + /// 2 components vector of low single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<2, float, lowp> lowp_vec2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_float3.hpp b/includes/glm/ext/vector_float3.hpp new file mode 100644 index 0000000..cd79a62 --- /dev/null +++ b/includes/glm/ext/vector_float3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_float3.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 3 components vector of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<3, float, defaultp> vec3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_float3_precision.hpp b/includes/glm/ext/vector_float3_precision.hpp new file mode 100644 index 0000000..be640b5 --- /dev/null +++ b/includes/glm/ext/vector_float3_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/vector_float3_precision.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 3 components vector of high single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, float, highp> highp_vec3; + + /// 3 components vector of medium single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, float, mediump> mediump_vec3; + + /// 3 components vector of low single-qualifier floating-point numbers. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<3, float, lowp> lowp_vec3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_float4.hpp b/includes/glm/ext/vector_float4.hpp new file mode 100644 index 0000000..d84adcc --- /dev/null +++ b/includes/glm/ext/vector_float4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_float4.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 4 components vector of single-precision floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<4, float, defaultp> vec4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_float4_precision.hpp b/includes/glm/ext/vector_float4_precision.hpp new file mode 100644 index 0000000..aede838 --- /dev/null +++ b/includes/glm/ext/vector_float4_precision.hpp @@ -0,0 +1,31 @@ +/// @ref core +/// @file glm/ext/vector_float4_precision.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector_precision + /// @{ + + /// 4 components vector of high single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, float, highp> highp_vec4; + + /// 4 components vector of medium single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, float, mediump> mediump_vec4; + + /// 4 components vector of low single-qualifier floating-point numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier + typedef vec<4, float, lowp> lowp_vec4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_int1.hpp b/includes/glm/ext/vector_int1.hpp new file mode 100644 index 0000000..dc86038 --- /dev/null +++ b/includes/glm/ext/vector_int1.hpp @@ -0,0 +1,32 @@ +/// @ref ext_vector_int1 +/// @file glm/ext/vector_int1.hpp +/// +/// @defgroup ext_vector_int1 GLM_EXT_vector_int1 +/// @ingroup ext +/// +/// Exposes ivec1 vector type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_uint1 extension. +/// @see ext_vector_int1_precision extension. + +#pragma once + +#include "../detail/type_vec1.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_int1 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_int1 + /// @{ + + /// 1 component vector of signed integer numbers. + typedef vec<1, int, defaultp> ivec1; + + /// @} +}//namespace glm + diff --git a/includes/glm/ext/vector_int1_sized.hpp b/includes/glm/ext/vector_int1_sized.hpp new file mode 100644 index 0000000..de0d4cf --- /dev/null +++ b/includes/glm/ext/vector_int1_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_int1_sized +/// @file glm/ext/vector_int1_sized.hpp +/// +/// @defgroup ext_vector_int1_sized GLM_EXT_vector_int1_sized +/// @ingroup ext +/// +/// Exposes sized signed integer vector types. +/// +/// Include to use the features of this extension. 
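Since this patch introduces the camera system, here is a minimal, purely illustrative sketch of how the core vec2/vec3 types are typically combined to turn a pixel coordinate into a view-space ray direction. The function name, parameters, and conventions are invented for illustration and are not the project's Camera API.

#include <glm/glm.hpp>
#include <cmath>
#include <cstdio>

// Illustrative only: build a view-space ray direction (camera looking down -Z)
// from a pixel coordinate. 'fov' is the vertical field of view in radians.
glm::vec3 pixelToRayDir(glm::vec2 pixel, glm::vec2 resolution, float fov)
{
    glm::vec2 const ndc = (pixel + 0.5f) / resolution * 2.0f - 1.0f;  // map to [-1, 1]
    float const aspect = resolution.x / resolution.y;
    float const halfHeight = std::tan(fov * 0.5f);

    glm::vec3 const dir(ndc.x * aspect * halfHeight, ndc.y * halfHeight, -1.0f);
    return glm::normalize(dir);
}

int main()
{
    glm::vec3 const d = pixelToRayDir(glm::vec2(960.0f, 540.0f), glm::vec2(1920.0f, 1080.0f), glm::radians(60.0f));
    std::printf("%f %f %f\n", d.x, d.y, d.z);
    return 0;
}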
+/// +/// @see ext_scalar_int_sized +/// @see ext_vector_uint1_sized + +#pragma once + +#include "../ext/vector_int1.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_int1_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_int1_sized + /// @{ + + /// 8 bit signed integer vector of 1 component type. + /// + /// @see ext_vector_int1_sized + typedef vec<1, int8, defaultp> i8vec1; + + /// 16 bit signed integer vector of 1 component type. + /// + /// @see ext_vector_int1_sized + typedef vec<1, int16, defaultp> i16vec1; + + /// 32 bit signed integer vector of 1 component type. + /// + /// @see ext_vector_int1_sized + typedef vec<1, int32, defaultp> i32vec1; + + /// 64 bit signed integer vector of 1 component type. + /// + /// @see ext_vector_int1_sized + typedef vec<1, int64, defaultp> i64vec1; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_int2.hpp b/includes/glm/ext/vector_int2.hpp new file mode 100644 index 0000000..aef803e --- /dev/null +++ b/includes/glm/ext/vector_int2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_int2.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 2 components vector of signed integer numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<2, int, defaultp> ivec2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_int2_sized.hpp b/includes/glm/ext/vector_int2_sized.hpp new file mode 100644 index 0000000..1fd57ee --- /dev/null +++ b/includes/glm/ext/vector_int2_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_int2_sized +/// @file glm/ext/vector_int2_sized.hpp +/// +/// @defgroup ext_vector_int2_sized GLM_EXT_vector_int2_sized +/// @ingroup ext +/// +/// Exposes sized signed integer vector of 2 components type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_int_sized +/// @see ext_vector_uint2_sized + +#pragma once + +#include "../ext/vector_int2.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_int2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_int2_sized + /// @{ + + /// 8 bit signed integer vector of 2 components type. + /// + /// @see ext_vector_int2_sized + typedef vec<2, int8, defaultp> i8vec2; + + /// 16 bit signed integer vector of 2 components type. + /// + /// @see ext_vector_int2_sized + typedef vec<2, int16, defaultp> i16vec2; + + /// 32 bit signed integer vector of 2 components type. + /// + /// @see ext_vector_int2_sized + typedef vec<2, int32, defaultp> i32vec2; + + /// 64 bit signed integer vector of 2 components type. + /// + /// @see ext_vector_int2_sized + typedef vec<2, int64, defaultp> i64vec2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_int3.hpp b/includes/glm/ext/vector_int3.hpp new file mode 100644 index 0000000..4767e61 --- /dev/null +++ b/includes/glm/ext/vector_int3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_int3.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 3 components vector of signed integer numbers. 
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<3, int, defaultp> ivec3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_int3_sized.hpp b/includes/glm/ext/vector_int3_sized.hpp new file mode 100644 index 0000000..085a3fe --- /dev/null +++ b/includes/glm/ext/vector_int3_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_int3_sized +/// @file glm/ext/vector_int3_sized.hpp +/// +/// @defgroup ext_vector_int3_sized GLM_EXT_vector_int3_sized +/// @ingroup ext +/// +/// Exposes sized signed integer vector of 3 components type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_int_sized +/// @see ext_vector_uint3_sized + +#pragma once + +#include "../ext/vector_int3.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_int3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_int3_sized + /// @{ + + /// 8 bit signed integer vector of 3 components type. + /// + /// @see ext_vector_int3_sized + typedef vec<3, int8, defaultp> i8vec3; + + /// 16 bit signed integer vector of 3 components type. + /// + /// @see ext_vector_int3_sized + typedef vec<3, int16, defaultp> i16vec3; + + /// 32 bit signed integer vector of 3 components type. + /// + /// @see ext_vector_int3_sized + typedef vec<3, int32, defaultp> i32vec3; + + /// 64 bit signed integer vector of 3 components type. + /// + /// @see ext_vector_int3_sized + typedef vec<3, int64, defaultp> i64vec3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_int4.hpp b/includes/glm/ext/vector_int4.hpp new file mode 100644 index 0000000..bb23adf --- /dev/null +++ b/includes/glm/ext/vector_int4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_int4.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 4 components vector of signed integer numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<4, int, defaultp> ivec4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_int4_sized.hpp b/includes/glm/ext/vector_int4_sized.hpp new file mode 100644 index 0000000..c63d465 --- /dev/null +++ b/includes/glm/ext/vector_int4_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_int4_sized +/// @file glm/ext/vector_int4_sized.hpp +/// +/// @defgroup ext_vector_int4_sized GLM_EXT_vector_int4_sized +/// @ingroup ext +/// +/// Exposes sized signed integer vector of 4 components type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_int_sized +/// @see ext_vector_uint4_sized + +#pragma once + +#include "../ext/vector_int4.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_int4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_int4_sized + /// @{ + + /// 8 bit signed integer vector of 4 components type. + /// + /// @see ext_vector_int4_sized + typedef vec<4, int8, defaultp> i8vec4; + + /// 16 bit signed integer vector of 4 components type. + /// + /// @see ext_vector_int4_sized + typedef vec<4, int16, defaultp> i16vec4; + + /// 32 bit signed integer vector of 4 components type. + /// + /// @see ext_vector_int4_sized + typedef vec<4, int32, defaultp> i32vec4; + + /// 64 bit signed integer vector of 4 components type. 
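A hedged sketch using the signed integer vectors for pixel addressing with the core clamp. The row-major framebuffer layout and the image size are assumptions for the example, not taken from the project.

#include <glm/glm.hpp>
#include <vector>

// Illustrative only: clamp a pixel coordinate into the image and compute a texel index.
int texelIndex(glm::ivec2 pixel, glm::ivec2 imageSize)
{
    glm::ivec2 const clamped = glm::clamp(pixel, glm::ivec2(0), imageSize - 1);
    return clamped.y * imageSize.x + clamped.x;  // row-major layout assumed
}

int main()
{
    glm::ivec2 const size(1920, 1080);
    std::vector<glm::vec4> framebuffer(static_cast<size_t>(size.x) * size.y);

    // An out-of-range coordinate is pulled back into bounds before indexing.
    framebuffer[texelIndex(glm::ivec2(2000, -5), size)] = glm::vec4(1.0f);
    return 0;
}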
+ /// + /// @see ext_vector_int4_sized + typedef vec<4, int64, defaultp> i64vec4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_integer.hpp b/includes/glm/ext/vector_integer.hpp new file mode 100644 index 0000000..1304dd8 --- /dev/null +++ b/includes/glm/ext/vector_integer.hpp @@ -0,0 +1,149 @@ +/// @ref ext_vector_integer +/// @file glm/ext/vector_integer.hpp +/// +/// @see core (dependence) +/// @see ext_vector_integer (dependence) +/// +/// @defgroup ext_vector_integer GLM_EXT_vector_integer +/// @ingroup ext +/// +/// Include to use the features of this extension. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/_vectorize.hpp" +#include "../vector_relational.hpp" +#include "../common.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_integer extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_integer + /// @{ + + /// Return true if the value is a power of two number. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed or unsigned integer scalar types. + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_integer + template + GLM_FUNC_DECL vec isPowerOfTwo(vec const& v); + + /// Return the power of two number which value is just higher the input value, + /// round up to a power of two. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed or unsigned integer scalar types. + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_integer + template + GLM_FUNC_DECL vec nextPowerOfTwo(vec const& v); + + /// Return the power of two number which value is just lower the input value, + /// round down to a power of two. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed or unsigned integer scalar types. + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_integer + template + GLM_FUNC_DECL vec prevPowerOfTwo(vec const& v); + + /// Return true if the 'Value' is a multiple of 'Multiple'. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed or unsigned integer scalar types. + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_integer + template + GLM_FUNC_DECL vec isMultiple(vec const& v, T Multiple); + + /// Return true if the 'Value' is a multiple of 'Multiple'. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed or unsigned integer scalar types. + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_integer + template + GLM_FUNC_DECL vec isMultiple(vec const& v, vec const& Multiple); + + /// Higher multiple number of Source. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed or unsigned integer scalar types. + /// @tparam Q Value from qualifier enum + /// + /// @param v Source values to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see ext_vector_integer + template + GLM_FUNC_DECL vec nextMultiple(vec const& v, T Multiple); + + /// Higher multiple number of Source. 
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed or unsigned integer scalar types. + /// @tparam Q Value from qualifier enum + /// + /// @param v Source values to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see ext_vector_integer + template + GLM_FUNC_DECL vec nextMultiple(vec const& v, vec const& Multiple); + + /// Lower multiple number of Source. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed or unsigned integer scalar types. + /// @tparam Q Value from qualifier enum + /// + /// @param v Source values to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see ext_vector_integer + template + GLM_FUNC_DECL vec prevMultiple(vec const& v, T Multiple); + + /// Lower multiple number of Source. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed or unsigned integer scalar types. + /// @tparam Q Value from qualifier enum + /// + /// @param v Source values to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see ext_vector_integer + template + GLM_FUNC_DECL vec prevMultiple(vec const& v, vec const& Multiple); + + /// Returns the bit number of the Nth significant bit set to + /// 1 in the binary representation of value. + /// If value bitcount is less than the Nth significant bit, -1 will be returned. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Signed or unsigned integer scalar types. + /// + /// @see ext_vector_integer + template + GLM_FUNC_DECL vec findNSB(vec const& Source, vec SignificantBitCount); + + /// @} +} //namespace glm + +#include "vector_integer.inl" diff --git a/includes/glm/ext/vector_integer.inl b/includes/glm/ext/vector_integer.inl new file mode 100644 index 0000000..cefb132 --- /dev/null +++ b/includes/glm/ext/vector_integer.inl @@ -0,0 +1,85 @@ +#include "scalar_integer.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec isPowerOfTwo(vec const& Value) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'isPowerOfTwo' only accept integer inputs"); + + vec const Result(abs(Value)); + return equal(Result & (Result - vec(1)), vec(0)); + } + + template + GLM_FUNC_QUALIFIER vec nextPowerOfTwo(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'nextPowerOfTwo' only accept integer inputs"); + + return detail::compute_ceilPowerOfTwo::is_signed>::call(v); + } + + template + GLM_FUNC_QUALIFIER vec prevPowerOfTwo(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'prevPowerOfTwo' only accept integer inputs"); + + return detail::functor1::call(prevPowerOfTwo, v); + } + + template + GLM_FUNC_QUALIFIER vec isMultiple(vec const& Value, T Multiple) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'isMultiple' only accept integer inputs"); + + return equal(Value % Multiple, vec(0)); + } + + template + GLM_FUNC_QUALIFIER vec isMultiple(vec const& Value, vec const& Multiple) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'isMultiple' only accept integer inputs"); + + return equal(Value % Multiple, vec(0)); + } + + template + GLM_FUNC_QUALIFIER vec nextMultiple(vec const& Source, T Multiple) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'nextMultiple' only accept integer inputs"); + + 
return detail::functor2::call(nextMultiple, Source, vec(Multiple)); + } + + template + GLM_FUNC_QUALIFIER vec nextMultiple(vec const& Source, vec const& Multiple) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'nextMultiple' only accept integer inputs"); + + return detail::functor2::call(nextMultiple, Source, Multiple); + } + + template + GLM_FUNC_QUALIFIER vec prevMultiple(vec const& Source, T Multiple) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'prevMultiple' only accept integer inputs"); + + return detail::functor2::call(prevMultiple, Source, vec(Multiple)); + } + + template + GLM_FUNC_QUALIFIER vec prevMultiple(vec const& Source, vec const& Multiple) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'prevMultiple' only accept integer inputs"); + + return detail::functor2::call(prevMultiple, Source, Multiple); + } + + template + GLM_FUNC_QUALIFIER vec findNSB(vec const& Source, vec SignificantBitCount) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findNSB' only accept integer inputs"); + + return detail::functor2_vec_int::call(findNSB, Source, SignificantBitCount); + } +}//namespace glm diff --git a/includes/glm/ext/vector_packing.hpp b/includes/glm/ext/vector_packing.hpp new file mode 100644 index 0000000..76e5d0c --- /dev/null +++ b/includes/glm/ext/vector_packing.hpp @@ -0,0 +1,32 @@ +/// @ref ext_vector_packing +/// @file glm/ext/vector_packing.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_vector_packing GLM_EXT_vector_packing +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// This extension provides a set of function to convert vectors to packed +/// formats. + +#pragma once + +// Dependency: +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_packing extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_packing + /// @{ + + + /// @} +}// namespace glm + +#include "vector_packing.inl" diff --git a/includes/glm/ext/vector_packing.inl b/includes/glm/ext/vector_packing.inl new file mode 100644 index 0000000..e69de29 diff --git a/includes/glm/ext/vector_reciprocal.hpp b/includes/glm/ext/vector_reciprocal.hpp new file mode 100644 index 0000000..84d6766 --- /dev/null +++ b/includes/glm/ext/vector_reciprocal.hpp @@ -0,0 +1,135 @@ +/// @ref ext_vector_reciprocal +/// @file glm/ext/vector_reciprocal.hpp +/// +/// @see core (dependence) +/// +/// @defgroup ext_vector_reciprocal GLM_EXT_vector_reciprocal +/// @ingroup ext +/// +/// Include to use the features of this extension. +/// +/// Define secant, cosecant and cotangent functions. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_reciprocal extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_reciprocal + /// @{ + + /// Secant function. + /// hypotenuse / adjacent or 1 / cos(x) + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType sec(genType angle); + + /// Cosecant function. + /// hypotenuse / opposite or 1 / sin(x) + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType csc(genType angle); + + /// Cotangent function. 
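Note: GLM_EXT_vector_integer above provides component-wise power-of-two and multiple-rounding helpers. A small usage sketch (illustrative only, assuming the vendored headers are reachable as <glm/...>):

    #include <glm/vec3.hpp>
    #include <glm/ext/vector_integer.hpp>
    #include <iostream>

    int main()
    {
        glm::uvec3 const sizes(24u, 64u, 100u);

        glm::bvec3 const pot  = glm::isPowerOfTwo(sizes);   // (false, true, false)
        glm::uvec3 const up   = glm::nextPowerOfTwo(sizes); // (32, 64, 128)
        glm::uvec3 const down = glm::prevPowerOfTwo(sizes); // (16, 64, 64)

        // Round each component up to the next multiple of 16, e.g. for alignment.
        glm::uvec3 const aligned = glm::nextMultiple(sizes, 16u);

        std::cout << pot.y << ' ' << up.x << ' ' << down.z << ' ' << aligned.z << '\n';
    }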
+ /// adjacent / opposite or 1 / tan(x) + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType cot(genType angle); + + /// Inverse secant function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType asec(genType x); + + /// Inverse cosecant function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType acsc(genType x); + + /// Inverse cotangent function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType acot(genType x); + + /// Secant hyperbolic function. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType sech(genType angle); + + /// Cosecant hyperbolic function. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType csch(genType angle); + + /// Cotangent hyperbolic function. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType coth(genType angle); + + /// Inverse secant hyperbolic function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType asech(genType x); + + /// Inverse cosecant hyperbolic function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType acsch(genType x); + + /// Inverse cotangent hyperbolic function. + /// + /// @return Return an angle expressed in radians. + /// @tparam genType Floating-point scalar or vector types. 
+ /// + /// @see ext_vector_reciprocal + template + GLM_FUNC_DECL genType acoth(genType x); + + /// @} +}//namespace glm + +#include "vector_reciprocal.inl" diff --git a/includes/glm/ext/vector_reciprocal.inl b/includes/glm/ext/vector_reciprocal.inl new file mode 100644 index 0000000..b85102a --- /dev/null +++ b/includes/glm/ext/vector_reciprocal.inl @@ -0,0 +1,105 @@ +/// @ref ext_vector_reciprocal + +#include "../trigonometric.hpp" +#include + +namespace glm +{ + // sec + template + GLM_FUNC_QUALIFIER vec sec(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'sec' only accept floating-point inputs"); + return static_cast(1) / detail::functor1::call(cos, x); + } + + // csc + template + GLM_FUNC_QUALIFIER vec csc(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'csc' only accept floating-point inputs"); + return static_cast(1) / detail::functor1::call(sin, x); + } + + // cot + template + GLM_FUNC_QUALIFIER vec cot(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'cot' only accept floating-point inputs"); + T const pi_over_2 = static_cast(3.1415926535897932384626433832795 / 2.0); + return detail::functor1::call(tan, pi_over_2 - x); + } + + // asec + template + GLM_FUNC_QUALIFIER vec asec(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'asec' only accept floating-point inputs"); + return detail::functor1::call(acos, static_cast(1) / x); + } + + // acsc + template + GLM_FUNC_QUALIFIER vec acsc(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acsc' only accept floating-point inputs"); + return detail::functor1::call(asin, static_cast(1) / x); + } + + // acot + template + GLM_FUNC_QUALIFIER vec acot(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acot' only accept floating-point inputs"); + T const pi_over_2 = static_cast(3.1415926535897932384626433832795 / 2.0); + return pi_over_2 - detail::functor1::call(atan, x); + } + + // sech + template + GLM_FUNC_QUALIFIER vec sech(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'sech' only accept floating-point inputs"); + return static_cast(1) / detail::functor1::call(cosh, x); + } + + // csch + template + GLM_FUNC_QUALIFIER vec csch(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'csch' only accept floating-point inputs"); + return static_cast(1) / detail::functor1::call(sinh, x); + } + + // coth + template + GLM_FUNC_QUALIFIER vec coth(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'coth' only accept floating-point inputs"); + return glm::cosh(x) / glm::sinh(x); + } + + // asech + template + GLM_FUNC_QUALIFIER vec asech(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'asech' only accept floating-point inputs"); + return detail::functor1::call(acosh, static_cast(1) / x); + } + + // acsch + template + GLM_FUNC_QUALIFIER vec acsch(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acsch' only accept floating-point inputs"); + return detail::functor1::call(asinh, static_cast(1) / x); + } + + // acoth + template + GLM_FUNC_QUALIFIER vec acoth(vec const& 
x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'acoth' only accept floating-point inputs"); + return detail::functor1::call(atanh, static_cast(1) / x); + } +}//namespace glm diff --git a/includes/glm/ext/vector_relational.hpp b/includes/glm/ext/vector_relational.hpp new file mode 100644 index 0000000..1c2367d --- /dev/null +++ b/includes/glm/ext/vector_relational.hpp @@ -0,0 +1,107 @@ +/// @ref ext_vector_relational +/// @file glm/ext/vector_relational.hpp +/// +/// @see core (dependence) +/// @see ext_scalar_integer (dependence) +/// +/// @defgroup ext_vector_relational GLM_EXT_vector_relational +/// @ingroup ext +/// +/// Exposes comparison functions for vector types that take a user defined epsilon values. +/// +/// Include to use the features of this extension. +/// +/// @see core_vector_relational +/// @see ext_scalar_relational +/// @see ext_matrix_relational + +#pragma once + +// Dependencies +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_relational extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_relational + /// @{ + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y, T epsilon); + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y, vec const& epsilon); + + /// Returns the component-wise comparison of |x - y| >= epsilon. + /// True if this expression is not satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, T epsilon); + + /// Returns the component-wise comparison of |x - y| >= epsilon. + /// True if this expression is not satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, vec const& epsilon); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y, int ULPs); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is satisfied. 
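Note: the reciprocal trigonometric functions defined above are thin component-wise wrappers over the core trigonometry. A hedged usage sketch (not part of the patch, assuming the vendored include path):

    #include <glm/vec3.hpp>
    #include <glm/trigonometric.hpp>
    #include <glm/ext/vector_reciprocal.hpp>
    #include <iostream>

    int main()
    {
        glm::vec3 const angle(0.25f, 0.5f, 1.0f); // radians

        glm::vec3 const s = glm::sec(angle); // 1 / cos(angle), component-wise
        glm::vec3 const c = glm::csc(angle); // 1 / sin(angle)
        glm::vec3 const t = glm::cot(angle); // tan(pi/2 - angle)

        // Each product below should be ~1 up to rounding error.
        glm::vec3 const checkSec = s * glm::cos(angle);
        glm::vec3 const checkCsc = c * glm::sin(angle);
        glm::vec3 const checkCot = t * glm::tan(angle);
        std::cout << checkSec.x << ' ' << checkCsc.y << ' ' << checkCot.z << '\n';
    }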
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y, vec const& ULPs); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is not satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, int ULPs); + + /// Returns the component-wise comparison between two vectors in term of ULPs. + /// True if this expression is not satisfied. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, vec const& ULPs); + + /// @} +}//namespace glm + +#include "vector_relational.inl" diff --git a/includes/glm/ext/vector_relational.inl b/includes/glm/ext/vector_relational.inl new file mode 100644 index 0000000..8c50b2f --- /dev/null +++ b/includes/glm/ext/vector_relational.inl @@ -0,0 +1,75 @@ +#include "../vector_relational.hpp" +#include "../common.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/type_float.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y, T Epsilon) + { + return equal(x, y, vec(Epsilon)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y, vec const& Epsilon) + { + return lessThanEqual(abs(x - y), Epsilon); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, T Epsilon) + { + return notEqual(x, y, vec(Epsilon)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, vec const& Epsilon) + { + return greaterThan(abs(x - y), Epsilon); + } + + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y, int MaxULPs) + { + return equal(x, y, vec(MaxULPs)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y, vec const& MaxULPs) + { + vec Result(false); + for(length_t i = 0; i < L; ++i) + { + detail::float_t const a(x[i]); + detail::float_t const b(y[i]); + + // Different signs means they do not match. + if(a.negative() != b.negative()) + { + // Check for equality to make sure +0==-0 + Result[i] = a.mantissa() == b.mantissa() && a.exponent() == b.exponent(); + } + else + { + // Find the difference in ULPs. 
+ typename detail::float_t::int_type const DiffULPs = abs(a.i - b.i); + Result[i] = DiffULPs <= MaxULPs[i]; + } + } + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, int MaxULPs) + { + return notEqual(x, y, vec(MaxULPs)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, vec const& MaxULPs) + { + return not_(equal(x, y, MaxULPs)); + } +}//namespace glm diff --git a/includes/glm/ext/vector_uint1.hpp b/includes/glm/ext/vector_uint1.hpp new file mode 100644 index 0000000..eb8a704 --- /dev/null +++ b/includes/glm/ext/vector_uint1.hpp @@ -0,0 +1,32 @@ +/// @ref ext_vector_uint1 +/// @file glm/ext/vector_uint1.hpp +/// +/// @defgroup ext_vector_uint1 GLM_EXT_vector_uint1 +/// @ingroup ext +/// +/// Exposes uvec1 vector type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_vector_int1 extension. +/// @see ext_vector_uint1_precision extension. + +#pragma once + +#include "../detail/type_vec1.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_uint1 extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_uint1 + /// @{ + + /// 1 component vector of unsigned integer numbers. + typedef vec<1, unsigned int, defaultp> uvec1; + + /// @} +}//namespace glm + diff --git a/includes/glm/ext/vector_uint1_sized.hpp b/includes/glm/ext/vector_uint1_sized.hpp new file mode 100644 index 0000000..2a938bb --- /dev/null +++ b/includes/glm/ext/vector_uint1_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_uint1_sized +/// @file glm/ext/vector_uint1_sized.hpp +/// +/// @defgroup ext_vector_uint1_sized GLM_EXT_vector_uint1_sized +/// @ingroup ext +/// +/// Exposes sized unsigned integer vector types. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_uint_sized +/// @see ext_vector_int1_sized + +#pragma once + +#include "../ext/vector_uint1.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_uint1_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_uint1_sized + /// @{ + + /// 8 bit unsigned integer vector of 1 component type. + /// + /// @see ext_vector_uint1_sized + typedef vec<1, uint8, defaultp> u8vec1; + + /// 16 bit unsigned integer vector of 1 component type. + /// + /// @see ext_vector_uint1_sized + typedef vec<1, uint16, defaultp> u16vec1; + + /// 32 bit unsigned integer vector of 1 component type. + /// + /// @see ext_vector_uint1_sized + typedef vec<1, uint32, defaultp> u32vec1; + + /// 64 bit unsigned integer vector of 1 component type. + /// + /// @see ext_vector_uint1_sized + typedef vec<1, uint64, defaultp> u64vec1; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_uint2.hpp b/includes/glm/ext/vector_uint2.hpp new file mode 100644 index 0000000..03c00f5 --- /dev/null +++ b/includes/glm/ext/vector_uint2.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_uint2.hpp + +#pragma once +#include "../detail/type_vec2.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 2 components vector of unsigned integer numbers. 
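Note: the epsilon- and ULP-based comparisons implemented above are meant to be combined with the core all()/any() reductions. A brief sketch (illustrative only, assuming the vendored include path):

    #include <glm/vec3.hpp>
    #include <glm/vector_relational.hpp>     // glm::all
    #include <glm/ext/vector_relational.hpp> // equal/notEqual with epsilon or ULPs
    #include <iostream>

    int main()
    {
        glm::vec3 const a(0.1f + 0.2f, 1.0f, 2.0f);
        glm::vec3 const b(0.3f,        1.0f, 2.0f);

        // Absolute-epsilon comparison, component-wise, reduced with all().
        bool const withinEps = glm::all(glm::equal(a, b, 1e-6f));

        // ULP comparison: treat values at most 4 representable steps apart as equal.
        bool const withinUlp = glm::all(glm::equal(a, b, 4));

        std::cout << std::boolalpha << withinEps << ' ' << withinUlp << '\n';
    }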
+ /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<2, unsigned int, defaultp> uvec2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_uint2_sized.hpp b/includes/glm/ext/vector_uint2_sized.hpp new file mode 100644 index 0000000..620fdc6 --- /dev/null +++ b/includes/glm/ext/vector_uint2_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_uint2_sized +/// @file glm/ext/vector_uint2_sized.hpp +/// +/// @defgroup ext_vector_uint2_sized GLM_EXT_vector_uint2_sized +/// @ingroup ext +/// +/// Exposes sized unsigned integer vector of 2 components type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_uint_sized +/// @see ext_vector_int2_sized + +#pragma once + +#include "../ext/vector_uint2.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_uint2_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_uint2_sized + /// @{ + + /// 8 bit unsigned integer vector of 2 components type. + /// + /// @see ext_vector_uint2_sized + typedef vec<2, uint8, defaultp> u8vec2; + + /// 16 bit unsigned integer vector of 2 components type. + /// + /// @see ext_vector_uint2_sized + typedef vec<2, uint16, defaultp> u16vec2; + + /// 32 bit unsigned integer vector of 2 components type. + /// + /// @see ext_vector_uint2_sized + typedef vec<2, uint32, defaultp> u32vec2; + + /// 64 bit unsigned integer vector of 2 components type. + /// + /// @see ext_vector_uint2_sized + typedef vec<2, uint64, defaultp> u64vec2; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_uint3.hpp b/includes/glm/ext/vector_uint3.hpp new file mode 100644 index 0000000..f5b41c4 --- /dev/null +++ b/includes/glm/ext/vector_uint3.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_uint3.hpp + +#pragma once +#include "../detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 3 components vector of unsigned integer numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<3, unsigned int, defaultp> uvec3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_uint3_sized.hpp b/includes/glm/ext/vector_uint3_sized.hpp new file mode 100644 index 0000000..6f96b98 --- /dev/null +++ b/includes/glm/ext/vector_uint3_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_uint3_sized +/// @file glm/ext/vector_uint3_sized.hpp +/// +/// @defgroup ext_vector_uint3_sized GLM_EXT_vector_uint3_sized +/// @ingroup ext +/// +/// Exposes sized unsigned integer vector of 3 components type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_uint_sized +/// @see ext_vector_int3_sized + +#pragma once + +#include "../ext/vector_uint3.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_uint3_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_uint3_sized + /// @{ + + /// 8 bit unsigned integer vector of 3 components type. + /// + /// @see ext_vector_uint3_sized + typedef vec<3, uint8, defaultp> u8vec3; + + /// 16 bit unsigned integer vector of 3 components type. + /// + /// @see ext_vector_uint3_sized + typedef vec<3, uint16, defaultp> u16vec3; + + /// 32 bit unsigned integer vector of 3 components type. 
+ /// + /// @see ext_vector_uint3_sized + typedef vec<3, uint32, defaultp> u32vec3; + + /// 64 bit unsigned integer vector of 3 components type. + /// + /// @see ext_vector_uint3_sized + typedef vec<3, uint64, defaultp> u64vec3; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_uint4.hpp b/includes/glm/ext/vector_uint4.hpp new file mode 100644 index 0000000..32ced58 --- /dev/null +++ b/includes/glm/ext/vector_uint4.hpp @@ -0,0 +1,18 @@ +/// @ref core +/// @file glm/ext/vector_uint4.hpp + +#pragma once +#include "../detail/type_vec4.hpp" + +namespace glm +{ + /// @addtogroup core_vector + /// @{ + + /// 4 components vector of unsigned integer numbers. + /// + /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors + typedef vec<4, unsigned int, defaultp> uvec4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_uint4_sized.hpp b/includes/glm/ext/vector_uint4_sized.hpp new file mode 100644 index 0000000..da992ea --- /dev/null +++ b/includes/glm/ext/vector_uint4_sized.hpp @@ -0,0 +1,49 @@ +/// @ref ext_vector_uint4_sized +/// @file glm/ext/vector_uint4_sized.hpp +/// +/// @defgroup ext_vector_uint4_sized GLM_EXT_vector_uint4_sized +/// @ingroup ext +/// +/// Exposes sized unsigned integer vector of 4 components type. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_uint_sized +/// @see ext_vector_int4_sized + +#pragma once + +#include "../ext/vector_uint4.hpp" +#include "../ext/scalar_uint_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_uint4_sized extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_uint4_sized + /// @{ + + /// 8 bit unsigned integer vector of 4 components type. + /// + /// @see ext_vector_uint4_sized + typedef vec<4, uint8, defaultp> u8vec4; + + /// 16 bit unsigned integer vector of 4 components type. + /// + /// @see ext_vector_uint4_sized + typedef vec<4, uint16, defaultp> u16vec4; + + /// 32 bit unsigned integer vector of 4 components type. + /// + /// @see ext_vector_uint4_sized + typedef vec<4, uint32, defaultp> u32vec4; + + /// 64 bit unsigned integer vector of 4 components type. + /// + /// @see ext_vector_uint4_sized + typedef vec<4, uint64, defaultp> u64vec4; + + /// @} +}//namespace glm diff --git a/includes/glm/ext/vector_ulp.hpp b/includes/glm/ext/vector_ulp.hpp new file mode 100644 index 0000000..7c539bb --- /dev/null +++ b/includes/glm/ext/vector_ulp.hpp @@ -0,0 +1,112 @@ +/// @ref ext_vector_ulp +/// @file glm/ext/vector_ulp.hpp +/// +/// @defgroup ext_vector_ulp GLM_EXT_vector_ulp +/// @ingroup ext +/// +/// Allow the measurement of the accuracy of a function against a reference +/// implementation. This extension works on floating-point data and provide results +/// in ULP. +/// +/// Include to use the features of this extension. +/// +/// @see ext_scalar_ulp +/// @see ext_scalar_relational +/// @see ext_vector_relational + +#pragma once + +// Dependencies +#include "../ext/scalar_ulp.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_EXT_vector_ulp extension included") +#endif + +namespace glm +{ + /// @addtogroup ext_vector_ulp + /// @{ + + /// Return the next ULP value(s) after the input value(s). 
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec nextFloat(vec const& x); + + /// Return the value(s) ULP distance after the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec nextFloat(vec const& x, int ULPs); + + /// Return the value(s) ULP distance after the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec nextFloat(vec const& x, vec const& ULPs); + + /// Return the previous ULP value(s) before the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec prevFloat(vec const& x); + + /// Return the value(s) ULP distance before the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec prevFloat(vec const& x, int ULPs); + + /// Return the value(s) ULP distance before the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec prevFloat(vec const& x, vec const& ULPs); + + /// Return the distance in the number of ULP between 2 single-precision floating-point scalars. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec floatDistance(vec const& x, vec const& y); + + /// Return the distance in the number of ULP between 2 double-precision floating-point scalars. 
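Note: a small sketch of the ULP helpers declared above (illustrative only, not part of the patch, assuming the vendored include path):

    #include <glm/vec2.hpp>
    #include <glm/ext/vector_ulp.hpp>
    #include <iostream>

    int main()
    {
        glm::vec2 const x(1.0f, 0.5f);

        glm::vec2 const up   = glm::nextFloat(x);    // next representable value per component
        glm::vec2 const down = glm::prevFloat(x, 2); // two representable steps below

        // Per-component distance in ULPs; x and up are one step apart.
        glm::ivec2 const dist = glm::floatDistance(x, up);

        std::cout << dist.x << ' ' << dist.y << ' ' << (down.x < x.x) << '\n';
    }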
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam Q Value from qualifier enum + /// + /// @see ext_scalar_ulp + template + GLM_FUNC_DECL vec floatDistance(vec const& x, vec const& y); + + /// @} +}//namespace glm + +#include "vector_ulp.inl" diff --git a/includes/glm/ext/vector_ulp.inl b/includes/glm/ext/vector_ulp.inl new file mode 100644 index 0000000..d3c7648 --- /dev/null +++ b/includes/glm/ext/vector_ulp.inl @@ -0,0 +1,74 @@ +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec nextFloat(vec const& x) + { + vec Result(0); + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = nextFloat(x[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec nextFloat(vec const& x, int ULPs) + { + vec Result(0); + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = nextFloat(x[i], ULPs); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec nextFloat(vec const& x, vec const& ULPs) + { + vec Result(0); + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = nextFloat(x[i], ULPs[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec prevFloat(vec const& x) + { + vec Result(0); + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = prevFloat(x[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec prevFloat(vec const& x, int ULPs) + { + vec Result(0); + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = prevFloat(x[i], ULPs); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec prevFloat(vec const& x, vec const& ULPs) + { + vec Result(0); + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = prevFloat(x[i], ULPs[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec floatDistance(vec const& x, vec const& y) + { + vec Result(0); + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = floatDistance(x[i], y[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec floatDistance(vec const& x, vec const& y) + { + vec Result(0); + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = floatDistance(x[i], y[i]); + return Result; + } +}//namespace glm diff --git a/includes/glm/fwd.hpp b/includes/glm/fwd.hpp new file mode 100644 index 0000000..9c2e5ea --- /dev/null +++ b/includes/glm/fwd.hpp @@ -0,0 +1,1233 @@ +#pragma once + +#include "detail/qualifier.hpp" + +namespace glm +{ +#if GLM_HAS_EXTENDED_INTEGER_TYPE + typedef std::int8_t int8; + typedef std::int16_t int16; + typedef std::int32_t int32; + typedef std::int64_t int64; + + typedef std::uint8_t uint8; + typedef std::uint16_t uint16; + typedef std::uint32_t uint32; + typedef std::uint64_t uint64; +#else + typedef signed char int8; + typedef signed short int16; + typedef signed int int32; + typedef detail::int64 int64; + + typedef unsigned char uint8; + typedef unsigned short uint16; + typedef unsigned int uint32; + typedef detail::uint64 uint64; +#endif + + // Scalar int + + typedef int8 lowp_i8; + typedef int8 mediump_i8; + typedef int8 highp_i8; + typedef int8 i8; + + typedef int8 lowp_int8; + typedef int8 mediump_int8; + typedef int8 highp_int8; + + typedef int8 lowp_int8_t; + typedef int8 mediump_int8_t; + typedef int8 highp_int8_t; + typedef int8 int8_t; + + typedef int16 lowp_i16; + typedef int16 mediump_i16; + typedef int16 highp_i16; + typedef int16 i16; + + typedef int16 lowp_int16; + typedef int16 mediump_int16; + typedef int16 highp_int16; + + typedef int16 lowp_int16_t; + typedef int16 mediump_int16_t; + typedef int16 
highp_int16_t; + typedef int16 int16_t; + + typedef int32 lowp_i32; + typedef int32 mediump_i32; + typedef int32 highp_i32; + typedef int32 i32; + + typedef int32 lowp_int32; + typedef int32 mediump_int32; + typedef int32 highp_int32; + + typedef int32 lowp_int32_t; + typedef int32 mediump_int32_t; + typedef int32 highp_int32_t; + typedef int32 int32_t; + + typedef int64 lowp_i64; + typedef int64 mediump_i64; + typedef int64 highp_i64; + typedef int64 i64; + + typedef int64 lowp_int64; + typedef int64 mediump_int64; + typedef int64 highp_int64; + + typedef int64 lowp_int64_t; + typedef int64 mediump_int64_t; + typedef int64 highp_int64_t; + typedef int64 int64_t; + + // Scalar uint + + typedef unsigned int uint; + + typedef uint8 lowp_u8; + typedef uint8 mediump_u8; + typedef uint8 highp_u8; + typedef uint8 u8; + + typedef uint8 lowp_uint8; + typedef uint8 mediump_uint8; + typedef uint8 highp_uint8; + + typedef uint8 lowp_uint8_t; + typedef uint8 mediump_uint8_t; + typedef uint8 highp_uint8_t; + typedef uint8 uint8_t; + + typedef uint16 lowp_u16; + typedef uint16 mediump_u16; + typedef uint16 highp_u16; + typedef uint16 u16; + + typedef uint16 lowp_uint16; + typedef uint16 mediump_uint16; + typedef uint16 highp_uint16; + + typedef uint16 lowp_uint16_t; + typedef uint16 mediump_uint16_t; + typedef uint16 highp_uint16_t; + typedef uint16 uint16_t; + + typedef uint32 lowp_u32; + typedef uint32 mediump_u32; + typedef uint32 highp_u32; + typedef uint32 u32; + + typedef uint32 lowp_uint32; + typedef uint32 mediump_uint32; + typedef uint32 highp_uint32; + + typedef uint32 lowp_uint32_t; + typedef uint32 mediump_uint32_t; + typedef uint32 highp_uint32_t; + typedef uint32 uint32_t; + + typedef uint64 lowp_u64; + typedef uint64 mediump_u64; + typedef uint64 highp_u64; + typedef uint64 u64; + + typedef uint64 lowp_uint64; + typedef uint64 mediump_uint64; + typedef uint64 highp_uint64; + + typedef uint64 lowp_uint64_t; + typedef uint64 mediump_uint64_t; + typedef uint64 highp_uint64_t; + typedef uint64 uint64_t; + + // Scalar float + + typedef float lowp_f32; + typedef float mediump_f32; + typedef float highp_f32; + typedef float f32; + + typedef float lowp_float32; + typedef float mediump_float32; + typedef float highp_float32; + typedef float float32; + + typedef float lowp_float32_t; + typedef float mediump_float32_t; + typedef float highp_float32_t; + typedef float float32_t; + + + typedef double lowp_f64; + typedef double mediump_f64; + typedef double highp_f64; + typedef double f64; + + typedef double lowp_float64; + typedef double mediump_float64; + typedef double highp_float64; + typedef double float64; + + typedef double lowp_float64_t; + typedef double mediump_float64_t; + typedef double highp_float64_t; + typedef double float64_t; + + // Vector bool + + typedef vec<1, bool, lowp> lowp_bvec1; + typedef vec<2, bool, lowp> lowp_bvec2; + typedef vec<3, bool, lowp> lowp_bvec3; + typedef vec<4, bool, lowp> lowp_bvec4; + + typedef vec<1, bool, mediump> mediump_bvec1; + typedef vec<2, bool, mediump> mediump_bvec2; + typedef vec<3, bool, mediump> mediump_bvec3; + typedef vec<4, bool, mediump> mediump_bvec4; + + typedef vec<1, bool, highp> highp_bvec1; + typedef vec<2, bool, highp> highp_bvec2; + typedef vec<3, bool, highp> highp_bvec3; + typedef vec<4, bool, highp> highp_bvec4; + + typedef vec<1, bool, defaultp> bvec1; + typedef vec<2, bool, defaultp> bvec2; + typedef vec<3, bool, defaultp> bvec3; + typedef vec<4, bool, defaultp> bvec4; + + // Vector int + + typedef vec<1, int, lowp> lowp_ivec1; + 
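Note: fwd.hpp only forward-declares and aliases types; including a concrete header makes them complete. A brief sketch of what the sized scalar and precision typedefs provide (illustrative only, with the caveat that lowp/mediump/highp alias the same storage only in GLM's default, non-aligned configuration):

    #include <glm/fwd.hpp>
    #include <glm/vec3.hpp> // makes the vec3 precision typedefs complete types

    // The sized scalar aliases map onto fixed-width integers.
    static_assert(sizeof(glm::int8)   == 1, "int8 is one byte");
    static_assert(sizeof(glm::uint64) == 8, "uint64 is eight bytes");

    // In the default configuration the precision qualifiers are documentation
    // only: lowp_vec3, mediump_vec3 and highp_vec3 share the same storage.
    static_assert(sizeof(glm::lowp_vec3) == sizeof(glm::highp_vec3), "same storage by default");

    int main() { return 0; }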
typedef vec<2, int, lowp> lowp_ivec2; + typedef vec<3, int, lowp> lowp_ivec3; + typedef vec<4, int, lowp> lowp_ivec4; + + typedef vec<1, int, mediump> mediump_ivec1; + typedef vec<2, int, mediump> mediump_ivec2; + typedef vec<3, int, mediump> mediump_ivec3; + typedef vec<4, int, mediump> mediump_ivec4; + + typedef vec<1, int, highp> highp_ivec1; + typedef vec<2, int, highp> highp_ivec2; + typedef vec<3, int, highp> highp_ivec3; + typedef vec<4, int, highp> highp_ivec4; + + typedef vec<1, int, defaultp> ivec1; + typedef vec<2, int, defaultp> ivec2; + typedef vec<3, int, defaultp> ivec3; + typedef vec<4, int, defaultp> ivec4; + + typedef vec<1, i8, lowp> lowp_i8vec1; + typedef vec<2, i8, lowp> lowp_i8vec2; + typedef vec<3, i8, lowp> lowp_i8vec3; + typedef vec<4, i8, lowp> lowp_i8vec4; + + typedef vec<1, i8, mediump> mediump_i8vec1; + typedef vec<2, i8, mediump> mediump_i8vec2; + typedef vec<3, i8, mediump> mediump_i8vec3; + typedef vec<4, i8, mediump> mediump_i8vec4; + + typedef vec<1, i8, highp> highp_i8vec1; + typedef vec<2, i8, highp> highp_i8vec2; + typedef vec<3, i8, highp> highp_i8vec3; + typedef vec<4, i8, highp> highp_i8vec4; + + typedef vec<1, i8, defaultp> i8vec1; + typedef vec<2, i8, defaultp> i8vec2; + typedef vec<3, i8, defaultp> i8vec3; + typedef vec<4, i8, defaultp> i8vec4; + + typedef vec<1, i16, lowp> lowp_i16vec1; + typedef vec<2, i16, lowp> lowp_i16vec2; + typedef vec<3, i16, lowp> lowp_i16vec3; + typedef vec<4, i16, lowp> lowp_i16vec4; + + typedef vec<1, i16, mediump> mediump_i16vec1; + typedef vec<2, i16, mediump> mediump_i16vec2; + typedef vec<3, i16, mediump> mediump_i16vec3; + typedef vec<4, i16, mediump> mediump_i16vec4; + + typedef vec<1, i16, highp> highp_i16vec1; + typedef vec<2, i16, highp> highp_i16vec2; + typedef vec<3, i16, highp> highp_i16vec3; + typedef vec<4, i16, highp> highp_i16vec4; + + typedef vec<1, i16, defaultp> i16vec1; + typedef vec<2, i16, defaultp> i16vec2; + typedef vec<3, i16, defaultp> i16vec3; + typedef vec<4, i16, defaultp> i16vec4; + + typedef vec<1, i32, lowp> lowp_i32vec1; + typedef vec<2, i32, lowp> lowp_i32vec2; + typedef vec<3, i32, lowp> lowp_i32vec3; + typedef vec<4, i32, lowp> lowp_i32vec4; + + typedef vec<1, i32, mediump> mediump_i32vec1; + typedef vec<2, i32, mediump> mediump_i32vec2; + typedef vec<3, i32, mediump> mediump_i32vec3; + typedef vec<4, i32, mediump> mediump_i32vec4; + + typedef vec<1, i32, highp> highp_i32vec1; + typedef vec<2, i32, highp> highp_i32vec2; + typedef vec<3, i32, highp> highp_i32vec3; + typedef vec<4, i32, highp> highp_i32vec4; + + typedef vec<1, i32, defaultp> i32vec1; + typedef vec<2, i32, defaultp> i32vec2; + typedef vec<3, i32, defaultp> i32vec3; + typedef vec<4, i32, defaultp> i32vec4; + + typedef vec<1, i64, lowp> lowp_i64vec1; + typedef vec<2, i64, lowp> lowp_i64vec2; + typedef vec<3, i64, lowp> lowp_i64vec3; + typedef vec<4, i64, lowp> lowp_i64vec4; + + typedef vec<1, i64, mediump> mediump_i64vec1; + typedef vec<2, i64, mediump> mediump_i64vec2; + typedef vec<3, i64, mediump> mediump_i64vec3; + typedef vec<4, i64, mediump> mediump_i64vec4; + + typedef vec<1, i64, highp> highp_i64vec1; + typedef vec<2, i64, highp> highp_i64vec2; + typedef vec<3, i64, highp> highp_i64vec3; + typedef vec<4, i64, highp> highp_i64vec4; + + typedef vec<1, i64, defaultp> i64vec1; + typedef vec<2, i64, defaultp> i64vec2; + typedef vec<3, i64, defaultp> i64vec3; + typedef vec<4, i64, defaultp> i64vec4; + + // Vector uint + + typedef vec<1, uint, lowp> lowp_uvec1; + typedef vec<2, uint, lowp> lowp_uvec2; + typedef vec<3, 
uint, lowp> lowp_uvec3; + typedef vec<4, uint, lowp> lowp_uvec4; + + typedef vec<1, uint, mediump> mediump_uvec1; + typedef vec<2, uint, mediump> mediump_uvec2; + typedef vec<3, uint, mediump> mediump_uvec3; + typedef vec<4, uint, mediump> mediump_uvec4; + + typedef vec<1, uint, highp> highp_uvec1; + typedef vec<2, uint, highp> highp_uvec2; + typedef vec<3, uint, highp> highp_uvec3; + typedef vec<4, uint, highp> highp_uvec4; + + typedef vec<1, uint, defaultp> uvec1; + typedef vec<2, uint, defaultp> uvec2; + typedef vec<3, uint, defaultp> uvec3; + typedef vec<4, uint, defaultp> uvec4; + + typedef vec<1, u8, lowp> lowp_u8vec1; + typedef vec<2, u8, lowp> lowp_u8vec2; + typedef vec<3, u8, lowp> lowp_u8vec3; + typedef vec<4, u8, lowp> lowp_u8vec4; + + typedef vec<1, u8, mediump> mediump_u8vec1; + typedef vec<2, u8, mediump> mediump_u8vec2; + typedef vec<3, u8, mediump> mediump_u8vec3; + typedef vec<4, u8, mediump> mediump_u8vec4; + + typedef vec<1, u8, highp> highp_u8vec1; + typedef vec<2, u8, highp> highp_u8vec2; + typedef vec<3, u8, highp> highp_u8vec3; + typedef vec<4, u8, highp> highp_u8vec4; + + typedef vec<1, u8, defaultp> u8vec1; + typedef vec<2, u8, defaultp> u8vec2; + typedef vec<3, u8, defaultp> u8vec3; + typedef vec<4, u8, defaultp> u8vec4; + + typedef vec<1, u16, lowp> lowp_u16vec1; + typedef vec<2, u16, lowp> lowp_u16vec2; + typedef vec<3, u16, lowp> lowp_u16vec3; + typedef vec<4, u16, lowp> lowp_u16vec4; + + typedef vec<1, u16, mediump> mediump_u16vec1; + typedef vec<2, u16, mediump> mediump_u16vec2; + typedef vec<3, u16, mediump> mediump_u16vec3; + typedef vec<4, u16, mediump> mediump_u16vec4; + + typedef vec<1, u16, highp> highp_u16vec1; + typedef vec<2, u16, highp> highp_u16vec2; + typedef vec<3, u16, highp> highp_u16vec3; + typedef vec<4, u16, highp> highp_u16vec4; + + typedef vec<1, u16, defaultp> u16vec1; + typedef vec<2, u16, defaultp> u16vec2; + typedef vec<3, u16, defaultp> u16vec3; + typedef vec<4, u16, defaultp> u16vec4; + + typedef vec<1, u32, lowp> lowp_u32vec1; + typedef vec<2, u32, lowp> lowp_u32vec2; + typedef vec<3, u32, lowp> lowp_u32vec3; + typedef vec<4, u32, lowp> lowp_u32vec4; + + typedef vec<1, u32, mediump> mediump_u32vec1; + typedef vec<2, u32, mediump> mediump_u32vec2; + typedef vec<3, u32, mediump> mediump_u32vec3; + typedef vec<4, u32, mediump> mediump_u32vec4; + + typedef vec<1, u32, highp> highp_u32vec1; + typedef vec<2, u32, highp> highp_u32vec2; + typedef vec<3, u32, highp> highp_u32vec3; + typedef vec<4, u32, highp> highp_u32vec4; + + typedef vec<1, u32, defaultp> u32vec1; + typedef vec<2, u32, defaultp> u32vec2; + typedef vec<3, u32, defaultp> u32vec3; + typedef vec<4, u32, defaultp> u32vec4; + + typedef vec<1, u64, lowp> lowp_u64vec1; + typedef vec<2, u64, lowp> lowp_u64vec2; + typedef vec<3, u64, lowp> lowp_u64vec3; + typedef vec<4, u64, lowp> lowp_u64vec4; + + typedef vec<1, u64, mediump> mediump_u64vec1; + typedef vec<2, u64, mediump> mediump_u64vec2; + typedef vec<3, u64, mediump> mediump_u64vec3; + typedef vec<4, u64, mediump> mediump_u64vec4; + + typedef vec<1, u64, highp> highp_u64vec1; + typedef vec<2, u64, highp> highp_u64vec2; + typedef vec<3, u64, highp> highp_u64vec3; + typedef vec<4, u64, highp> highp_u64vec4; + + typedef vec<1, u64, defaultp> u64vec1; + typedef vec<2, u64, defaultp> u64vec2; + typedef vec<3, u64, defaultp> u64vec3; + typedef vec<4, u64, defaultp> u64vec4; + + // Vector float + + typedef vec<1, float, lowp> lowp_vec1; + typedef vec<2, float, lowp> lowp_vec2; + typedef vec<3, float, lowp> lowp_vec3; + typedef vec<4, 
float, lowp> lowp_vec4; + + typedef vec<1, float, mediump> mediump_vec1; + typedef vec<2, float, mediump> mediump_vec2; + typedef vec<3, float, mediump> mediump_vec3; + typedef vec<4, float, mediump> mediump_vec4; + + typedef vec<1, float, highp> highp_vec1; + typedef vec<2, float, highp> highp_vec2; + typedef vec<3, float, highp> highp_vec3; + typedef vec<4, float, highp> highp_vec4; + + typedef vec<1, float, defaultp> vec1; + typedef vec<2, float, defaultp> vec2; + typedef vec<3, float, defaultp> vec3; + typedef vec<4, float, defaultp> vec4; + + typedef vec<1, float, lowp> lowp_fvec1; + typedef vec<2, float, lowp> lowp_fvec2; + typedef vec<3, float, lowp> lowp_fvec3; + typedef vec<4, float, lowp> lowp_fvec4; + + typedef vec<1, float, mediump> mediump_fvec1; + typedef vec<2, float, mediump> mediump_fvec2; + typedef vec<3, float, mediump> mediump_fvec3; + typedef vec<4, float, mediump> mediump_fvec4; + + typedef vec<1, float, highp> highp_fvec1; + typedef vec<2, float, highp> highp_fvec2; + typedef vec<3, float, highp> highp_fvec3; + typedef vec<4, float, highp> highp_fvec4; + + typedef vec<1, f32, defaultp> fvec1; + typedef vec<2, f32, defaultp> fvec2; + typedef vec<3, f32, defaultp> fvec3; + typedef vec<4, f32, defaultp> fvec4; + + typedef vec<1, f32, lowp> lowp_f32vec1; + typedef vec<2, f32, lowp> lowp_f32vec2; + typedef vec<3, f32, lowp> lowp_f32vec3; + typedef vec<4, f32, lowp> lowp_f32vec4; + + typedef vec<1, f32, mediump> mediump_f32vec1; + typedef vec<2, f32, mediump> mediump_f32vec2; + typedef vec<3, f32, mediump> mediump_f32vec3; + typedef vec<4, f32, mediump> mediump_f32vec4; + + typedef vec<1, f32, highp> highp_f32vec1; + typedef vec<2, f32, highp> highp_f32vec2; + typedef vec<3, f32, highp> highp_f32vec3; + typedef vec<4, f32, highp> highp_f32vec4; + + typedef vec<1, f32, defaultp> f32vec1; + typedef vec<2, f32, defaultp> f32vec2; + typedef vec<3, f32, defaultp> f32vec3; + typedef vec<4, f32, defaultp> f32vec4; + + typedef vec<1, f64, lowp> lowp_dvec1; + typedef vec<2, f64, lowp> lowp_dvec2; + typedef vec<3, f64, lowp> lowp_dvec3; + typedef vec<4, f64, lowp> lowp_dvec4; + + typedef vec<1, f64, mediump> mediump_dvec1; + typedef vec<2, f64, mediump> mediump_dvec2; + typedef vec<3, f64, mediump> mediump_dvec3; + typedef vec<4, f64, mediump> mediump_dvec4; + + typedef vec<1, f64, highp> highp_dvec1; + typedef vec<2, f64, highp> highp_dvec2; + typedef vec<3, f64, highp> highp_dvec3; + typedef vec<4, f64, highp> highp_dvec4; + + typedef vec<1, f64, defaultp> dvec1; + typedef vec<2, f64, defaultp> dvec2; + typedef vec<3, f64, defaultp> dvec3; + typedef vec<4, f64, defaultp> dvec4; + + typedef vec<1, f64, lowp> lowp_f64vec1; + typedef vec<2, f64, lowp> lowp_f64vec2; + typedef vec<3, f64, lowp> lowp_f64vec3; + typedef vec<4, f64, lowp> lowp_f64vec4; + + typedef vec<1, f64, mediump> mediump_f64vec1; + typedef vec<2, f64, mediump> mediump_f64vec2; + typedef vec<3, f64, mediump> mediump_f64vec3; + typedef vec<4, f64, mediump> mediump_f64vec4; + + typedef vec<1, f64, highp> highp_f64vec1; + typedef vec<2, f64, highp> highp_f64vec2; + typedef vec<3, f64, highp> highp_f64vec3; + typedef vec<4, f64, highp> highp_f64vec4; + + typedef vec<1, f64, defaultp> f64vec1; + typedef vec<2, f64, defaultp> f64vec2; + typedef vec<3, f64, defaultp> f64vec3; + typedef vec<4, f64, defaultp> f64vec4; + + // Matrix NxN + + typedef mat<2, 2, f32, lowp> lowp_mat2; + typedef mat<3, 3, f32, lowp> lowp_mat3; + typedef mat<4, 4, f32, lowp> lowp_mat4; + + typedef mat<2, 2, f32, mediump> mediump_mat2; + typedef mat<3, 
3, f32, mediump> mediump_mat3; + typedef mat<4, 4, f32, mediump> mediump_mat4; + + typedef mat<2, 2, f32, highp> highp_mat2; + typedef mat<3, 3, f32, highp> highp_mat3; + typedef mat<4, 4, f32, highp> highp_mat4; + + typedef mat<2, 2, f32, defaultp> mat2; + typedef mat<3, 3, f32, defaultp> mat3; + typedef mat<4, 4, f32, defaultp> mat4; + + typedef mat<2, 2, f32, lowp> lowp_fmat2; + typedef mat<3, 3, f32, lowp> lowp_fmat3; + typedef mat<4, 4, f32, lowp> lowp_fmat4; + + typedef mat<2, 2, f32, mediump> mediump_fmat2; + typedef mat<3, 3, f32, mediump> mediump_fmat3; + typedef mat<4, 4, f32, mediump> mediump_fmat4; + + typedef mat<2, 2, f32, highp> highp_fmat2; + typedef mat<3, 3, f32, highp> highp_fmat3; + typedef mat<4, 4, f32, highp> highp_fmat4; + + typedef mat<2, 2, f32, defaultp> fmat2; + typedef mat<3, 3, f32, defaultp> fmat3; + typedef mat<4, 4, f32, defaultp> fmat4; + + typedef mat<2, 2, f32, lowp> lowp_f32mat2; + typedef mat<3, 3, f32, lowp> lowp_f32mat3; + typedef mat<4, 4, f32, lowp> lowp_f32mat4; + + typedef mat<2, 2, f32, mediump> mediump_f32mat2; + typedef mat<3, 3, f32, mediump> mediump_f32mat3; + typedef mat<4, 4, f32, mediump> mediump_f32mat4; + + typedef mat<2, 2, f32, highp> highp_f32mat2; + typedef mat<3, 3, f32, highp> highp_f32mat3; + typedef mat<4, 4, f32, highp> highp_f32mat4; + + typedef mat<2, 2, f32, defaultp> f32mat2; + typedef mat<3, 3, f32, defaultp> f32mat3; + typedef mat<4, 4, f32, defaultp> f32mat4; + + typedef mat<2, 2, f64, lowp> lowp_dmat2; + typedef mat<3, 3, f64, lowp> lowp_dmat3; + typedef mat<4, 4, f64, lowp> lowp_dmat4; + + typedef mat<2, 2, f64, mediump> mediump_dmat2; + typedef mat<3, 3, f64, mediump> mediump_dmat3; + typedef mat<4, 4, f64, mediump> mediump_dmat4; + + typedef mat<2, 2, f64, highp> highp_dmat2; + typedef mat<3, 3, f64, highp> highp_dmat3; + typedef mat<4, 4, f64, highp> highp_dmat4; + + typedef mat<2, 2, f64, defaultp> dmat2; + typedef mat<3, 3, f64, defaultp> dmat3; + typedef mat<4, 4, f64, defaultp> dmat4; + + typedef mat<2, 2, f64, lowp> lowp_f64mat2; + typedef mat<3, 3, f64, lowp> lowp_f64mat3; + typedef mat<4, 4, f64, lowp> lowp_f64mat4; + + typedef mat<2, 2, f64, mediump> mediump_f64mat2; + typedef mat<3, 3, f64, mediump> mediump_f64mat3; + typedef mat<4, 4, f64, mediump> mediump_f64mat4; + + typedef mat<2, 2, f64, highp> highp_f64mat2; + typedef mat<3, 3, f64, highp> highp_f64mat3; + typedef mat<4, 4, f64, highp> highp_f64mat4; + + typedef mat<2, 2, f64, defaultp> f64mat2; + typedef mat<3, 3, f64, defaultp> f64mat3; + typedef mat<4, 4, f64, defaultp> f64mat4; + + // Matrix MxN + + typedef mat<2, 2, f32, lowp> lowp_mat2x2; + typedef mat<2, 3, f32, lowp> lowp_mat2x3; + typedef mat<2, 4, f32, lowp> lowp_mat2x4; + typedef mat<3, 2, f32, lowp> lowp_mat3x2; + typedef mat<3, 3, f32, lowp> lowp_mat3x3; + typedef mat<3, 4, f32, lowp> lowp_mat3x4; + typedef mat<4, 2, f32, lowp> lowp_mat4x2; + typedef mat<4, 3, f32, lowp> lowp_mat4x3; + typedef mat<4, 4, f32, lowp> lowp_mat4x4; + + typedef mat<2, 2, f32, mediump> mediump_mat2x2; + typedef mat<2, 3, f32, mediump> mediump_mat2x3; + typedef mat<2, 4, f32, mediump> mediump_mat2x4; + typedef mat<3, 2, f32, mediump> mediump_mat3x2; + typedef mat<3, 3, f32, mediump> mediump_mat3x3; + typedef mat<3, 4, f32, mediump> mediump_mat3x4; + typedef mat<4, 2, f32, mediump> mediump_mat4x2; + typedef mat<4, 3, f32, mediump> mediump_mat4x3; + typedef mat<4, 4, f32, mediump> mediump_mat4x4; + + typedef mat<2, 2, f32, highp> highp_mat2x2; + typedef mat<2, 3, f32, highp> highp_mat2x3; + typedef mat<2, 4, f32, 
highp> highp_mat2x4; + typedef mat<3, 2, f32, highp> highp_mat3x2; + typedef mat<3, 3, f32, highp> highp_mat3x3; + typedef mat<3, 4, f32, highp> highp_mat3x4; + typedef mat<4, 2, f32, highp> highp_mat4x2; + typedef mat<4, 3, f32, highp> highp_mat4x3; + typedef mat<4, 4, f32, highp> highp_mat4x4; + + typedef mat<2, 2, f32, defaultp> mat2x2; + typedef mat<2, 3, f32, defaultp> mat2x3; + typedef mat<2, 4, f32, defaultp> mat2x4; + typedef mat<3, 2, f32, defaultp> mat3x2; + typedef mat<3, 3, f32, defaultp> mat3x3; + typedef mat<3, 4, f32, defaultp> mat3x4; + typedef mat<4, 2, f32, defaultp> mat4x2; + typedef mat<4, 3, f32, defaultp> mat4x3; + typedef mat<4, 4, f32, defaultp> mat4x4; + + typedef mat<2, 2, f32, lowp> lowp_fmat2x2; + typedef mat<2, 3, f32, lowp> lowp_fmat2x3; + typedef mat<2, 4, f32, lowp> lowp_fmat2x4; + typedef mat<3, 2, f32, lowp> lowp_fmat3x2; + typedef mat<3, 3, f32, lowp> lowp_fmat3x3; + typedef mat<3, 4, f32, lowp> lowp_fmat3x4; + typedef mat<4, 2, f32, lowp> lowp_fmat4x2; + typedef mat<4, 3, f32, lowp> lowp_fmat4x3; + typedef mat<4, 4, f32, lowp> lowp_fmat4x4; + + typedef mat<2, 2, f32, mediump> mediump_fmat2x2; + typedef mat<2, 3, f32, mediump> mediump_fmat2x3; + typedef mat<2, 4, f32, mediump> mediump_fmat2x4; + typedef mat<3, 2, f32, mediump> mediump_fmat3x2; + typedef mat<3, 3, f32, mediump> mediump_fmat3x3; + typedef mat<3, 4, f32, mediump> mediump_fmat3x4; + typedef mat<4, 2, f32, mediump> mediump_fmat4x2; + typedef mat<4, 3, f32, mediump> mediump_fmat4x3; + typedef mat<4, 4, f32, mediump> mediump_fmat4x4; + + typedef mat<2, 2, f32, highp> highp_fmat2x2; + typedef mat<2, 3, f32, highp> highp_fmat2x3; + typedef mat<2, 4, f32, highp> highp_fmat2x4; + typedef mat<3, 2, f32, highp> highp_fmat3x2; + typedef mat<3, 3, f32, highp> highp_fmat3x3; + typedef mat<3, 4, f32, highp> highp_fmat3x4; + typedef mat<4, 2, f32, highp> highp_fmat4x2; + typedef mat<4, 3, f32, highp> highp_fmat4x3; + typedef mat<4, 4, f32, highp> highp_fmat4x4; + + typedef mat<2, 2, f32, defaultp> fmat2x2; + typedef mat<2, 3, f32, defaultp> fmat2x3; + typedef mat<2, 4, f32, defaultp> fmat2x4; + typedef mat<3, 2, f32, defaultp> fmat3x2; + typedef mat<3, 3, f32, defaultp> fmat3x3; + typedef mat<3, 4, f32, defaultp> fmat3x4; + typedef mat<4, 2, f32, defaultp> fmat4x2; + typedef mat<4, 3, f32, defaultp> fmat4x3; + typedef mat<4, 4, f32, defaultp> fmat4x4; + + typedef mat<2, 2, f32, lowp> lowp_f32mat2x2; + typedef mat<2, 3, f32, lowp> lowp_f32mat2x3; + typedef mat<2, 4, f32, lowp> lowp_f32mat2x4; + typedef mat<3, 2, f32, lowp> lowp_f32mat3x2; + typedef mat<3, 3, f32, lowp> lowp_f32mat3x3; + typedef mat<3, 4, f32, lowp> lowp_f32mat3x4; + typedef mat<4, 2, f32, lowp> lowp_f32mat4x2; + typedef mat<4, 3, f32, lowp> lowp_f32mat4x3; + typedef mat<4, 4, f32, lowp> lowp_f32mat4x4; + + typedef mat<2, 2, f32, mediump> mediump_f32mat2x2; + typedef mat<2, 3, f32, mediump> mediump_f32mat2x3; + typedef mat<2, 4, f32, mediump> mediump_f32mat2x4; + typedef mat<3, 2, f32, mediump> mediump_f32mat3x2; + typedef mat<3, 3, f32, mediump> mediump_f32mat3x3; + typedef mat<3, 4, f32, mediump> mediump_f32mat3x4; + typedef mat<4, 2, f32, mediump> mediump_f32mat4x2; + typedef mat<4, 3, f32, mediump> mediump_f32mat4x3; + typedef mat<4, 4, f32, mediump> mediump_f32mat4x4; + + typedef mat<2, 2, f32, highp> highp_f32mat2x2; + typedef mat<2, 3, f32, highp> highp_f32mat2x3; + typedef mat<2, 4, f32, highp> highp_f32mat2x4; + typedef mat<3, 2, f32, highp> highp_f32mat3x2; + typedef mat<3, 3, f32, highp> highp_f32mat3x3; + typedef mat<3, 4, f32, 
highp> highp_f32mat3x4; + typedef mat<4, 2, f32, highp> highp_f32mat4x2; + typedef mat<4, 3, f32, highp> highp_f32mat4x3; + typedef mat<4, 4, f32, highp> highp_f32mat4x4; + + typedef mat<2, 2, f32, defaultp> f32mat2x2; + typedef mat<2, 3, f32, defaultp> f32mat2x3; + typedef mat<2, 4, f32, defaultp> f32mat2x4; + typedef mat<3, 2, f32, defaultp> f32mat3x2; + typedef mat<3, 3, f32, defaultp> f32mat3x3; + typedef mat<3, 4, f32, defaultp> f32mat3x4; + typedef mat<4, 2, f32, defaultp> f32mat4x2; + typedef mat<4, 3, f32, defaultp> f32mat4x3; + typedef mat<4, 4, f32, defaultp> f32mat4x4; + + typedef mat<2, 2, double, lowp> lowp_dmat2x2; + typedef mat<2, 3, double, lowp> lowp_dmat2x3; + typedef mat<2, 4, double, lowp> lowp_dmat2x4; + typedef mat<3, 2, double, lowp> lowp_dmat3x2; + typedef mat<3, 3, double, lowp> lowp_dmat3x3; + typedef mat<3, 4, double, lowp> lowp_dmat3x4; + typedef mat<4, 2, double, lowp> lowp_dmat4x2; + typedef mat<4, 3, double, lowp> lowp_dmat4x3; + typedef mat<4, 4, double, lowp> lowp_dmat4x4; + + typedef mat<2, 2, double, mediump> mediump_dmat2x2; + typedef mat<2, 3, double, mediump> mediump_dmat2x3; + typedef mat<2, 4, double, mediump> mediump_dmat2x4; + typedef mat<3, 2, double, mediump> mediump_dmat3x2; + typedef mat<3, 3, double, mediump> mediump_dmat3x3; + typedef mat<3, 4, double, mediump> mediump_dmat3x4; + typedef mat<4, 2, double, mediump> mediump_dmat4x2; + typedef mat<4, 3, double, mediump> mediump_dmat4x3; + typedef mat<4, 4, double, mediump> mediump_dmat4x4; + + typedef mat<2, 2, double, highp> highp_dmat2x2; + typedef mat<2, 3, double, highp> highp_dmat2x3; + typedef mat<2, 4, double, highp> highp_dmat2x4; + typedef mat<3, 2, double, highp> highp_dmat3x2; + typedef mat<3, 3, double, highp> highp_dmat3x3; + typedef mat<3, 4, double, highp> highp_dmat3x4; + typedef mat<4, 2, double, highp> highp_dmat4x2; + typedef mat<4, 3, double, highp> highp_dmat4x3; + typedef mat<4, 4, double, highp> highp_dmat4x4; + + typedef mat<2, 2, double, defaultp> dmat2x2; + typedef mat<2, 3, double, defaultp> dmat2x3; + typedef mat<2, 4, double, defaultp> dmat2x4; + typedef mat<3, 2, double, defaultp> dmat3x2; + typedef mat<3, 3, double, defaultp> dmat3x3; + typedef mat<3, 4, double, defaultp> dmat3x4; + typedef mat<4, 2, double, defaultp> dmat4x2; + typedef mat<4, 3, double, defaultp> dmat4x3; + typedef mat<4, 4, double, defaultp> dmat4x4; + + typedef mat<2, 2, f64, lowp> lowp_f64mat2x2; + typedef mat<2, 3, f64, lowp> lowp_f64mat2x3; + typedef mat<2, 4, f64, lowp> lowp_f64mat2x4; + typedef mat<3, 2, f64, lowp> lowp_f64mat3x2; + typedef mat<3, 3, f64, lowp> lowp_f64mat3x3; + typedef mat<3, 4, f64, lowp> lowp_f64mat3x4; + typedef mat<4, 2, f64, lowp> lowp_f64mat4x2; + typedef mat<4, 3, f64, lowp> lowp_f64mat4x3; + typedef mat<4, 4, f64, lowp> lowp_f64mat4x4; + + typedef mat<2, 2, f64, mediump> mediump_f64mat2x2; + typedef mat<2, 3, f64, mediump> mediump_f64mat2x3; + typedef mat<2, 4, f64, mediump> mediump_f64mat2x4; + typedef mat<3, 2, f64, mediump> mediump_f64mat3x2; + typedef mat<3, 3, f64, mediump> mediump_f64mat3x3; + typedef mat<3, 4, f64, mediump> mediump_f64mat3x4; + typedef mat<4, 2, f64, mediump> mediump_f64mat4x2; + typedef mat<4, 3, f64, mediump> mediump_f64mat4x3; + typedef mat<4, 4, f64, mediump> mediump_f64mat4x4; + + typedef mat<2, 2, f64, highp> highp_f64mat2x2; + typedef mat<2, 3, f64, highp> highp_f64mat2x3; + typedef mat<2, 4, f64, highp> highp_f64mat2x4; + typedef mat<3, 2, f64, highp> highp_f64mat3x2; + typedef mat<3, 3, f64, highp> highp_f64mat3x3; + typedef mat<3, 
4, f64, highp> highp_f64mat3x4; + typedef mat<4, 2, f64, highp> highp_f64mat4x2; + typedef mat<4, 3, f64, highp> highp_f64mat4x3; + typedef mat<4, 4, f64, highp> highp_f64mat4x4; + + typedef mat<2, 2, f64, defaultp> f64mat2x2; + typedef mat<2, 3, f64, defaultp> f64mat2x3; + typedef mat<2, 4, f64, defaultp> f64mat2x4; + typedef mat<3, 2, f64, defaultp> f64mat3x2; + typedef mat<3, 3, f64, defaultp> f64mat3x3; + typedef mat<3, 4, f64, defaultp> f64mat3x4; + typedef mat<4, 2, f64, defaultp> f64mat4x2; + typedef mat<4, 3, f64, defaultp> f64mat4x3; + typedef mat<4, 4, f64, defaultp> f64mat4x4; + + // Signed integer matrix MxN + + typedef mat<2, 2, int, lowp> lowp_imat2x2; + typedef mat<2, 3, int, lowp> lowp_imat2x3; + typedef mat<2, 4, int, lowp> lowp_imat2x4; + typedef mat<3, 2, int, lowp> lowp_imat3x2; + typedef mat<3, 3, int, lowp> lowp_imat3x3; + typedef mat<3, 4, int, lowp> lowp_imat3x4; + typedef mat<4, 2, int, lowp> lowp_imat4x2; + typedef mat<4, 3, int, lowp> lowp_imat4x3; + typedef mat<4, 4, int, lowp> lowp_imat4x4; + + typedef mat<2, 2, int, mediump> mediump_imat2x2; + typedef mat<2, 3, int, mediump> mediump_imat2x3; + typedef mat<2, 4, int, mediump> mediump_imat2x4; + typedef mat<3, 2, int, mediump> mediump_imat3x2; + typedef mat<3, 3, int, mediump> mediump_imat3x3; + typedef mat<3, 4, int, mediump> mediump_imat3x4; + typedef mat<4, 2, int, mediump> mediump_imat4x2; + typedef mat<4, 3, int, mediump> mediump_imat4x3; + typedef mat<4, 4, int, mediump> mediump_imat4x4; + + typedef mat<2, 2, int, highp> highp_imat2x2; + typedef mat<2, 3, int, highp> highp_imat2x3; + typedef mat<2, 4, int, highp> highp_imat2x4; + typedef mat<3, 2, int, highp> highp_imat3x2; + typedef mat<3, 3, int, highp> highp_imat3x3; + typedef mat<3, 4, int, highp> highp_imat3x4; + typedef mat<4, 2, int, highp> highp_imat4x2; + typedef mat<4, 3, int, highp> highp_imat4x3; + typedef mat<4, 4, int, highp> highp_imat4x4; + + typedef mat<2, 2, int, defaultp> imat2x2; + typedef mat<2, 3, int, defaultp> imat2x3; + typedef mat<2, 4, int, defaultp> imat2x4; + typedef mat<3, 2, int, defaultp> imat3x2; + typedef mat<3, 3, int, defaultp> imat3x3; + typedef mat<3, 4, int, defaultp> imat3x4; + typedef mat<4, 2, int, defaultp> imat4x2; + typedef mat<4, 3, int, defaultp> imat4x3; + typedef mat<4, 4, int, defaultp> imat4x4; + + + typedef mat<2, 2, int8, lowp> lowp_i8mat2x2; + typedef mat<2, 3, int8, lowp> lowp_i8mat2x3; + typedef mat<2, 4, int8, lowp> lowp_i8mat2x4; + typedef mat<3, 2, int8, lowp> lowp_i8mat3x2; + typedef mat<3, 3, int8, lowp> lowp_i8mat3x3; + typedef mat<3, 4, int8, lowp> lowp_i8mat3x4; + typedef mat<4, 2, int8, lowp> lowp_i8mat4x2; + typedef mat<4, 3, int8, lowp> lowp_i8mat4x3; + typedef mat<4, 4, int8, lowp> lowp_i8mat4x4; + + typedef mat<2, 2, int8, mediump> mediump_i8mat2x2; + typedef mat<2, 3, int8, mediump> mediump_i8mat2x3; + typedef mat<2, 4, int8, mediump> mediump_i8mat2x4; + typedef mat<3, 2, int8, mediump> mediump_i8mat3x2; + typedef mat<3, 3, int8, mediump> mediump_i8mat3x3; + typedef mat<3, 4, int8, mediump> mediump_i8mat3x4; + typedef mat<4, 2, int8, mediump> mediump_i8mat4x2; + typedef mat<4, 3, int8, mediump> mediump_i8mat4x3; + typedef mat<4, 4, int8, mediump> mediump_i8mat4x4; + + typedef mat<2, 2, int8, highp> highp_i8mat2x2; + typedef mat<2, 3, int8, highp> highp_i8mat2x3; + typedef mat<2, 4, int8, highp> highp_i8mat2x4; + typedef mat<3, 2, int8, highp> highp_i8mat3x2; + typedef mat<3, 3, int8, highp> highp_i8mat3x3; + typedef mat<3, 4, int8, highp> highp_i8mat3x4; + typedef mat<4, 2, int8, highp> 
highp_i8mat4x2; + typedef mat<4, 3, int8, highp> highp_i8mat4x3; + typedef mat<4, 4, int8, highp> highp_i8mat4x4; + + typedef mat<2, 2, int8, defaultp> i8mat2x2; + typedef mat<2, 3, int8, defaultp> i8mat2x3; + typedef mat<2, 4, int8, defaultp> i8mat2x4; + typedef mat<3, 2, int8, defaultp> i8mat3x2; + typedef mat<3, 3, int8, defaultp> i8mat3x3; + typedef mat<3, 4, int8, defaultp> i8mat3x4; + typedef mat<4, 2, int8, defaultp> i8mat4x2; + typedef mat<4, 3, int8, defaultp> i8mat4x3; + typedef mat<4, 4, int8, defaultp> i8mat4x4; + + + typedef mat<2, 2, int16, lowp> lowp_i16mat2x2; + typedef mat<2, 3, int16, lowp> lowp_i16mat2x3; + typedef mat<2, 4, int16, lowp> lowp_i16mat2x4; + typedef mat<3, 2, int16, lowp> lowp_i16mat3x2; + typedef mat<3, 3, int16, lowp> lowp_i16mat3x3; + typedef mat<3, 4, int16, lowp> lowp_i16mat3x4; + typedef mat<4, 2, int16, lowp> lowp_i16mat4x2; + typedef mat<4, 3, int16, lowp> lowp_i16mat4x3; + typedef mat<4, 4, int16, lowp> lowp_i16mat4x4; + + typedef mat<2, 2, int16, mediump> mediump_i16mat2x2; + typedef mat<2, 3, int16, mediump> mediump_i16mat2x3; + typedef mat<2, 4, int16, mediump> mediump_i16mat2x4; + typedef mat<3, 2, int16, mediump> mediump_i16mat3x2; + typedef mat<3, 3, int16, mediump> mediump_i16mat3x3; + typedef mat<3, 4, int16, mediump> mediump_i16mat3x4; + typedef mat<4, 2, int16, mediump> mediump_i16mat4x2; + typedef mat<4, 3, int16, mediump> mediump_i16mat4x3; + typedef mat<4, 4, int16, mediump> mediump_i16mat4x4; + + typedef mat<2, 2, int16, highp> highp_i16mat2x2; + typedef mat<2, 3, int16, highp> highp_i16mat2x3; + typedef mat<2, 4, int16, highp> highp_i16mat2x4; + typedef mat<3, 2, int16, highp> highp_i16mat3x2; + typedef mat<3, 3, int16, highp> highp_i16mat3x3; + typedef mat<3, 4, int16, highp> highp_i16mat3x4; + typedef mat<4, 2, int16, highp> highp_i16mat4x2; + typedef mat<4, 3, int16, highp> highp_i16mat4x3; + typedef mat<4, 4, int16, highp> highp_i16mat4x4; + + typedef mat<2, 2, int16, defaultp> i16mat2x2; + typedef mat<2, 3, int16, defaultp> i16mat2x3; + typedef mat<2, 4, int16, defaultp> i16mat2x4; + typedef mat<3, 2, int16, defaultp> i16mat3x2; + typedef mat<3, 3, int16, defaultp> i16mat3x3; + typedef mat<3, 4, int16, defaultp> i16mat3x4; + typedef mat<4, 2, int16, defaultp> i16mat4x2; + typedef mat<4, 3, int16, defaultp> i16mat4x3; + typedef mat<4, 4, int16, defaultp> i16mat4x4; + + + typedef mat<2, 2, int32, lowp> lowp_i32mat2x2; + typedef mat<2, 3, int32, lowp> lowp_i32mat2x3; + typedef mat<2, 4, int32, lowp> lowp_i32mat2x4; + typedef mat<3, 2, int32, lowp> lowp_i32mat3x2; + typedef mat<3, 3, int32, lowp> lowp_i32mat3x3; + typedef mat<3, 4, int32, lowp> lowp_i32mat3x4; + typedef mat<4, 2, int32, lowp> lowp_i32mat4x2; + typedef mat<4, 3, int32, lowp> lowp_i32mat4x3; + typedef mat<4, 4, int32, lowp> lowp_i32mat4x4; + + typedef mat<2, 2, int32, mediump> mediump_i32mat2x2; + typedef mat<2, 3, int32, mediump> mediump_i32mat2x3; + typedef mat<2, 4, int32, mediump> mediump_i32mat2x4; + typedef mat<3, 2, int32, mediump> mediump_i32mat3x2; + typedef mat<3, 3, int32, mediump> mediump_i32mat3x3; + typedef mat<3, 4, int32, mediump> mediump_i32mat3x4; + typedef mat<4, 2, int32, mediump> mediump_i32mat4x2; + typedef mat<4, 3, int32, mediump> mediump_i32mat4x3; + typedef mat<4, 4, int32, mediump> mediump_i32mat4x4; + + typedef mat<2, 2, int32, highp> highp_i32mat2x2; + typedef mat<2, 3, int32, highp> highp_i32mat2x3; + typedef mat<2, 4, int32, highp> highp_i32mat2x4; + typedef mat<3, 2, int32, highp> highp_i32mat3x2; + typedef mat<3, 3, int32, highp> 
highp_i32mat3x3; + typedef mat<3, 4, int32, highp> highp_i32mat3x4; + typedef mat<4, 2, int32, highp> highp_i32mat4x2; + typedef mat<4, 3, int32, highp> highp_i32mat4x3; + typedef mat<4, 4, int32, highp> highp_i32mat4x4; + + typedef mat<2, 2, int32, defaultp> i32mat2x2; + typedef mat<2, 3, int32, defaultp> i32mat2x3; + typedef mat<2, 4, int32, defaultp> i32mat2x4; + typedef mat<3, 2, int32, defaultp> i32mat3x2; + typedef mat<3, 3, int32, defaultp> i32mat3x3; + typedef mat<3, 4, int32, defaultp> i32mat3x4; + typedef mat<4, 2, int32, defaultp> i32mat4x2; + typedef mat<4, 3, int32, defaultp> i32mat4x3; + typedef mat<4, 4, int32, defaultp> i32mat4x4; + + + typedef mat<2, 2, int64, lowp> lowp_i64mat2x2; + typedef mat<2, 3, int64, lowp> lowp_i64mat2x3; + typedef mat<2, 4, int64, lowp> lowp_i64mat2x4; + typedef mat<3, 2, int64, lowp> lowp_i64mat3x2; + typedef mat<3, 3, int64, lowp> lowp_i64mat3x3; + typedef mat<3, 4, int64, lowp> lowp_i64mat3x4; + typedef mat<4, 2, int64, lowp> lowp_i64mat4x2; + typedef mat<4, 3, int64, lowp> lowp_i64mat4x3; + typedef mat<4, 4, int64, lowp> lowp_i64mat4x4; + + typedef mat<2, 2, int64, mediump> mediump_i64mat2x2; + typedef mat<2, 3, int64, mediump> mediump_i64mat2x3; + typedef mat<2, 4, int64, mediump> mediump_i64mat2x4; + typedef mat<3, 2, int64, mediump> mediump_i64mat3x2; + typedef mat<3, 3, int64, mediump> mediump_i64mat3x3; + typedef mat<3, 4, int64, mediump> mediump_i64mat3x4; + typedef mat<4, 2, int64, mediump> mediump_i64mat4x2; + typedef mat<4, 3, int64, mediump> mediump_i64mat4x3; + typedef mat<4, 4, int64, mediump> mediump_i64mat4x4; + + typedef mat<2, 2, int64, highp> highp_i64mat2x2; + typedef mat<2, 3, int64, highp> highp_i64mat2x3; + typedef mat<2, 4, int64, highp> highp_i64mat2x4; + typedef mat<3, 2, int64, highp> highp_i64mat3x2; + typedef mat<3, 3, int64, highp> highp_i64mat3x3; + typedef mat<3, 4, int64, highp> highp_i64mat3x4; + typedef mat<4, 2, int64, highp> highp_i64mat4x2; + typedef mat<4, 3, int64, highp> highp_i64mat4x3; + typedef mat<4, 4, int64, highp> highp_i64mat4x4; + + typedef mat<2, 2, int64, defaultp> i64mat2x2; + typedef mat<2, 3, int64, defaultp> i64mat2x3; + typedef mat<2, 4, int64, defaultp> i64mat2x4; + typedef mat<3, 2, int64, defaultp> i64mat3x2; + typedef mat<3, 3, int64, defaultp> i64mat3x3; + typedef mat<3, 4, int64, defaultp> i64mat3x4; + typedef mat<4, 2, int64, defaultp> i64mat4x2; + typedef mat<4, 3, int64, defaultp> i64mat4x3; + typedef mat<4, 4, int64, defaultp> i64mat4x4; + + + // Unsigned integer matrix MxN + + typedef mat<2, 2, uint, lowp> lowp_umat2x2; + typedef mat<2, 3, uint, lowp> lowp_umat2x3; + typedef mat<2, 4, uint, lowp> lowp_umat2x4; + typedef mat<3, 2, uint, lowp> lowp_umat3x2; + typedef mat<3, 3, uint, lowp> lowp_umat3x3; + typedef mat<3, 4, uint, lowp> lowp_umat3x4; + typedef mat<4, 2, uint, lowp> lowp_umat4x2; + typedef mat<4, 3, uint, lowp> lowp_umat4x3; + typedef mat<4, 4, uint, lowp> lowp_umat4x4; + + typedef mat<2, 2, uint, mediump> mediump_umat2x2; + typedef mat<2, 3, uint, mediump> mediump_umat2x3; + typedef mat<2, 4, uint, mediump> mediump_umat2x4; + typedef mat<3, 2, uint, mediump> mediump_umat3x2; + typedef mat<3, 3, uint, mediump> mediump_umat3x3; + typedef mat<3, 4, uint, mediump> mediump_umat3x4; + typedef mat<4, 2, uint, mediump> mediump_umat4x2; + typedef mat<4, 3, uint, mediump> mediump_umat4x3; + typedef mat<4, 4, uint, mediump> mediump_umat4x4; + + typedef mat<2, 2, uint, highp> highp_umat2x2; + typedef mat<2, 3, uint, highp> highp_umat2x3; + typedef mat<2, 4, uint, highp> 
highp_umat2x4; + typedef mat<3, 2, uint, highp> highp_umat3x2; + typedef mat<3, 3, uint, highp> highp_umat3x3; + typedef mat<3, 4, uint, highp> highp_umat3x4; + typedef mat<4, 2, uint, highp> highp_umat4x2; + typedef mat<4, 3, uint, highp> highp_umat4x3; + typedef mat<4, 4, uint, highp> highp_umat4x4; + + typedef mat<2, 2, uint, defaultp> umat2x2; + typedef mat<2, 3, uint, defaultp> umat2x3; + typedef mat<2, 4, uint, defaultp> umat2x4; + typedef mat<3, 2, uint, defaultp> umat3x2; + typedef mat<3, 3, uint, defaultp> umat3x3; + typedef mat<3, 4, uint, defaultp> umat3x4; + typedef mat<4, 2, uint, defaultp> umat4x2; + typedef mat<4, 3, uint, defaultp> umat4x3; + typedef mat<4, 4, uint, defaultp> umat4x4; + + + typedef mat<2, 2, uint8, lowp> lowp_u8mat2x2; + typedef mat<2, 3, uint8, lowp> lowp_u8mat2x3; + typedef mat<2, 4, uint8, lowp> lowp_u8mat2x4; + typedef mat<3, 2, uint8, lowp> lowp_u8mat3x2; + typedef mat<3, 3, uint8, lowp> lowp_u8mat3x3; + typedef mat<3, 4, uint8, lowp> lowp_u8mat3x4; + typedef mat<4, 2, uint8, lowp> lowp_u8mat4x2; + typedef mat<4, 3, uint8, lowp> lowp_u8mat4x3; + typedef mat<4, 4, uint8, lowp> lowp_u8mat4x4; + + typedef mat<2, 2, uint8, mediump> mediump_u8mat2x2; + typedef mat<2, 3, uint8, mediump> mediump_u8mat2x3; + typedef mat<2, 4, uint8, mediump> mediump_u8mat2x4; + typedef mat<3, 2, uint8, mediump> mediump_u8mat3x2; + typedef mat<3, 3, uint8, mediump> mediump_u8mat3x3; + typedef mat<3, 4, uint8, mediump> mediump_u8mat3x4; + typedef mat<4, 2, uint8, mediump> mediump_u8mat4x2; + typedef mat<4, 3, uint8, mediump> mediump_u8mat4x3; + typedef mat<4, 4, uint8, mediump> mediump_u8mat4x4; + + typedef mat<2, 2, uint8, highp> highp_u8mat2x2; + typedef mat<2, 3, uint8, highp> highp_u8mat2x3; + typedef mat<2, 4, uint8, highp> highp_u8mat2x4; + typedef mat<3, 2, uint8, highp> highp_u8mat3x2; + typedef mat<3, 3, uint8, highp> highp_u8mat3x3; + typedef mat<3, 4, uint8, highp> highp_u8mat3x4; + typedef mat<4, 2, uint8, highp> highp_u8mat4x2; + typedef mat<4, 3, uint8, highp> highp_u8mat4x3; + typedef mat<4, 4, uint8, highp> highp_u8mat4x4; + + typedef mat<2, 2, uint8, defaultp> u8mat2x2; + typedef mat<2, 3, uint8, defaultp> u8mat2x3; + typedef mat<2, 4, uint8, defaultp> u8mat2x4; + typedef mat<3, 2, uint8, defaultp> u8mat3x2; + typedef mat<3, 3, uint8, defaultp> u8mat3x3; + typedef mat<3, 4, uint8, defaultp> u8mat3x4; + typedef mat<4, 2, uint8, defaultp> u8mat4x2; + typedef mat<4, 3, uint8, defaultp> u8mat4x3; + typedef mat<4, 4, uint8, defaultp> u8mat4x4; + + + typedef mat<2, 2, uint16, lowp> lowp_u16mat2x2; + typedef mat<2, 3, uint16, lowp> lowp_u16mat2x3; + typedef mat<2, 4, uint16, lowp> lowp_u16mat2x4; + typedef mat<3, 2, uint16, lowp> lowp_u16mat3x2; + typedef mat<3, 3, uint16, lowp> lowp_u16mat3x3; + typedef mat<3, 4, uint16, lowp> lowp_u16mat3x4; + typedef mat<4, 2, uint16, lowp> lowp_u16mat4x2; + typedef mat<4, 3, uint16, lowp> lowp_u16mat4x3; + typedef mat<4, 4, uint16, lowp> lowp_u16mat4x4; + + typedef mat<2, 2, uint16, mediump> mediump_u16mat2x2; + typedef mat<2, 3, uint16, mediump> mediump_u16mat2x3; + typedef mat<2, 4, uint16, mediump> mediump_u16mat2x4; + typedef mat<3, 2, uint16, mediump> mediump_u16mat3x2; + typedef mat<3, 3, uint16, mediump> mediump_u16mat3x3; + typedef mat<3, 4, uint16, mediump> mediump_u16mat3x4; + typedef mat<4, 2, uint16, mediump> mediump_u16mat4x2; + typedef mat<4, 3, uint16, mediump> mediump_u16mat4x3; + typedef mat<4, 4, uint16, mediump> mediump_u16mat4x4; + + typedef mat<2, 2, uint16, highp> highp_u16mat2x2; + typedef mat<2, 3, uint16, 
highp> highp_u16mat2x3; + typedef mat<2, 4, uint16, highp> highp_u16mat2x4; + typedef mat<3, 2, uint16, highp> highp_u16mat3x2; + typedef mat<3, 3, uint16, highp> highp_u16mat3x3; + typedef mat<3, 4, uint16, highp> highp_u16mat3x4; + typedef mat<4, 2, uint16, highp> highp_u16mat4x2; + typedef mat<4, 3, uint16, highp> highp_u16mat4x3; + typedef mat<4, 4, uint16, highp> highp_u16mat4x4; + + typedef mat<2, 2, uint16, defaultp> u16mat2x2; + typedef mat<2, 3, uint16, defaultp> u16mat2x3; + typedef mat<2, 4, uint16, defaultp> u16mat2x4; + typedef mat<3, 2, uint16, defaultp> u16mat3x2; + typedef mat<3, 3, uint16, defaultp> u16mat3x3; + typedef mat<3, 4, uint16, defaultp> u16mat3x4; + typedef mat<4, 2, uint16, defaultp> u16mat4x2; + typedef mat<4, 3, uint16, defaultp> u16mat4x3; + typedef mat<4, 4, uint16, defaultp> u16mat4x4; + + + typedef mat<2, 2, uint32, lowp> lowp_u32mat2x2; + typedef mat<2, 3, uint32, lowp> lowp_u32mat2x3; + typedef mat<2, 4, uint32, lowp> lowp_u32mat2x4; + typedef mat<3, 2, uint32, lowp> lowp_u32mat3x2; + typedef mat<3, 3, uint32, lowp> lowp_u32mat3x3; + typedef mat<3, 4, uint32, lowp> lowp_u32mat3x4; + typedef mat<4, 2, uint32, lowp> lowp_u32mat4x2; + typedef mat<4, 3, uint32, lowp> lowp_u32mat4x3; + typedef mat<4, 4, uint32, lowp> lowp_u32mat4x4; + + typedef mat<2, 2, uint32, mediump> mediump_u32mat2x2; + typedef mat<2, 3, uint32, mediump> mediump_u32mat2x3; + typedef mat<2, 4, uint32, mediump> mediump_u32mat2x4; + typedef mat<3, 2, uint32, mediump> mediump_u32mat3x2; + typedef mat<3, 3, uint32, mediump> mediump_u32mat3x3; + typedef mat<3, 4, uint32, mediump> mediump_u32mat3x4; + typedef mat<4, 2, uint32, mediump> mediump_u32mat4x2; + typedef mat<4, 3, uint32, mediump> mediump_u32mat4x3; + typedef mat<4, 4, uint32, mediump> mediump_u32mat4x4; + + typedef mat<2, 2, uint32, highp> highp_u32mat2x2; + typedef mat<2, 3, uint32, highp> highp_u32mat2x3; + typedef mat<2, 4, uint32, highp> highp_u32mat2x4; + typedef mat<3, 2, uint32, highp> highp_u32mat3x2; + typedef mat<3, 3, uint32, highp> highp_u32mat3x3; + typedef mat<3, 4, uint32, highp> highp_u32mat3x4; + typedef mat<4, 2, uint32, highp> highp_u32mat4x2; + typedef mat<4, 3, uint32, highp> highp_u32mat4x3; + typedef mat<4, 4, uint32, highp> highp_u32mat4x4; + + typedef mat<2, 2, uint32, defaultp> u32mat2x2; + typedef mat<2, 3, uint32, defaultp> u32mat2x3; + typedef mat<2, 4, uint32, defaultp> u32mat2x4; + typedef mat<3, 2, uint32, defaultp> u32mat3x2; + typedef mat<3, 3, uint32, defaultp> u32mat3x3; + typedef mat<3, 4, uint32, defaultp> u32mat3x4; + typedef mat<4, 2, uint32, defaultp> u32mat4x2; + typedef mat<4, 3, uint32, defaultp> u32mat4x3; + typedef mat<4, 4, uint32, defaultp> u32mat4x4; + + + typedef mat<2, 2, uint64, lowp> lowp_u64mat2x2; + typedef mat<2, 3, uint64, lowp> lowp_u64mat2x3; + typedef mat<2, 4, uint64, lowp> lowp_u64mat2x4; + typedef mat<3, 2, uint64, lowp> lowp_u64mat3x2; + typedef mat<3, 3, uint64, lowp> lowp_u64mat3x3; + typedef mat<3, 4, uint64, lowp> lowp_u64mat3x4; + typedef mat<4, 2, uint64, lowp> lowp_u64mat4x2; + typedef mat<4, 3, uint64, lowp> lowp_u64mat4x3; + typedef mat<4, 4, uint64, lowp> lowp_u64mat4x4; + + typedef mat<2, 2, uint64, mediump> mediump_u64mat2x2; + typedef mat<2, 3, uint64, mediump> mediump_u64mat2x3; + typedef mat<2, 4, uint64, mediump> mediump_u64mat2x4; + typedef mat<3, 2, uint64, mediump> mediump_u64mat3x2; + typedef mat<3, 3, uint64, mediump> mediump_u64mat3x3; + typedef mat<3, 4, uint64, mediump> mediump_u64mat3x4; + typedef mat<4, 2, uint64, mediump> mediump_u64mat4x2; + 
typedef mat<4, 3, uint64, mediump> mediump_u64mat4x3; + typedef mat<4, 4, uint64, mediump> mediump_u64mat4x4; + + typedef mat<2, 2, uint64, highp> highp_u64mat2x2; + typedef mat<2, 3, uint64, highp> highp_u64mat2x3; + typedef mat<2, 4, uint64, highp> highp_u64mat2x4; + typedef mat<3, 2, uint64, highp> highp_u64mat3x2; + typedef mat<3, 3, uint64, highp> highp_u64mat3x3; + typedef mat<3, 4, uint64, highp> highp_u64mat3x4; + typedef mat<4, 2, uint64, highp> highp_u64mat4x2; + typedef mat<4, 3, uint64, highp> highp_u64mat4x3; + typedef mat<4, 4, uint64, highp> highp_u64mat4x4; + + typedef mat<2, 2, uint64, defaultp> u64mat2x2; + typedef mat<2, 3, uint64, defaultp> u64mat2x3; + typedef mat<2, 4, uint64, defaultp> u64mat2x4; + typedef mat<3, 2, uint64, defaultp> u64mat3x2; + typedef mat<3, 3, uint64, defaultp> u64mat3x3; + typedef mat<3, 4, uint64, defaultp> u64mat3x4; + typedef mat<4, 2, uint64, defaultp> u64mat4x2; + typedef mat<4, 3, uint64, defaultp> u64mat4x3; + typedef mat<4, 4, uint64, defaultp> u64mat4x4; + + // Quaternion + + typedef qua<float, lowp> lowp_quat; + typedef qua<float, mediump> mediump_quat; + typedef qua<float, highp> highp_quat; + typedef qua<float, defaultp> quat; + + typedef qua<float, lowp> lowp_fquat; + typedef qua<float, mediump> mediump_fquat; + typedef qua<float, highp> highp_fquat; + typedef qua<float, defaultp> fquat; + + typedef qua<f32, lowp> lowp_f32quat; + typedef qua<f32, mediump> mediump_f32quat; + typedef qua<f32, highp> highp_f32quat; + typedef qua<f32, defaultp> f32quat; + + typedef qua<double, lowp> lowp_dquat; + typedef qua<double, mediump> mediump_dquat; + typedef qua<double, highp> highp_dquat; + typedef qua<double, defaultp> dquat; + + typedef qua<f64, lowp> lowp_f64quat; + typedef qua<f64, mediump> mediump_f64quat; + typedef qua<f64, highp> highp_f64quat; + typedef qua<f64, defaultp> f64quat; +}//namespace glm + + diff --git a/includes/glm/geometric.hpp b/includes/glm/geometric.hpp new file mode 100644 index 0000000..ac857e6 --- /dev/null +++ b/includes/glm/geometric.hpp @@ -0,0 +1,116 @@ +/// @ref core +/// @file glm/geometric.hpp +/// +/// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions +/// +/// @defgroup core_func_geometric Geometric functions +/// @ingroup core +/// +/// These operate on vectors as vectors, not component-wise. +/// +/// Include <glm/geometric.hpp> to use these core features. + +#pragma once + +#include "detail/type_vec3.hpp" + +namespace glm +{ + /// @addtogroup core_func_geometric + /// @{ + + /// Returns the length of x, i.e., sqrt(x * x). + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL length man page + /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL T length(vec<L, T, Q> const& x); + + /// Returns the distance between p0 and p1, i.e., length(p0 - p1). + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL distance man page + /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL T distance(vec<L, T, Q> const& p0, vec<L, T, Q> const& p1); + + /// Returns the dot product of x and y, i.e., result = x * y. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL dot man page + /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL GLM_CONSTEXPR T dot(vec<L, T, Q> const& x, vec<L, T, Q> const& y); + + /// Returns the cross product of x and y. + /// + /// @tparam T Floating-point scalar types.
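+ /// + /// Editor's note: an illustrative usage sketch, not part of the upstream GLM header (variable names are hypothetical). + /// @code + /// glm::vec3 const a(1.0f, 0.0f, 0.0f); + /// glm::vec3 const b(0.0f, 1.0f, 0.0f); + /// glm::vec3 const c = glm::cross(a, b); // c == vec3(0, 0, 1) + /// @endcode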
+ /// + /// @see GLSL cross man page + /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions + template<typename T, qualifier Q> + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> cross(vec<3, T, Q> const& x, vec<3, T, Q> const& y); + + /// Returns a vector in the same direction as x but with length of 1. + /// According to issue 10 GLSL 1.10 specification, if length(x) == 0 then result is undefined and generate an error. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL normalize man page + /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> normalize(vec<L, T, Q> const& x); + + /// If dot(Nref, I) < 0.0, return N, otherwise, return -N. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL faceforward man page + /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> faceforward( + vec<L, T, Q> const& N, + vec<L, T, Q> const& I, + vec<L, T, Q> const& Nref); + + /// For the incident vector I and surface orientation N, + /// returns the reflection direction : result = I - 2.0 * dot(N, I) * N. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types. + /// + /// @see GLSL reflect man page + /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions + template<length_t L, typename T, qualifier Q> + GLM_FUNC_DECL vec<L, T, Q> reflect( + vec<L, T, Q> const& I, + vec<L, T, Q> const& N); + + /// For the incident vector I and surface normal N, + /// and the ratio of indices of refraction eta, + /// return the refraction vector. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Floating-point scalar types.
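+ /// + /// Editor's note: an illustrative sketch of how the reflect/refract pair is typically called, not part of the upstream GLM header (values and names are hypothetical). + /// @code + /// glm::vec3 const I = glm::normalize(glm::vec3(1.0f, -1.0f, 0.0f)); // incident direction, normalized + /// glm::vec3 const N(0.0f, 1.0f, 0.0f); // unit surface normal + /// glm::vec3 const R = glm::reflect(I, N); // equals I - 2.0f * glm::dot(N, I) * N + /// glm::vec3 const Rt = glm::refract(I, N, 1.0f / 1.5f); // eta = ratio of indices of refraction (e.g. air into glass) + /// @endcode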
+ /// + /// @see GLSL refract man page + /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions + template + GLM_FUNC_DECL vec refract( + vec const& I, + vec const& N, + T eta); + + /// @} +}//namespace glm + +#include "detail/func_geometric.inl" diff --git a/includes/glm/glm.cppm b/includes/glm/glm.cppm new file mode 100644 index 0000000..85e946e --- /dev/null +++ b/includes/glm/glm.cppm @@ -0,0 +1,2675 @@ +module; + +// #define GLM_GTC_INLINE_NAMESPACE to inline glm::gtc into glm +// #define GLM_EXT_INLINE_NAMESPACE to inline glm::ext into glm +// #define GLM_GTX_INLINE_NAMESPACE to inline glm::gtx into glm + +#include +#include + +export module glm; + +export namespace glm { + // Base types + using glm::qualifier; + using glm::precision; + using glm::vec; + using glm::mat; + using glm::qua; +# if GLM_HAS_TEMPLATE_ALIASES + using glm::tvec1; + using glm::tvec2; + using glm::tvec3; + using glm::tvec4; + using glm::tmat2x2; + using glm::tmat2x3; + using glm::tmat2x4; + using glm::tmat3x2; + using glm::tmat3x3; + using glm::tmat3x4; + using glm::tmat4x2; + using glm::tmat4x3; + using glm::tmat4x4; + using glm::tquat; +# endif + + using glm::int8; + using glm::int16; + using glm::int32; + using glm::int64; + using glm::uint8; + using glm::uint16; + using glm::uint32; + using glm::uint64; + using glm::lowp_i8; + using glm::mediump_i8; + using glm::highp_i8; + using glm::i8; + using glm::lowp_int8; + using glm::mediump_int8; + using glm::highp_int8; + using glm::lowp_int8_t; + using glm::mediump_int8_t; + using glm::highp_int8_t; + using glm::int8_t; + using glm::lowp_i16; + using glm::mediump_i16; + using glm::highp_i16; + using glm::i16; + using glm::lowp_int16; + using glm::mediump_int16; + using glm::highp_int16; + using glm::lowp_int16_t; + using glm::mediump_int16_t; + using glm::highp_int16_t; + using glm::int16_t; + using glm::lowp_i32; + using glm::mediump_i32; + using glm::highp_i32; + using glm::i32; + using glm::lowp_int32; + using glm::mediump_int32; + using glm::highp_int32; + using glm::lowp_int32_t; + using glm::mediump_int32_t; + using glm::highp_int32_t; + using glm::int32_t; + using glm::lowp_i64; + using glm::mediump_i64; + using glm::highp_i64; + using glm::i64; + using glm::lowp_int64; + using glm::mediump_int64; + using glm::highp_int64; + using glm::lowp_int64_t; + using glm::mediump_int64_t; + using glm::highp_int64_t; + using glm::int64_t; + using glm::uint; + using glm::lowp_u8; + using glm::mediump_u8; + using glm::highp_u8; + using glm::u8; + using glm::lowp_uint8; + using glm::mediump_uint8; + using glm::highp_uint8; + using glm::lowp_uint8_t; + using glm::mediump_uint8_t; + using glm::highp_uint8_t; + using glm::uint8_t; + using glm::lowp_u16; + using glm::mediump_u16; + using glm::highp_u16; + using glm::u16; + using glm::lowp_uint16; + using glm::mediump_uint16; + using glm::highp_uint16; + using glm::lowp_uint16_t; + using glm::mediump_uint16_t; + using glm::highp_uint16_t; + using glm::uint16_t; + using glm::lowp_u32; + using glm::mediump_u32; + using glm::highp_u32; + using glm::u32; + using glm::lowp_uint32; + using glm::mediump_uint32; + using glm::highp_uint32; + using glm::lowp_uint32_t; + using glm::mediump_uint32_t; + using glm::highp_uint32_t; + using glm::uint32_t; + using glm::lowp_u64; + using glm::mediump_u64; + using glm::highp_u64; + using glm::u64; + using glm::lowp_uint64; + using glm::mediump_uint64; + using glm::highp_uint64; + using glm::lowp_uint64_t; + using glm::mediump_uint64_t; + using glm::highp_uint64_t; + using 
glm::uint64_t; + using glm::lowp_f32; + using glm::mediump_f32; + using glm::highp_f32; + using glm::f32; + using glm::lowp_float32; + using glm::mediump_float32; + using glm::highp_float32; + using glm::float32; + using glm::lowp_float32_t; + using glm::mediump_float32_t; + using glm::highp_float32_t; + using glm::float32_t; + using glm::lowp_f64; + using glm::mediump_f64; + using glm::highp_f64; + using glm::f64; + using glm::lowp_float64; + using glm::mediump_float64; + using glm::highp_float64; + using glm::float64; + using glm::lowp_float64_t; + using glm::mediump_float64_t; + using glm::highp_float64_t; + using glm::float64_t; + using glm::lowp_bvec1; + using glm::lowp_bvec2; + using glm::lowp_bvec3; + using glm::lowp_bvec4; + using glm::mediump_bvec1; + using glm::mediump_bvec2; + using glm::mediump_bvec3; + using glm::mediump_bvec4; + using glm::highp_bvec1; + using glm::highp_bvec2; + using glm::highp_bvec3; + using glm::highp_bvec4; + using glm::bvec1; + using glm::bvec2; + using glm::bvec3; + using glm::bvec4; + using glm::lowp_ivec1; + using glm::lowp_ivec2; + using glm::lowp_ivec3; + using glm::lowp_ivec4; + using glm::mediump_ivec1; + using glm::mediump_ivec2; + using glm::mediump_ivec3; + using glm::mediump_ivec4; + using glm::highp_ivec1; + using glm::highp_ivec2; + using glm::highp_ivec3; + using glm::highp_ivec4; + using glm::ivec1; + using glm::ivec2; + using glm::ivec3; + using glm::ivec4; + using glm::lowp_i8vec1; + using glm::lowp_i8vec2; + using glm::lowp_i8vec3; + using glm::lowp_i8vec4; + using glm::mediump_i8vec1; + using glm::mediump_i8vec2; + using glm::mediump_i8vec3; + using glm::mediump_i8vec4; + using glm::highp_i8vec1; + using glm::highp_i8vec2; + using glm::highp_i8vec3; + using glm::highp_i8vec4; + using glm::i8vec1; + using glm::i8vec2; + using glm::i8vec3; + using glm::i8vec4; + using glm::lowp_i16vec1; + using glm::lowp_i16vec2; + using glm::lowp_i16vec3; + using glm::lowp_i16vec4; + using glm::mediump_i16vec1; + using glm::mediump_i16vec2; + using glm::mediump_i16vec3; + using glm::mediump_i16vec4; + using glm::highp_i16vec1; + using glm::highp_i16vec2; + using glm::highp_i16vec3; + using glm::highp_i16vec4; + using glm::i16vec1; + using glm::i16vec2; + using glm::i16vec3; + using glm::i16vec4; + using glm::lowp_i32vec1; + using glm::lowp_i32vec2; + using glm::lowp_i32vec3; + using glm::lowp_i32vec4; + using glm::mediump_i32vec1; + using glm::mediump_i32vec2; + using glm::mediump_i32vec3; + using glm::mediump_i32vec4; + using glm::highp_i32vec1; + using glm::highp_i32vec2; + using glm::highp_i32vec3; + using glm::highp_i32vec4; + using glm::i32vec1; + using glm::i32vec2; + using glm::i32vec3; + using glm::i32vec4; + using glm::lowp_i64vec1; + using glm::lowp_i64vec2; + using glm::lowp_i64vec3; + using glm::lowp_i64vec4; + using glm::mediump_i64vec1; + using glm::mediump_i64vec2; + using glm::mediump_i64vec3; + using glm::mediump_i64vec4; + using glm::highp_i64vec1; + using glm::highp_i64vec2; + using glm::highp_i64vec3; + using glm::highp_i64vec4; + using glm::i64vec1; + using glm::i64vec2; + using glm::i64vec3; + using glm::i64vec4; + using glm::lowp_uvec1; + using glm::lowp_uvec2; + using glm::lowp_uvec3; + using glm::lowp_uvec4; + using glm::mediump_uvec1; + using glm::mediump_uvec2; + using glm::mediump_uvec3; + using glm::mediump_uvec4; + using glm::highp_uvec1; + using glm::highp_uvec2; + using glm::highp_uvec3; + using glm::highp_uvec4; + using glm::uvec1; + using glm::uvec2; + using glm::uvec3; + using glm::uvec4; + using glm::lowp_u8vec1; + 
using glm::lowp_u8vec2; + using glm::lowp_u8vec3; + using glm::lowp_u8vec4; + using glm::mediump_u8vec1; + using glm::mediump_u8vec2; + using glm::mediump_u8vec3; + using glm::mediump_u8vec4; + using glm::highp_u8vec1; + using glm::highp_u8vec2; + using glm::highp_u8vec3; + using glm::highp_u8vec4; + using glm::u8vec1; + using glm::u8vec2; + using glm::u8vec3; + using glm::u8vec4; + using glm::lowp_u16vec1; + using glm::lowp_u16vec2; + using glm::lowp_u16vec3; + using glm::lowp_u16vec4; + using glm::mediump_u16vec1; + using glm::mediump_u16vec2; + using glm::mediump_u16vec3; + using glm::mediump_u16vec4; + using glm::highp_u16vec1; + using glm::highp_u16vec2; + using glm::highp_u16vec3; + using glm::highp_u16vec4; + using glm::u16vec1; + using glm::u16vec2; + using glm::u16vec3; + using glm::u16vec4; + using glm::lowp_u32vec1; + using glm::lowp_u32vec2; + using glm::lowp_u32vec3; + using glm::lowp_u32vec4; + using glm::mediump_u32vec1; + using glm::mediump_u32vec2; + using glm::mediump_u32vec3; + using glm::mediump_u32vec4; + using glm::highp_u32vec1; + using glm::highp_u32vec2; + using glm::highp_u32vec3; + using glm::highp_u32vec4; + using glm::u32vec1; + using glm::u32vec2; + using glm::u32vec3; + using glm::u32vec4; + using glm::lowp_u64vec1; + using glm::lowp_u64vec2; + using glm::lowp_u64vec3; + using glm::lowp_u64vec4; + using glm::mediump_u64vec1; + using glm::mediump_u64vec2; + using glm::mediump_u64vec3; + using glm::mediump_u64vec4; + using glm::highp_u64vec1; + using glm::highp_u64vec2; + using glm::highp_u64vec3; + using glm::highp_u64vec4; + using glm::u64vec1; + using glm::u64vec2; + using glm::u64vec3; + using glm::u64vec4; + using glm::lowp_vec1; + using glm::lowp_vec2; + using glm::lowp_vec3; + using glm::lowp_vec4; + using glm::mediump_vec1; + using glm::mediump_vec2; + using glm::mediump_vec3; + using glm::mediump_vec4; + using glm::highp_vec1; + using glm::highp_vec2; + using glm::highp_vec3; + using glm::highp_vec4; + using glm::vec1; + using glm::vec2; + using glm::vec3; + using glm::vec4; + using glm::lowp_fvec1; + using glm::lowp_fvec2; + using glm::lowp_fvec3; + using glm::lowp_fvec4; + using glm::mediump_fvec1; + using glm::mediump_fvec2; + using glm::mediump_fvec3; + using glm::mediump_fvec4; + using glm::highp_fvec1; + using glm::highp_fvec2; + using glm::highp_fvec3; + using glm::highp_fvec4; + using glm::fvec1; + using glm::fvec2; + using glm::fvec3; + using glm::fvec4; + using glm::lowp_f32vec1; + using glm::lowp_f32vec2; + using glm::lowp_f32vec3; + using glm::lowp_f32vec4; + using glm::mediump_f32vec1; + using glm::mediump_f32vec2; + using glm::mediump_f32vec3; + using glm::mediump_f32vec4; + using glm::highp_f32vec1; + using glm::highp_f32vec2; + using glm::highp_f32vec3; + using glm::highp_f32vec4; + using glm::f32vec1; + using glm::f32vec2; + using glm::f32vec3; + using glm::f32vec4; + using glm::lowp_dvec1; + using glm::lowp_dvec2; + using glm::lowp_dvec3; + using glm::lowp_dvec4; + using glm::mediump_dvec1; + using glm::mediump_dvec2; + using glm::mediump_dvec3; + using glm::mediump_dvec4; + using glm::highp_dvec1; + using glm::highp_dvec2; + using glm::highp_dvec3; + using glm::highp_dvec4; + using glm::dvec1; + using glm::dvec2; + using glm::dvec3; + using glm::dvec4; + using glm::lowp_f64vec1; + using glm::lowp_f64vec2; + using glm::lowp_f64vec3; + using glm::lowp_f64vec4; + using glm::mediump_f64vec1; + using glm::mediump_f64vec2; + using glm::mediump_f64vec3; + using glm::mediump_f64vec4; + using glm::highp_f64vec1; + using glm::highp_f64vec2; + 
using glm::highp_f64vec3; + using glm::highp_f64vec4; + using glm::f64vec1; + using glm::f64vec2; + using glm::f64vec3; + using glm::f64vec4; + using glm::lowp_mat2; + using glm::lowp_mat3; + using glm::lowp_mat4; + using glm::mediump_mat2; + using glm::mediump_mat3; + using glm::mediump_mat4; + using glm::highp_mat2; + using glm::highp_mat3; + using glm::highp_mat4; + using glm::mat2; + using glm::mat3; + using glm::mat4; + using glm::lowp_fmat2; + using glm::lowp_fmat3; + using glm::lowp_fmat4; + using glm::mediump_fmat2; + using glm::mediump_fmat3; + using glm::mediump_fmat4; + using glm::highp_fmat2; + using glm::highp_fmat3; + using glm::highp_fmat4; + using glm::fmat2; + using glm::fmat3; + using glm::fmat4; + using glm::lowp_f32mat2; + using glm::lowp_f32mat3; + using glm::lowp_f32mat4; + using glm::mediump_f32mat2; + using glm::mediump_f32mat3; + using glm::mediump_f32mat4; + using glm::highp_f32mat2; + using glm::highp_f32mat3; + using glm::highp_f32mat4; + using glm::f32mat2; + using glm::f32mat3; + using glm::f32mat4; + using glm::lowp_dmat2; + using glm::lowp_dmat3; + using glm::lowp_dmat4; + using glm::mediump_dmat2; + using glm::mediump_dmat3; + using glm::mediump_dmat4; + using glm::highp_dmat2; + using glm::highp_dmat3; + using glm::highp_dmat4; + using glm::dmat2; + using glm::dmat3; + using glm::dmat4; + using glm::lowp_f64mat2; + using glm::lowp_f64mat3; + using glm::lowp_f64mat4; + using glm::mediump_f64mat2; + using glm::mediump_f64mat3; + using glm::mediump_f64mat4; + using glm::highp_f64mat2; + using glm::highp_f64mat3; + using glm::highp_f64mat4; + using glm::f64mat2; + using glm::f64mat3; + using glm::f64mat4; + using glm::lowp_mat2x2; + using glm::lowp_mat2x3; + using glm::lowp_mat2x4; + using glm::lowp_mat3x2; + using glm::lowp_mat3x3; + using glm::lowp_mat3x4; + using glm::lowp_mat4x2; + using glm::lowp_mat4x3; + using glm::lowp_mat4x4; + using glm::mediump_mat2x2; + using glm::mediump_mat2x3; + using glm::mediump_mat2x4; + using glm::mediump_mat3x2; + using glm::mediump_mat3x3; + using glm::mediump_mat3x4; + using glm::mediump_mat4x2; + using glm::mediump_mat4x3; + using glm::mediump_mat4x4; + using glm::highp_mat2x2; + using glm::highp_mat2x3; + using glm::highp_mat2x4; + using glm::highp_mat3x2; + using glm::highp_mat3x3; + using glm::highp_mat3x4; + using glm::highp_mat4x2; + using glm::highp_mat4x3; + using glm::highp_mat4x4; + using glm::mat2x2; + using glm::mat2x3; + using glm::mat2x4; + using glm::mat3x2; + using glm::mat3x3; + using glm::mat3x4; + using glm::mat4x2; + using glm::mat4x3; + using glm::mat4x4; + using glm::lowp_fmat2x2; + using glm::lowp_fmat2x3; + using glm::lowp_fmat2x4; + using glm::lowp_fmat3x2; + using glm::lowp_fmat3x3; + using glm::lowp_fmat3x4; + using glm::lowp_fmat4x2; + using glm::lowp_fmat4x3; + using glm::lowp_fmat4x4; + using glm::mediump_fmat2x2; + using glm::mediump_fmat2x3; + using glm::mediump_fmat2x4; + using glm::mediump_fmat3x2; + using glm::mediump_fmat3x3; + using glm::mediump_fmat3x4; + using glm::mediump_fmat4x2; + using glm::mediump_fmat4x3; + using glm::mediump_fmat4x4; + using glm::highp_fmat2x2; + using glm::highp_fmat2x3; + using glm::highp_fmat2x4; + using glm::highp_fmat3x2; + using glm::highp_fmat3x3; + using glm::highp_fmat3x4; + using glm::highp_fmat4x2; + using glm::highp_fmat4x3; + using glm::highp_fmat4x4; + using glm::fmat2x2; + using glm::fmat2x3; + using glm::fmat2x4; + using glm::fmat3x2; + using glm::fmat3x3; + using glm::fmat3x4; + using glm::fmat4x2; + using glm::fmat4x3; + using glm::fmat4x4; + 
using glm::lowp_f32mat2x2; + using glm::lowp_f32mat2x3; + using glm::lowp_f32mat2x4; + using glm::lowp_f32mat3x2; + using glm::lowp_f32mat3x3; + using glm::lowp_f32mat3x4; + using glm::lowp_f32mat4x2; + using glm::lowp_f32mat4x3; + using glm::lowp_f32mat4x4; + + using glm::mediump_f32mat2x2; + using glm::mediump_f32mat2x3; + using glm::mediump_f32mat2x4; + using glm::mediump_f32mat3x2; + using glm::mediump_f32mat3x3; + using glm::mediump_f32mat3x4; + using glm::mediump_f32mat4x2; + using glm::mediump_f32mat4x3; + using glm::mediump_f32mat4x4; + using glm::highp_f32mat2x2; + using glm::highp_f32mat2x3; + using glm::highp_f32mat2x4; + using glm::highp_f32mat3x2; + using glm::highp_f32mat3x3; + using glm::highp_f32mat3x4; + using glm::highp_f32mat4x2; + using glm::highp_f32mat4x3; + using glm::highp_f32mat4x4; + using glm::f32mat2x2; + using glm::f32mat2x3; + using glm::f32mat2x4; + using glm::f32mat3x2; + using glm::f32mat3x3; + using glm::f32mat3x4; + using glm::f32mat4x2; + using glm::f32mat4x3; + using glm::f32mat4x4; + using glm::lowp_dmat2x2; + using glm::lowp_dmat2x3; + using glm::lowp_dmat2x4; + using glm::lowp_dmat3x2; + using glm::lowp_dmat3x3; + using glm::lowp_dmat3x4; + using glm::lowp_dmat4x2; + using glm::lowp_dmat4x3; + using glm::lowp_dmat4x4; + using glm::mediump_dmat2x2; + using glm::mediump_dmat2x3; + using glm::mediump_dmat2x4; + using glm::mediump_dmat3x2; + using glm::mediump_dmat3x3; + using glm::mediump_dmat3x4; + using glm::mediump_dmat4x2; + using glm::mediump_dmat4x3; + using glm::mediump_dmat4x4; + using glm::highp_dmat2x2; + using glm::highp_dmat2x3; + using glm::highp_dmat2x4; + using glm::highp_dmat3x2; + using glm::highp_dmat3x3; + using glm::highp_dmat3x4; + using glm::highp_dmat4x2; + using glm::highp_dmat4x3; + using glm::highp_dmat4x4; + using glm::dmat2x2; + using glm::dmat2x3; + using glm::dmat2x4; + using glm::dmat3x2; + using glm::dmat3x3; + using glm::dmat3x4; + using glm::dmat4x2; + using glm::dmat4x3; + using glm::dmat4x4; + using glm::lowp_f64mat2x2; + using glm::lowp_f64mat2x3; + using glm::lowp_f64mat2x4; + using glm::lowp_f64mat3x2; + using glm::lowp_f64mat3x3; + using glm::lowp_f64mat3x4; + using glm::lowp_f64mat4x2; + using glm::lowp_f64mat4x3; + using glm::lowp_f64mat4x4; + using glm::mediump_f64mat2x2; + using glm::mediump_f64mat2x3; + using glm::mediump_f64mat2x4; + using glm::mediump_f64mat3x2; + using glm::mediump_f64mat3x3; + using glm::mediump_f64mat3x4; + using glm::mediump_f64mat4x2; + using glm::mediump_f64mat4x3; + using glm::mediump_f64mat4x4; + using glm::highp_f64mat2x2; + using glm::highp_f64mat2x3; + using glm::highp_f64mat2x4; + using glm::highp_f64mat3x2; + using glm::highp_f64mat3x3; + using glm::highp_f64mat3x4; + using glm::highp_f64mat4x2; + using glm::highp_f64mat4x3; + using glm::highp_f64mat4x4; + using glm::f64mat2x2; + using glm::f64mat2x3; + using glm::f64mat2x4; + using glm::f64mat3x2; + using glm::f64mat3x3; + using glm::f64mat3x4; + using glm::f64mat4x2; + using glm::f64mat4x3; + using glm::f64mat4x4; + using glm::lowp_imat2x2; + using glm::lowp_imat2x3; + using glm::lowp_imat2x4; + using glm::lowp_imat3x2; + using glm::lowp_imat3x3; + using glm::lowp_imat3x4; + using glm::lowp_imat4x2; + using glm::lowp_imat4x3; + using glm::lowp_imat4x4; + using glm::mediump_imat2x2; + using glm::mediump_imat2x3; + using glm::mediump_imat2x4; + using glm::mediump_imat3x2; + using glm::mediump_imat3x3; + using glm::mediump_imat3x4; + using glm::mediump_imat4x2; + using glm::mediump_imat4x3; + using glm::mediump_imat4x4; + using 
glm::highp_imat2x2; + using glm::highp_imat2x3; + using glm::highp_imat2x4; + using glm::highp_imat3x2; + using glm::highp_imat3x3; + using glm::highp_imat3x4; + using glm::highp_imat4x2; + using glm::highp_imat4x3; + using glm::highp_imat4x4; + using glm::imat2x2; + using glm::imat2x3; + using glm::imat2x4; + using glm::imat3x2; + using glm::imat3x3; + using glm::imat3x4; + using glm::imat4x2; + using glm::imat4x3; + using glm::imat4x4; + using glm::lowp_i8mat2x2; + using glm::lowp_i8mat2x3; + using glm::lowp_i8mat2x4; + using glm::lowp_i8mat3x2; + using glm::lowp_i8mat3x3; + using glm::lowp_i8mat3x4; + using glm::lowp_i8mat4x2; + using glm::lowp_i8mat4x3; + using glm::lowp_i8mat4x4; + using glm::mediump_i8mat2x2; + using glm::mediump_i8mat2x3; + using glm::mediump_i8mat2x4; + using glm::mediump_i8mat3x2; + using glm::mediump_i8mat3x3; + using glm::mediump_i8mat3x4; + using glm::mediump_i8mat4x2; + using glm::mediump_i8mat4x3; + using glm::mediump_i8mat4x4; + using glm::highp_i8mat2x2; + using glm::highp_i8mat2x3; + using glm::highp_i8mat2x4; + using glm::highp_i8mat3x2; + using glm::highp_i8mat3x3; + using glm::highp_i8mat3x4; + using glm::highp_i8mat4x2; + using glm::highp_i8mat4x3; + using glm::highp_i8mat4x4; + using glm::i8mat2x2; + using glm::i8mat2x3; + using glm::i8mat2x4; + using glm::i8mat3x2; + using glm::i8mat3x3; + using glm::i8mat3x4; + using glm::i8mat4x2; + using glm::i8mat4x3; + using glm::i8mat4x4; + using glm::lowp_i16mat2x2; + using glm::lowp_i16mat2x3; + using glm::lowp_i16mat2x4; + using glm::lowp_i16mat3x2; + using glm::lowp_i16mat3x3; + using glm::lowp_i16mat3x4; + using glm::lowp_i16mat4x2; + using glm::lowp_i16mat4x3; + using glm::lowp_i16mat4x4; + using glm::mediump_i16mat2x2; + using glm::mediump_i16mat2x3; + using glm::mediump_i16mat2x4; + using glm::mediump_i16mat3x2; + using glm::mediump_i16mat3x3; + using glm::mediump_i16mat3x4; + using glm::mediump_i16mat4x2; + using glm::mediump_i16mat4x3; + using glm::mediump_i16mat4x4; + using glm::highp_i16mat2x2; + using glm::highp_i16mat2x3; + using glm::highp_i16mat2x4; + using glm::highp_i16mat3x2; + using glm::highp_i16mat3x3; + using glm::highp_i16mat3x4; + using glm::highp_i16mat4x2; + using glm::highp_i16mat4x3; + using glm::highp_i16mat4x4; + using glm::i16mat2x2; + using glm::i16mat2x3; + using glm::i16mat2x4; + using glm::i16mat3x2; + using glm::i16mat3x3; + using glm::i16mat3x4; + using glm::i16mat4x2; + using glm::i16mat4x3; + using glm::i16mat4x4; + using glm::lowp_i32mat2x2; + using glm::lowp_i32mat2x3; + using glm::lowp_i32mat2x4; + using glm::lowp_i32mat3x2; + using glm::lowp_i32mat3x3; + using glm::lowp_i32mat3x4; + using glm::lowp_i32mat4x2; + using glm::lowp_i32mat4x3; + using glm::lowp_i32mat4x4; + using glm::mediump_i32mat2x2; + using glm::mediump_i32mat2x3; + using glm::mediump_i32mat2x4; + using glm::mediump_i32mat3x2; + using glm::mediump_i32mat3x3; + using glm::mediump_i32mat3x4; + using glm::mediump_i32mat4x2; + using glm::mediump_i32mat4x3; + using glm::mediump_i32mat4x4; + using glm::highp_i32mat2x2; + using glm::highp_i32mat2x3; + using glm::highp_i32mat2x4; + using glm::highp_i32mat3x2; + using glm::highp_i32mat3x3; + using glm::highp_i32mat3x4; + using glm::highp_i32mat4x2; + using glm::highp_i32mat4x3; + using glm::highp_i32mat4x4; + using glm::i32mat2x2; + using glm::i32mat2x3; + using glm::i32mat2x4; + using glm::i32mat3x2; + using glm::i32mat3x3; + using glm::i32mat3x4; + using glm::i32mat4x2; + using glm::i32mat4x3; + using glm::i32mat4x4; + using glm::lowp_i64mat2x2; + using 
glm::lowp_i64mat2x3; + using glm::lowp_i64mat2x4; + using glm::lowp_i64mat3x2; + using glm::lowp_i64mat3x3; + using glm::lowp_i64mat3x4; + using glm::lowp_i64mat4x2; + using glm::lowp_i64mat4x3; + using glm::lowp_i64mat4x4; + using glm::mediump_i64mat2x2; + using glm::mediump_i64mat2x3; + using glm::mediump_i64mat2x4; + using glm::mediump_i64mat3x2; + using glm::mediump_i64mat3x3; + using glm::mediump_i64mat3x4; + using glm::mediump_i64mat4x2; + using glm::mediump_i64mat4x3; + using glm::mediump_i64mat4x4; + using glm::highp_i64mat2x2; + using glm::highp_i64mat2x3; + using glm::highp_i64mat2x4; + using glm::highp_i64mat3x2; + using glm::highp_i64mat3x3; + using glm::highp_i64mat3x4; + using glm::highp_i64mat4x2; + using glm::highp_i64mat4x3; + using glm::highp_i64mat4x4; + using glm::i64mat2x2; + using glm::i64mat2x3; + using glm::i64mat2x4; + using glm::i64mat3x2; + using glm::i64mat3x3; + using glm::i64mat3x4; + using glm::i64mat4x2; + using glm::i64mat4x3; + using glm::i64mat4x4; + using glm::lowp_umat2x2; + using glm::lowp_umat2x3; + using glm::lowp_umat2x4; + using glm::lowp_umat3x2; + using glm::lowp_umat3x3; + using glm::lowp_umat3x4; + using glm::lowp_umat4x2; + using glm::lowp_umat4x3; + using glm::lowp_umat4x4; + using glm::mediump_umat2x2; + using glm::mediump_umat2x3; + using glm::mediump_umat2x4; + using glm::mediump_umat3x2; + using glm::mediump_umat3x3; + using glm::mediump_umat3x4; + using glm::mediump_umat4x2; + using glm::mediump_umat4x3; + using glm::mediump_umat4x4; + using glm::highp_umat2x2; + using glm::highp_umat2x3; + using glm::highp_umat2x4; + using glm::highp_umat3x2; + using glm::highp_umat3x3; + using glm::highp_umat3x4; + using glm::highp_umat4x2; + using glm::highp_umat4x3; + using glm::highp_umat4x4; + using glm::umat2x2; + using glm::umat2x3; + using glm::umat2x4; + using glm::umat3x2; + using glm::umat3x3; + using glm::umat3x4; + using glm::umat4x2; + using glm::umat4x3; + using glm::umat4x4; + using glm::lowp_u8mat2x2; + using glm::lowp_u8mat2x3; + using glm::lowp_u8mat2x4; + using glm::lowp_u8mat3x2; + using glm::lowp_u8mat3x3; + using glm::lowp_u8mat3x4; + using glm::lowp_u8mat4x2; + using glm::lowp_u8mat4x3; + using glm::lowp_u8mat4x4; + using glm::mediump_u8mat2x2; + using glm::mediump_u8mat2x3; + using glm::mediump_u8mat2x4; + using glm::mediump_u8mat3x2; + using glm::mediump_u8mat3x3; + using glm::mediump_u8mat3x4; + using glm::mediump_u8mat4x2; + using glm::mediump_u8mat4x3; + using glm::mediump_u8mat4x4; + using glm::highp_u8mat2x2; + using glm::highp_u8mat2x3; + using glm::highp_u8mat2x4; + using glm::highp_u8mat3x2; + using glm::highp_u8mat3x3; + using glm::highp_u8mat3x4; + using glm::highp_u8mat4x2; + using glm::highp_u8mat4x3; + using glm::highp_u8mat4x4; + using glm::u8mat2x2; + using glm::u8mat2x3; + using glm::u8mat2x4; + using glm::u8mat3x2; + using glm::u8mat3x3; + using glm::u8mat3x4; + using glm::u8mat4x2; + using glm::u8mat4x3; + using glm::u8mat4x4; + using glm::lowp_u16mat2x2; + using glm::lowp_u16mat2x3; + using glm::lowp_u16mat2x4; + using glm::lowp_u16mat3x2; + using glm::lowp_u16mat3x3; + using glm::lowp_u16mat3x4; + using glm::lowp_u16mat4x2; + using glm::lowp_u16mat4x3; + using glm::lowp_u16mat4x4; + using glm::mediump_u16mat2x2; + using glm::mediump_u16mat2x3; + using glm::mediump_u16mat2x4; + using glm::mediump_u16mat3x2; + using glm::mediump_u16mat3x3; + using glm::mediump_u16mat3x4; + using glm::mediump_u16mat4x2; + using glm::mediump_u16mat4x3; + using glm::mediump_u16mat4x4; + using glm::highp_u16mat2x2; + using 
glm::highp_u16mat2x3; + using glm::highp_u16mat2x4; + using glm::highp_u16mat3x2; + using glm::highp_u16mat3x3; + using glm::highp_u16mat3x4; + using glm::highp_u16mat4x2; + using glm::highp_u16mat4x3; + using glm::highp_u16mat4x4; + using glm::u16mat2x2; + using glm::u16mat2x3; + using glm::u16mat2x4; + using glm::u16mat3x2; + using glm::u16mat3x3; + using glm::u16mat3x4; + using glm::u16mat4x2; + using glm::u16mat4x3; + using glm::u16mat4x4; + using glm::lowp_u32mat2x2; + using glm::lowp_u32mat2x3; + using glm::lowp_u32mat2x4; + using glm::lowp_u32mat3x2; + using glm::lowp_u32mat3x3; + using glm::lowp_u32mat3x4; + using glm::lowp_u32mat4x2; + using glm::lowp_u32mat4x3; + using glm::lowp_u32mat4x4; + using glm::mediump_u32mat2x2; + using glm::mediump_u32mat2x3; + using glm::mediump_u32mat2x4; + using glm::mediump_u32mat3x2; + using glm::mediump_u32mat3x3; + using glm::mediump_u32mat3x4; + using glm::mediump_u32mat4x2; + using glm::mediump_u32mat4x3; + using glm::mediump_u32mat4x4; + using glm::highp_u32mat2x2; + using glm::highp_u32mat2x3; + using glm::highp_u32mat2x4; + using glm::highp_u32mat3x2; + using glm::highp_u32mat3x3; + using glm::highp_u32mat3x4; + using glm::highp_u32mat4x2; + using glm::highp_u32mat4x3; + using glm::highp_u32mat4x4; + using glm::u32mat2x2; + using glm::u32mat2x3; + using glm::u32mat2x4; + using glm::u32mat3x2; + using glm::u32mat3x3; + using glm::u32mat3x4; + using glm::u32mat4x2; + using glm::u32mat4x3; + using glm::u32mat4x4; + using glm::lowp_u64mat2x2; + using glm::lowp_u64mat2x3; + using glm::lowp_u64mat2x4; + using glm::lowp_u64mat3x2; + using glm::lowp_u64mat3x3; + using glm::lowp_u64mat3x4; + using glm::lowp_u64mat4x2; + using glm::lowp_u64mat4x3; + using glm::lowp_u64mat4x4; + using glm::mediump_u64mat2x2; + using glm::mediump_u64mat2x3; + using glm::mediump_u64mat2x4; + using glm::mediump_u64mat3x2; + using glm::mediump_u64mat3x3; + using glm::mediump_u64mat3x4; + using glm::mediump_u64mat4x2; + using glm::mediump_u64mat4x3; + using glm::mediump_u64mat4x4; + using glm::highp_u64mat2x2; + using glm::highp_u64mat2x3; + using glm::highp_u64mat2x4; + using glm::highp_u64mat3x2; + using glm::highp_u64mat3x3; + using glm::highp_u64mat3x4; + using glm::highp_u64mat4x2; + using glm::highp_u64mat4x3; + using glm::highp_u64mat4x4; + using glm::u64mat2x2; + using glm::u64mat2x3; + using glm::u64mat2x4; + using glm::u64mat3x2; + using glm::u64mat3x3; + using glm::u64mat3x4; + using glm::u64mat4x2; + using glm::u64mat4x3; + using glm::u64mat4x4; + using glm::lowp_quat; + using glm::mediump_quat; + using glm::highp_quat; + using glm::quat; + using glm::lowp_fquat; + using glm::mediump_fquat; + using glm::highp_fquat; + using glm::fquat; + using glm::lowp_f32quat; + using glm::mediump_f32quat; + using glm::highp_f32quat; + using glm::f32quat; + using glm::lowp_dquat; + using glm::mediump_dquat; + using glm::highp_dquat; + using glm::dquat; + using glm::lowp_f64quat; + using glm::mediump_f64quat; + using glm::highp_f64quat; + using glm::f64quat; + + // Operators + using glm::operator+; + using glm::operator-; + using glm::operator*; + using glm::operator/; + using glm::operator%; + using glm::operator^; + using glm::operator&; + using glm::operator|; + using glm::operator~; + using glm::operator<<; + using glm::operator>>; + using glm::operator==; + using glm::operator!=; + using glm::operator&&; + using glm::operator||; + + // Core functions + using glm::abs; + using glm::acos; + using glm::acosh; + using glm::all; + using glm::any; + using glm::asin; + using 
glm::asinh; + using glm::atan; + using glm::atanh; + using glm::bitCount; + using glm::bitfieldExtract; + using glm::bitfieldInsert; + using glm::bitfieldReverse; + using glm::ceil; + using glm::clamp; + using glm::cos; + using glm::cosh; + using glm::cross; + using glm::degrees; + using glm::determinant; + using glm::distance; + using glm::dot; + using glm::equal; + using glm::exp; + using glm::exp2; + using glm::faceforward; + using glm::findLSB; + using glm::findMSB; + using glm::floatBitsToInt; + using glm::floatBitsToUint; + using glm::floor; + using glm::fma; + using glm::fract; + using glm::frexp; + using glm::greaterThan; + using glm::greaterThanEqual; + using glm::imulExtended; + using glm::intBitsToFloat; + using glm::inverse; + using glm::inversesqrt; + using glm::isinf; + using glm::isnan; + using glm::ldexp; + using glm::length; + using glm::lessThan; + using glm::lessThanEqual; + using glm::log; + using glm::log2; + using glm::matrixCompMult; + using glm::max; + using glm::min; + using glm::mix; + using glm::mod; + using glm::modf; + using glm::normalize; + using glm::notEqual; + using glm::not_; + using glm::outerProduct; + using glm::packDouble2x32; + using glm::packHalf2x16; + using glm::packSnorm2x16; + using glm::packSnorm4x8; + using glm::packUnorm2x16; + using glm::packUnorm4x8; + using glm::pow; + using glm::radians; + using glm::reflect; + using glm::refract; + using glm::round; + using glm::roundEven; + using glm::sign; + using glm::sin; + using glm::sinh; + using glm::smoothstep; + using glm::sqrt; + using glm::step; + using glm::tan; + using glm::tanh; + using glm::transpose; + using glm::trunc; + using glm::uaddCarry; + using glm::uintBitsToFloat; + using glm::umulExtended; + using glm::unpackDouble2x32; + using glm::unpackHalf2x16; + using glm::unpackSnorm2x16; + using glm::unpackSnorm4x8; + using glm::unpackUnorm2x16; + using glm::unpackUnorm4x8; + using glm::usubBorrow; + +# ifdef GLM_GTC_INLINE_NAMESPACE + inline +# endif + namespace gtc { +# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE + using glm::aligned_highp_vec1; + using glm::aligned_mediump_vec1; + using glm::aligned_lowp_vec1; + using glm::aligned_highp_dvec1; + using glm::aligned_mediump_dvec1; + using glm::aligned_lowp_dvec1; + using glm::aligned_highp_ivec1; + using glm::aligned_mediump_ivec1; + using glm::aligned_lowp_ivec1; + using glm::aligned_highp_uvec1; + using glm::aligned_mediump_uvec1; + using glm::aligned_lowp_uvec1; + using glm::aligned_highp_bvec1; + using glm::aligned_mediump_bvec1; + using glm::aligned_lowp_bvec1; + using glm::packed_highp_vec1; + using glm::packed_mediump_vec1; + using glm::packed_lowp_vec1; + using glm::packed_highp_dvec1; + using glm::packed_mediump_dvec1; + using glm::packed_lowp_dvec1; + using glm::packed_highp_ivec1; + using glm::packed_mediump_ivec1; + using glm::packed_lowp_ivec1; + using glm::packed_highp_uvec1; + using glm::packed_mediump_uvec1; + using glm::packed_lowp_uvec1; + using glm::packed_highp_bvec1; + using glm::packed_mediump_bvec1; + using glm::packed_lowp_bvec1; + using glm::aligned_highp_vec2; + using glm::aligned_mediump_vec2; + using glm::aligned_lowp_vec2; + using glm::aligned_highp_dvec2; + using glm::aligned_mediump_dvec2; + using glm::aligned_lowp_dvec2; + using glm::aligned_highp_ivec2; + using glm::aligned_mediump_ivec2; + using glm::aligned_lowp_ivec2; + using glm::aligned_highp_uvec2; + using glm::aligned_mediump_uvec2; + using glm::aligned_lowp_uvec2; + using glm::aligned_highp_bvec2; + using glm::aligned_mediump_bvec2; + using 
glm::aligned_lowp_bvec2; + using glm::packed_highp_vec2; + using glm::packed_mediump_vec2; + using glm::packed_lowp_vec2; + using glm::packed_highp_dvec2; + using glm::packed_mediump_dvec2; + using glm::packed_lowp_dvec2; + using glm::packed_highp_ivec2; + using glm::packed_mediump_ivec2; + using glm::packed_lowp_ivec2; + using glm::packed_highp_uvec2; + using glm::packed_mediump_uvec2; + using glm::packed_lowp_uvec2; + using glm::packed_highp_bvec2; + using glm::packed_mediump_bvec2; + using glm::packed_lowp_bvec2; + using glm::aligned_highp_vec3; + using glm::aligned_mediump_vec3; + using glm::aligned_lowp_vec3; + using glm::aligned_highp_dvec3; + using glm::aligned_mediump_dvec3; + using glm::aligned_lowp_dvec3; + using glm::aligned_highp_ivec3; + using glm::aligned_mediump_ivec3; + using glm::aligned_lowp_ivec3; + using glm::aligned_highp_uvec3; + using glm::aligned_mediump_uvec3; + using glm::aligned_lowp_uvec3; + using glm::aligned_highp_bvec3; + using glm::aligned_mediump_bvec3; + using glm::aligned_lowp_bvec3; + using glm::packed_highp_vec3; + using glm::packed_mediump_vec3; + using glm::packed_lowp_vec3; + using glm::packed_highp_dvec3; + using glm::packed_mediump_dvec3; + using glm::packed_lowp_dvec3; + using glm::packed_highp_ivec3; + using glm::packed_mediump_ivec3; + using glm::packed_lowp_ivec3; + using glm::packed_highp_uvec3; + using glm::packed_mediump_uvec3; + using glm::packed_lowp_uvec3; + using glm::packed_highp_bvec3; + using glm::packed_mediump_bvec3; + using glm::packed_lowp_bvec3; + using glm::aligned_highp_vec4; + using glm::aligned_mediump_vec4; + using glm::aligned_lowp_vec4; + using glm::aligned_highp_dvec4; + using glm::aligned_mediump_dvec4; + using glm::aligned_lowp_dvec4; + using glm::aligned_highp_ivec4; + using glm::aligned_mediump_ivec4; + using glm::aligned_lowp_ivec4; + using glm::aligned_highp_uvec4; + using glm::aligned_mediump_uvec4; + using glm::aligned_lowp_uvec4; + using glm::aligned_highp_bvec4; + using glm::aligned_mediump_bvec4; + using glm::aligned_lowp_bvec4; + using glm::packed_highp_vec4; + using glm::packed_mediump_vec4; + using glm::packed_lowp_vec4; + using glm::packed_highp_dvec4; + using glm::packed_mediump_dvec4; + using glm::packed_lowp_dvec4; + using glm::packed_highp_ivec4; + using glm::packed_mediump_ivec4; + using glm::packed_lowp_ivec4; + using glm::packed_highp_uvec4; + using glm::packed_mediump_uvec4; + using glm::packed_lowp_uvec4; + using glm::packed_highp_bvec4; + using glm::packed_mediump_bvec4; + using glm::packed_lowp_bvec4; + using glm::aligned_highp_mat2; + using glm::aligned_mediump_mat2; + using glm::aligned_lowp_mat2; + using glm::aligned_highp_dmat2; + using glm::aligned_mediump_dmat2; + using glm::aligned_lowp_dmat2; + using glm::packed_highp_mat2; + using glm::packed_mediump_mat2; + using glm::packed_lowp_mat2; + using glm::packed_highp_dmat2; + using glm::packed_mediump_dmat2; + using glm::packed_lowp_dmat2; + using glm::aligned_highp_mat3; + using glm::aligned_mediump_mat3; + using glm::aligned_lowp_mat3; + using glm::aligned_highp_dmat3; + using glm::aligned_mediump_dmat3; + using glm::aligned_lowp_dmat3; + using glm::packed_highp_mat3; + using glm::packed_mediump_mat3; + using glm::packed_lowp_mat3; + using glm::packed_highp_dmat3; + using glm::packed_mediump_dmat3; + using glm::packed_lowp_dmat3; + using glm::aligned_highp_mat4; + using glm::aligned_mediump_mat4; + using glm::aligned_lowp_mat4; + using glm::aligned_highp_dmat4; + using glm::aligned_mediump_dmat4; + using glm::aligned_lowp_dmat4; + using 
glm::packed_highp_mat4; + using glm::packed_mediump_mat4; + using glm::packed_lowp_mat4; + using glm::packed_highp_dmat4; + using glm::packed_mediump_dmat4; + using glm::packed_lowp_dmat4; + using glm::aligned_highp_mat2x2; + using glm::aligned_mediump_mat2x2; + using glm::aligned_lowp_mat2x2; + using glm::aligned_highp_dmat2x2; + using glm::aligned_mediump_dmat2x2; + using glm::aligned_lowp_dmat2x2; + using glm::packed_highp_mat2x2; + using glm::packed_mediump_mat2x2; + using glm::packed_lowp_mat2x2; + using glm::packed_highp_dmat2x2; + using glm::packed_mediump_dmat2x2; + using glm::packed_lowp_dmat2x2; + using glm::aligned_highp_mat2x3; + using glm::aligned_mediump_mat2x3; + using glm::aligned_lowp_mat2x3; + using glm::aligned_highp_dmat2x3; + using glm::aligned_mediump_dmat2x3; + using glm::aligned_lowp_dmat2x3; + using glm::packed_highp_mat2x3; + using glm::packed_mediump_mat2x3; + using glm::packed_lowp_mat2x3; + using glm::packed_highp_dmat2x3; + using glm::packed_mediump_dmat2x3; + using glm::packed_lowp_dmat2x3; + using glm::aligned_highp_mat2x4; + using glm::aligned_mediump_mat2x4; + using glm::aligned_lowp_mat2x4; + using glm::aligned_highp_dmat2x4; + using glm::aligned_mediump_dmat2x4; + using glm::aligned_lowp_dmat2x4; + using glm::packed_highp_mat2x4; + using glm::packed_mediump_mat2x4; + using glm::packed_lowp_mat2x4; + using glm::packed_highp_dmat2x4; + using glm::packed_mediump_dmat2x4; + using glm::packed_lowp_dmat2x4; + using glm::aligned_highp_mat3x2; + using glm::aligned_mediump_mat3x2; + using glm::aligned_lowp_mat3x2; + using glm::aligned_highp_dmat3x2; + using glm::aligned_mediump_dmat3x2; + using glm::aligned_lowp_dmat3x2; + using glm::packed_highp_mat3x2; + using glm::packed_mediump_mat3x2; + using glm::packed_lowp_mat3x2; + using glm::packed_highp_dmat3x2; + using glm::packed_mediump_dmat3x2; + using glm::packed_lowp_dmat3x2; + using glm::aligned_highp_mat3x3; + using glm::aligned_mediump_mat3x3; + using glm::aligned_lowp_mat3x3; + using glm::aligned_highp_dmat3x3; + using glm::aligned_mediump_dmat3x3; + using glm::aligned_lowp_dmat3x3; + using glm::packed_highp_mat3x3; + using glm::packed_mediump_mat3x3; + using glm::packed_lowp_mat3x3; + using glm::packed_highp_dmat3x3; + using glm::packed_mediump_dmat3x3; + using glm::packed_lowp_dmat3x3; + using glm::aligned_highp_mat3x4; + using glm::aligned_mediump_mat3x4; + using glm::aligned_lowp_mat3x4; + using glm::aligned_highp_dmat3x4; + using glm::aligned_mediump_dmat3x4; + using glm::aligned_lowp_dmat3x4; + using glm::packed_highp_mat3x4; + using glm::packed_mediump_mat3x4; + using glm::packed_lowp_mat3x4; + using glm::packed_highp_dmat3x4; + using glm::packed_mediump_dmat3x4; + using glm::packed_lowp_dmat3x4; + using glm::aligned_highp_mat4x2; + using glm::aligned_mediump_mat4x2; + using glm::aligned_lowp_mat4x2; + using glm::aligned_highp_dmat4x2; + using glm::aligned_mediump_dmat4x2; + using glm::aligned_lowp_dmat4x2; + using glm::packed_highp_mat4x2; + using glm::packed_mediump_mat4x2; + using glm::packed_lowp_mat4x2; + using glm::packed_highp_dmat4x2; + using glm::packed_mediump_dmat4x2; + using glm::packed_lowp_dmat4x2; + using glm::aligned_highp_mat4x3; + using glm::aligned_mediump_mat4x3; + using glm::aligned_lowp_mat4x3; + using glm::aligned_highp_dmat4x3; + using glm::aligned_mediump_dmat4x3; + using glm::aligned_lowp_dmat4x3; + using glm::packed_highp_mat4x3; + using glm::packed_mediump_mat4x3; + using glm::packed_lowp_mat4x3; + using glm::packed_highp_dmat4x3; + using glm::packed_mediump_dmat4x3; + using 
glm::packed_lowp_dmat4x3; + using glm::aligned_highp_mat4x4; + using glm::aligned_mediump_mat4x4; + using glm::aligned_lowp_mat4x4; + using glm::aligned_highp_dmat4x4; + using glm::aligned_mediump_dmat4x4; + using glm::aligned_lowp_dmat4x4; + using glm::packed_highp_mat4x4; + using glm::packed_mediump_mat4x4; + using glm::packed_lowp_mat4x4; + using glm::packed_highp_dmat4x4; + using glm::packed_mediump_dmat4x4; + using glm::packed_lowp_dmat4x4; +# if(defined(GLM_PRECISION_LOWP_FLOAT)) + using glm::aligned_vec1; + using glm::aligned_vec2; + using glm::aligned_vec3; + using glm::aligned_vec4; + using glm::packed_vec1; + using glm::packed_vec2; + using glm::packed_vec3; + using glm::packed_vec4; + using glm::aligned_mat2; + using glm::aligned_mat3; + using glm::aligned_mat4; + using glm::packed_mat2; + using glm::packed_mat3; + using glm::packed_mat4; + using glm::aligned_mat2x2; + using glm::aligned_mat2x3; + using glm::aligned_mat2x4; + using glm::aligned_mat3x2; + using glm::aligned_mat3x3; + using glm::aligned_mat3x4; + using glm::aligned_mat4x2; + using glm::aligned_mat4x3; + using glm::aligned_mat4x4; + using glm::packed_mat2x2; + using glm::packed_mat2x3; + using glm::packed_mat2x4; + using glm::packed_mat3x2; + using glm::packed_mat3x3; + using glm::packed_mat3x4; + using glm::packed_mat4x2; + using glm::packed_mat4x3; + using glm::packed_mat4x4; +# elif(defined(GLM_PRECISION_MEDIUMP_FLOAT)) + using glm::aligned_vec1; + using glm::aligned_vec2; + using glm::aligned_vec3; + using glm::aligned_vec4; + using glm::packed_vec1; + using glm::packed_vec2; + using glm::packed_vec3; + using glm::packed_vec4; + using glm::aligned_mat2; + using glm::aligned_mat3; + using glm::aligned_mat4; + using glm::packed_mat2; + using glm::packed_mat3; + using glm::packed_mat4; + using glm::aligned_mat2x2; + using glm::aligned_mat2x3; + using glm::aligned_mat2x4; + using glm::aligned_mat3x2; + using glm::aligned_mat3x3; + using glm::aligned_mat3x4; + using glm::aligned_mat4x2; + using glm::aligned_mat4x3; + using glm::aligned_mat4x4; + using glm::packed_mat2x2; + using glm::packed_mat2x3; + using glm::packed_mat2x4; + using glm::packed_mat3x2; + using glm::packed_mat3x3; + using glm::packed_mat3x4; + using glm::packed_mat4x2; + using glm::packed_mat4x3; + using glm::packed_mat4x4; +# else //defined(GLM_PRECISION_HIGHP_FLOAT) + using glm::aligned_vec1; + using glm::aligned_vec2; + using glm::aligned_vec3; + using glm::aligned_vec4; + using glm::packed_vec1; + using glm::packed_vec2; + using glm::packed_vec3; + using glm::packed_vec4; + using glm::aligned_mat2; + using glm::aligned_mat3; + using glm::aligned_mat4; + using glm::packed_mat2; + using glm::packed_mat3; + using glm::packed_mat4; + using glm::aligned_mat2x2; + using glm::aligned_mat2x3; + using glm::aligned_mat2x4; + using glm::aligned_mat3x2; + using glm::aligned_mat3x3; + using glm::aligned_mat3x4; + using glm::aligned_mat4x2; + using glm::aligned_mat4x3; + using glm::aligned_mat4x4; + using glm::packed_mat2x2; + using glm::packed_mat2x3; + using glm::packed_mat2x4; + using glm::packed_mat3x2; + using glm::packed_mat3x3; + using glm::packed_mat3x4; + using glm::packed_mat4x2; + using glm::packed_mat4x3; + using glm::packed_mat4x4; +# endif//GLM_PRECISION +# if(defined(GLM_PRECISION_LOWP_DOUBLE)) + using glm::aligned_dvec1; + using glm::aligned_dvec2; + using glm::aligned_dvec3; + using glm::aligned_dvec4; + using glm::packed_dvec1; + using glm::packed_dvec2; + using glm::packed_dvec3; + using glm::packed_dvec4; + using glm::aligned_dmat2; + 
using glm::aligned_dmat3; + using glm::aligned_dmat4; + using glm::packed_dmat2; + using glm::packed_dmat3; + using glm::packed_dmat4; + using glm::aligned_dmat2x2; + using glm::aligned_dmat2x3; + using glm::aligned_dmat2x4; + using glm::aligned_dmat3x2; + using glm::aligned_dmat3x3; + using glm::aligned_dmat3x4; + using glm::aligned_dmat4x2; + using glm::aligned_dmat4x3; + using glm::aligned_dmat4x4; + using glm::packed_dmat2x2; + using glm::packed_dmat2x3; + using glm::packed_dmat2x4; + using glm::packed_dmat3x2; + using glm::packed_dmat3x3; + using glm::packed_dmat3x4; + using glm::packed_dmat4x2; + using glm::packed_dmat4x3; + using glm::packed_dmat4x4; +# elif(defined(GLM_PRECISION_MEDIUMP_DOUBLE)) + using glm::aligned_dvec1; + using glm::aligned_dvec2; + using glm::aligned_dvec3; + using glm::aligned_dvec4; + using glm::packed_dvec1; + using glm::packed_dvec2; + using glm::packed_dvec3; + using glm::packed_dvec4; + using glm::aligned_dmat2; + using glm::aligned_dmat3; + using glm::aligned_dmat4; + using glm::packed_dmat2; + using glm::packed_dmat3; + using glm::packed_dmat4; + using glm::aligned_dmat2x2; + using glm::aligned_dmat2x3; + using glm::aligned_dmat2x4; + using glm::aligned_dmat3x2; + using glm::aligned_dmat3x3; + using glm::aligned_dmat3x4; + using glm::aligned_dmat4x2; + using glm::aligned_dmat4x3; + using glm::aligned_dmat4x4; + using glm::packed_dmat2x2; + using glm::packed_dmat2x3; + using glm::packed_dmat2x4; + using glm::packed_dmat3x2; + using glm::packed_dmat3x3; + using glm::packed_dmat3x4; + using glm::packed_dmat4x2; + using glm::packed_dmat4x3; + using glm::packed_dmat4x4; +# else //defined(GLM_PRECISION_HIGHP_DOUBLE) + using glm::aligned_dvec1; + using glm::aligned_dvec2; + using glm::aligned_dvec3; + using glm::aligned_dvec4; + using glm::packed_dvec1; + using glm::packed_dvec2; + using glm::packed_dvec3; + using glm::packed_dvec4; + using glm::aligned_dmat2; + using glm::aligned_dmat3; + using glm::aligned_dmat4; + using glm::packed_dmat2; + using glm::packed_dmat3; + using glm::packed_dmat4; + using glm::aligned_dmat2x2; + using glm::aligned_dmat2x3; + using glm::aligned_dmat2x4; + using glm::aligned_dmat3x2; + using glm::aligned_dmat3x3; + using glm::aligned_dmat3x4; + using glm::aligned_dmat4x2; + using glm::aligned_dmat4x3; + using glm::aligned_dmat4x4; + using glm::packed_dmat2x2; + using glm::packed_dmat2x3; + using glm::packed_dmat2x4; + using glm::packed_dmat3x2; + using glm::packed_dmat3x3; + using glm::packed_dmat3x4; + using glm::packed_dmat4x2; + using glm::packed_dmat4x3; + using glm::packed_dmat4x4; +# endif//GLM_PRECISION +# if(defined(GLM_PRECISION_LOWP_INT)) + using glm::aligned_ivec1; + using glm::aligned_ivec2; + using glm::aligned_ivec3; + using glm::aligned_ivec4; +# elif(defined(GLM_PRECISION_MEDIUMP_INT)) + using glm::aligned_ivec1; + using glm::aligned_ivec2; + using glm::aligned_ivec3; + using glm::aligned_ivec4; +# else //defined(GLM_PRECISION_HIGHP_INT) + using glm::aligned_ivec1; + using glm::aligned_ivec2; + using glm::aligned_ivec3; + using glm::aligned_ivec4; + using glm::packed_ivec1; + using glm::packed_ivec2; + using glm::packed_ivec3; + using glm::packed_ivec4; +# endif//GLM_PRECISION +# if(defined(GLM_PRECISION_LOWP_UINT)) + using glm::aligned_uvec1; + using glm::aligned_uvec2; + using glm::aligned_uvec3; + using glm::aligned_uvec4; +# elif(defined(GLM_PRECISION_MEDIUMP_UINT)) + using glm::aligned_uvec1; + using glm::aligned_uvec2; + using glm::aligned_uvec3; + using glm::aligned_uvec4; +# else 
//defined(GLM_PRECISION_HIGHP_UINT) + using glm::aligned_uvec1; + using glm::aligned_uvec2; + using glm::aligned_uvec3; + using glm::aligned_uvec4; + using glm::packed_uvec1; + using glm::packed_uvec2; + using glm::packed_uvec3; + using glm::packed_uvec4; +# endif//GLM_PRECISION +# if(defined(GLM_PRECISION_LOWP_BOOL)) + using glm::aligned_bvec1; + using glm::aligned_bvec2; + using glm::aligned_bvec3; + using glm::aligned_bvec4; +# elif(defined(GLM_PRECISION_MEDIUMP_BOOL)) + using glm::aligned_bvec1; + using glm::aligned_bvec2; + using glm::aligned_bvec3; + using glm::aligned_bvec4; +# else //defined(GLM_PRECISION_HIGHP_BOOL) + using glm::aligned_bvec1; + using glm::aligned_bvec2; + using glm::aligned_bvec3; + using glm::aligned_bvec4; + using glm::packed_bvec1; + using glm::packed_bvec2; + using glm::packed_bvec3; + using glm::packed_bvec4; +# endif//GLM_PRECISION +# endif + + + using glm::abs; + using glm::acos; + using glm::acosh; + using glm::acot; + using glm::acoth; + using glm::acsc; + using glm::acsch; + using glm::affineInverse; + using glm::all; + using glm::angle; + using glm::angleAxis; + using glm::any; + using glm::asec; + using glm::asech; + using glm::asin; + using glm::asinh; + using glm::atan; + using glm::atanh; + using glm::axis; + using glm::ballRand; + using glm::bitCount; + using glm::bitfieldDeinterleave; + using glm::bitfieldExtract; + using glm::bitfieldFillOne; + using glm::bitfieldFillZero; + using glm::bitfieldInsert; + using glm::bitfieldInterleave; + using glm::bitfieldReverse; + using glm::bitfieldRotateLeft; + using glm::bitfieldRotateRight; + using glm::ceil; + using glm::ceilMultiple; + using glm::ceilPowerOfTwo; + using glm::circularRand; + using glm::clamp; + using glm::column; + using glm::conjugate; + using glm::convertLinearToSRGB; + using glm::convertSRGBToLinear; + using glm::cos; + using glm::cos_one_over_two; + using glm::cosh; + using glm::cot; + using glm::coth; + using glm::cross; + using glm::csc; + using glm::csch; + using glm::degrees; + using glm::determinant; + using glm::diskRand; + using glm::distance; + using glm::dot; + using glm::e; + using glm::epsilon; + using glm::epsilonEqual; + using glm::epsilonNotEqual; + using glm::equal; + using glm::euler; + using glm::eulerAngles; + using glm::exp; + using glm::exp2; + using glm::faceforward; + using glm::fclamp; + using glm::findLSB; + using glm::findMSB; + using glm::floatBitsToInt; + using glm::floatBitsToUint; + using glm::float_distance; + using glm::floor; + using glm::floorMultiple; + using glm::floorPowerOfTwo; + using glm::fma; + using glm::fmax; + using glm::fmin; + using glm::four_over_pi; + using glm::fract; + using glm::frexp; + using glm::frustum; + using glm::frustumLH; + using glm::frustumLH_NO; + using glm::frustumLH_ZO; + using glm::frustumNO; + using glm::frustumRH; + using glm::frustumRH_NO; + using glm::frustumRH_ZO; + using glm::frustumZO; + using glm::gaussRand; + using glm::golden_ratio; + using glm::greaterThan; + using glm::greaterThanEqual; + using glm::half_pi; + using glm::identity; + using glm::imulExtended; + using glm::infinitePerspective; + using glm::infinitePerspectiveLH; + using glm::infinitePerspectiveRH; + using glm::intBitsToFloat; + using glm::inverse; + using glm::inverseTranspose; + using glm::inversesqrt; + using glm::iround; + using glm::isinf; + using glm::isnan; + using glm::ldexp; + using glm::length; + using glm::lerp; + using glm::lessThan; + using glm::lessThanEqual; + using glm::linearRand; + using glm::ln_ln_two; + using glm::ln_ten; + 
using glm::ln_two; + using glm::log; + using glm::log2; + using glm::lookAt; + using glm::lookAtLH; + using glm::lookAtRH; + using glm::make_mat2; + using glm::make_mat2x2; + using glm::make_mat2x3; + using glm::make_mat2x4; + using glm::make_mat3; + using glm::make_mat3x2; + using glm::make_mat3x3; + using glm::make_mat3x4; + using glm::make_mat4; + using glm::make_mat4x2; + using glm::make_mat4x3; + using glm::make_mat4x4; + using glm::make_quat; + using glm::make_vec1; + using glm::make_vec2; + using glm::make_vec3; + using glm::make_vec4; + using glm::mask; + using glm::mat3_cast; + using glm::mat4_cast; + using glm::matrixCompMult; + using glm::max; + using glm::min; + using glm::mirrorClamp; + using glm::mirrorRepeat; + using glm::mix; + using glm::mod; + using glm::modf; + using glm::next_float; + using glm::normalize; + using glm::notEqual; + using glm::not_; + using glm::one; + using glm::one_over_pi; + using glm::one_over_root_two; + using glm::one_over_two_pi; + using glm::ortho; + using glm::orthoLH; + using glm::orthoLH_NO; + using glm::orthoLH_ZO; + using glm::orthoNO; + using glm::orthoRH; + using glm::orthoRH_NO; + using glm::orthoRH_ZO; + using glm::orthoZO; + using glm::outerProduct; + using glm::packF2x11_1x10; + using glm::packF3x9_E1x5; + using glm::packHalf; + using glm::packHalf1x16; + using glm::packHalf4x16; + using glm::packI3x10_1x2; + using glm::packInt2x16; + using glm::packInt2x32; + using glm::packInt2x8; + using glm::packInt4x16; + using glm::packInt4x8; + using glm::packRGBM; + using glm::packSnorm; + using glm::packSnorm1x16; + using glm::packSnorm1x8; + using glm::packSnorm2x8; + using glm::packSnorm3x10_1x2; + using glm::packSnorm4x16; + using glm::packU3x10_1x2; + using glm::packUint2x16; + using glm::packUint2x32; + using glm::packUint2x8; + using glm::packUint4x16; + using glm::packUint4x8; + using glm::packUnorm; + using glm::packUnorm1x16; + using glm::packUnorm1x5_1x6_1x5; + using glm::packUnorm1x8; + using glm::packUnorm2x3_1x2; + using glm::packUnorm2x4; + using glm::packUnorm2x8; + using glm::packUnorm3x10_1x2; + using glm::packUnorm3x5_1x1; + using glm::packUnorm4x16; + using glm::packUnorm4x4; + using glm::perlin; + using glm::perspective; + using glm::perspectiveFov; + using glm::perspectiveFovLH; + using glm::perspectiveFovLH_NO; + using glm::perspectiveFovLH_ZO; + using glm::perspectiveFovNO; + using glm::perspectiveFovRH; + using glm::perspectiveFovRH_NO; + using glm::perspectiveFovRH_ZO; + using glm::perspectiveFovZO; + using glm::perspectiveLH; + using glm::perspectiveLH_NO; + using glm::perspectiveLH_ZO; + using glm::perspectiveNO; + using glm::perspectiveRH; + using glm::perspectiveRH_NO; + using glm::perspectiveRH_ZO; + using glm::perspectiveZO; + using glm::pi; + using glm::pickMatrix; + using glm::pitch; + using glm::pow; + using glm::prev_float; + using glm::project; + using glm::projectNO; + using glm::projectZO; + using glm::quarter_pi; + using glm::quatLookAt; + using glm::quatLookAtLH; + using glm::quatLookAtRH; + using glm::quat_cast; + using glm::radians; + using glm::reflect; + using glm::refract; + using glm::repeat; + using glm::roll; + using glm::root_five; + using glm::root_half_pi; + using glm::root_ln_four; + using glm::root_pi; + using glm::root_three; + using glm::root_two; + using glm::root_two_pi; + using glm::rotate; + using glm::round; + using glm::roundEven; + using glm::roundMultiple; + using glm::roundPowerOfTwo; + using glm::row; + using glm::scale; + using glm::sec; + using glm::sech; + using glm::sign; + 
using glm::simplex; + using glm::sin; + using glm::sinh; + using glm::slerp; + using glm::smoothstep; + using glm::sphericalRand; + using glm::sqrt; + using glm::step; + using glm::tan; + using glm::tanh; + using glm::third; + using glm::three_over_two_pi; + using glm::translate; + using glm::transpose; + using glm::trunc; + using glm::tweakedInfinitePerspective; + using glm::two_over_pi; + using glm::two_over_root_pi; + using glm::two_pi; + using glm::two_thirds; + using glm::uaddCarry; + using glm::uintBitsToFloat; + using glm::umulExtended; + using glm::unProject; + using glm::unProjectNO; + using glm::unProjectZO; + using glm::unpackF2x11_1x10; + using glm::unpackF3x9_E1x5; + using glm::unpackHalf; + using glm::unpackHalf1x16; + using glm::unpackHalf4x16; + using glm::unpackI3x10_1x2; + using glm::unpackInt2x16; + using glm::unpackInt2x32; + using glm::unpackInt2x8; + using glm::unpackInt4x16; + using glm::unpackInt4x8; + using glm::unpackRGBM; + using glm::unpackSnorm; + using glm::unpackSnorm1x16; + using glm::unpackSnorm1x8; + using glm::unpackSnorm2x8; + using glm::unpackSnorm3x10_1x2; + using glm::unpackSnorm4x16; + using glm::unpackU3x10_1x2; + using glm::unpackUint2x16; + using glm::unpackUint2x32; + using glm::unpackUint2x8; + using glm::unpackUint4x16; + using glm::unpackUint4x8; + using glm::unpackUnorm; + using glm::unpackUnorm1x16; + using glm::unpackUnorm1x5_1x6_1x5; + using glm::unpackUnorm1x8; + using glm::unpackUnorm2x3_1x2; + using glm::unpackUnorm2x4; + using glm::unpackUnorm2x8; + using glm::unpackUnorm3x10_1x2; + using glm::unpackUnorm3x5_1x1; + using glm::unpackUnorm4x16; + using glm::unpackUnorm4x4; + using glm::uround; + using glm::usubBorrow; + using glm::value_ptr; + using glm::yaw; + using glm::zero; + } + +# ifdef GLM_EXT_INLINE_NAMESPACE + inline +# endif + namespace ext { + using glm::abs; + using glm::acos; + using glm::acosh; + using glm::acot; + using glm::acoth; + using glm::acsc; + using glm::acsch; + using glm::all; + using glm::angle; + using glm::angleAxis; + using glm::any; + using glm::asec; + using glm::asech; + using glm::asin; + using glm::asinh; + using glm::atan; + using glm::atanh; + using glm::axis; + using glm::ceil; + using glm::clamp; + using glm::conjugate; + using glm::cos; + using glm::cos_one_over_two; + using glm::cosh; + using glm::cot; + using glm::coth; + using glm::cross; + using glm::csc; + using glm::csch; + using glm::degrees; + using glm::determinant; + using glm::distance; + using glm::dot; + using glm::e; + using glm::epsilon; + using glm::equal; + using glm::euler; + using glm::exp; + using glm::exp2; + using glm::faceforward; + using glm::fclamp; + using glm::findNSB; + using glm::floatBitsToInt; + using glm::floatBitsToUint; + using glm::floatDistance; + using glm::floor; + using glm::fma; + using glm::fmax; + using glm::fmin; + using glm::four_over_pi; + using glm::fract; + using glm::frexp; + using glm::frustum; + using glm::frustumLH; + using glm::frustumLH_NO; + using glm::frustumLH_ZO; + using glm::frustumNO; + using glm::frustumRH; + using glm::frustumRH_NO; + using glm::frustumRH_ZO; + using glm::frustumZO; + using glm::golden_ratio; + using glm::greaterThan; + using glm::greaterThanEqual; + using glm::half_pi; + using glm::identity; + using glm::infinitePerspective; + using glm::infinitePerspectiveLH; + using glm::infinitePerspectiveRH; + using glm::intBitsToFloat; + using glm::inverse; + using glm::inversesqrt; + using glm::iround; + using glm::isMultiple; + using glm::isPowerOfTwo; + using glm::isinf; + using 
glm::isnan; + using glm::ldexp; + using glm::length; + using glm::lerp; + using glm::lessThan; + using glm::lessThanEqual; + using glm::ln_ln_two; + using glm::ln_ten; + using glm::ln_two; + using glm::log; + using glm::log2; + using glm::lookAt; + using glm::lookAtLH; + using glm::lookAtRH; + using glm::matrixCompMult; + using glm::max; + using glm::min; + using glm::mirrorClamp; + using glm::mirrorRepeat; + using glm::mix; + using glm::mod; + using glm::modf; + using glm::nextFloat; + using glm::nextMultiple; + using glm::nextPowerOfTwo; + using glm::normalize; + using glm::notEqual; + using glm::not_; + using glm::one; + using glm::one_over_pi; + using glm::one_over_root_two; + using glm::one_over_two_pi; + using glm::ortho; + using glm::orthoLH; + using glm::orthoLH_NO; + using glm::orthoLH_ZO; + using glm::orthoNO; + using glm::orthoRH; + using glm::orthoRH_NO; + using glm::orthoRH_ZO; + using glm::orthoZO; + using glm::outerProduct; + using glm::perspective; + using glm::perspectiveFov; + using glm::perspectiveFovLH; + using glm::perspectiveFovLH_NO; + using glm::perspectiveFovLH_ZO; + using glm::perspectiveFovNO; + using glm::perspectiveFovRH; + using glm::perspectiveFovRH_NO; + using glm::perspectiveFovRH_ZO; + using glm::perspectiveFovZO; + using glm::perspectiveLH; + using glm::perspectiveLH_NO; + using glm::perspectiveLH_ZO; + using glm::perspectiveNO; + using glm::perspectiveRH; + using glm::perspectiveRH_NO; + using glm::perspectiveRH_ZO; + using glm::perspectiveZO; + using glm::pi; + using glm::pickMatrix; + using glm::pow; + using glm::prevFloat; + using glm::prevMultiple; + using glm::prevPowerOfTwo; + using glm::project; + using glm::projectNO; + using glm::projectZO; + using glm::quarter_pi; + using glm::radians; + using glm::reflect; + using glm::refract; + using glm::repeat; + using glm::root_five; + using glm::root_half_pi; + using glm::root_ln_four; + using glm::root_pi; + using glm::root_three; + using glm::root_two; + using glm::root_two_pi; + using glm::rotate; + using glm::round; + using glm::roundEven; + using glm::scale; + using glm::sec; + using glm::sech; + using glm::sign; + using glm::sin; + using glm::sinh; + using glm::slerp; + using glm::smoothstep; + using glm::sqrt; + using glm::step; + using glm::tan; + using glm::tanh; + using glm::third; + using glm::three_over_two_pi; + using glm::translate; + using glm::transpose; + using glm::trunc; + using glm::tweakedInfinitePerspective; + using glm::two_over_pi; + using glm::two_over_root_pi; + using glm::two_pi; + using glm::two_thirds; + using glm::uintBitsToFloat; + using glm::unProject; + using glm::unProjectNO; + using glm::unProjectZO; + using glm::uround; + using glm::zero; + } + +# ifdef GLM_ENABLE_EXPERIMENTAL +# ifdef GLM_GTX_INLINE_NAMESPACE + inline +# endif + namespace gtx { + using glm::io::order_type; + using glm::io::format_punct; + using glm::io::basic_state_saver; + using glm::io::basic_format_saver; + using glm::io::precision; + using glm::io::width; + using glm::io::delimeter; + using glm::io::order; + using glm::io::get_facet; + using glm::io::formatted; + using glm::io::unformatted; + using glm::io::operator<<; + using glm::operator<<; + using glm::tdualquat; + +# if !((GLM_COMPILER & GLM_COMPILER_CUDA) || (GLM_COMPILER & GLM_COMPILER_HIP)) + using glm::to_string; +# endif +# if GLM_HAS_TEMPLATE_ALIASES + using glm::operator*; + using glm::operator/; +# endif +# if GLM_HAS_RANGE_FOR + using glm::components; + using glm::begin; + using glm::end; +# endif + + using glm::abs; + using 
glm::acos; + using glm::acosh; + using glm::adjugate; + using glm::all; + using glm::angle; + using glm::angleAxis; + using glm::any; + using glm::areCollinear; + using glm::areOrthogonal; + using glm::areOrthonormal; + using glm::asin; + using glm::asinh; + using glm::associatedMax; + using glm::associatedMin; + using glm::atan; + using glm::atanh; + using glm::axis; + using glm::axisAngle; + using glm::axisAngleMatrix; + using glm::backEaseIn; + using glm::backEaseInOut; + using glm::backEaseOut; + using glm::bitCount; + using glm::bitfieldDeinterleave; + using glm::bitfieldExtract; + using glm::bitfieldFillOne; + using glm::bitfieldFillZero; + using glm::bitfieldInsert; + using glm::bitfieldInterleave; + using glm::bitfieldReverse; + using glm::bitfieldRotateLeft; + using glm::bitfieldRotateRight; + using glm::bounceEaseIn; + using glm::bounceEaseInOut; + using glm::bounceEaseOut; + using glm::catmullRom; + using glm::ceil; + using glm::circularEaseIn; + using glm::circularEaseInOut; + using glm::circularEaseOut; + using glm::clamp; + using glm::closeBounded; + using glm::closestPointOnLine; + using glm::colMajor2; + using glm::colMajor3; + using glm::colMajor4; + using glm::compAdd; + using glm::compMax; + using glm::compMin; + using glm::compMul; + using glm::compNormalize; + using glm::compScale; + using glm::computeCovarianceMatrix; + using glm::conjugate; + using glm::convertD65XYZToD50XYZ; + using glm::convertD65XYZToLinearSRGB; + using glm::convertLinearSRGBToD50XYZ; + using glm::convertLinearSRGBToD65XYZ; + using glm::cos; + using glm::cos_one_over_two; + using glm::cosh; + using glm::cross; + using glm::cubic; + using glm::cubicEaseIn; + using glm::cubicEaseInOut; + using glm::cubicEaseOut; + using glm::decompose; + using glm::degrees; + using glm::derivedEulerAngleX; + using glm::derivedEulerAngleY; + using glm::derivedEulerAngleZ; + using glm::determinant; + using glm::diagonal2x2; + using glm::diagonal2x3; + using glm::diagonal2x4; + using glm::diagonal3x2; + using glm::diagonal3x3; + using glm::diagonal3x4; + using glm::diagonal4x2; + using glm::diagonal4x3; + using glm::diagonal4x4; + using glm::distance; + using glm::distance2; + using glm::dot; + using glm::dual_quat_identity; + using glm::dualquat_cast; + using glm::e; + using glm::elasticEaseIn; + using glm::elasticEaseInOut; + using glm::elasticEaseOut; + using glm::epsilon; + using glm::epsilonEqual; + using glm::epsilonNotEqual; + using glm::equal; + using glm::euclidean; + using glm::euler; + using glm::eulerAngleX; + using glm::eulerAngleXY; + using glm::eulerAngleXYX; + using glm::eulerAngleXYZ; + using glm::eulerAngleXZ; + using glm::eulerAngleXZX; + using glm::eulerAngleXZY; + using glm::eulerAngleY; + using glm::eulerAngleYX; + using glm::eulerAngleYXY; + using glm::eulerAngleYXZ; + using glm::eulerAngleYZ; + using glm::eulerAngleYZX; + using glm::eulerAngleYZY; + using glm::eulerAngleZ; + using glm::eulerAngleZX; + using glm::eulerAngleZXY; + using glm::eulerAngleZXZ; + using glm::eulerAngleZY; + using glm::eulerAngleZYX; + using glm::eulerAngleZYZ; + using glm::eulerAngles; + using glm::exp; + using glm::exp2; + using glm::exponentialEaseIn; + using glm::exponentialEaseInOut; + using glm::exponentialEaseOut; + using glm::extend; + using glm::extractEulerAngleXYX; + using glm::extractEulerAngleXYZ; + using glm::extractEulerAngleXZX; + using glm::extractEulerAngleXZY; + using glm::extractEulerAngleYXY; + using glm::extractEulerAngleYXZ; + using glm::extractEulerAngleYZX; + using glm::extractEulerAngleYZY; + 
using glm::extractEulerAngleZXY; + using glm::extractEulerAngleZXZ; + using glm::extractEulerAngleZYX; + using glm::extractEulerAngleZYZ; + using glm::extractMatrixRotation; + using glm::extractRealComponent; + using glm::faceforward; + using glm::factorial; + using glm::fastAcos; + using glm::fastAsin; + using glm::fastAtan; + using glm::fastCos; + using glm::fastDistance; + using glm::fastExp; + using glm::fastExp2; + using glm::fastInverseSqrt; + using glm::fastLength; + using glm::fastLog; + using glm::fastLog2; + using glm::fastMix; + using glm::fastNormalize; + using glm::fastNormalizeDot; + using glm::fastPow; + using glm::fastSin; + using glm::fastSqrt; + using glm::fastTan; + using glm::fclamp; + using glm::findLSB; + using glm::findMSB; + using glm::fliplr; + using glm::flipud; + using glm::floatBitsToInt; + using glm::floatBitsToUint; + using glm::floor; + using glm::floor_log2; + using glm::fma; + using glm::fmax; + using glm::fmin; + using glm::fmod; + using glm::four_over_pi; + using glm::fract; + using glm::frexp; + using glm::frustum; + using glm::frustumLH; + using glm::frustumLH_NO; + using glm::frustumLH_ZO; + using glm::frustumNO; + using glm::frustumRH; + using glm::frustumRH_NO; + using glm::frustumRH_ZO; + using glm::frustumZO; + using glm::gauss; + using glm::golden_ratio; + using glm::greaterThan; + using glm::greaterThanEqual; + using glm::half_pi; + using glm::hermite; + using glm::highestBitValue; + using glm::hsvColor; + using glm::identity; + using glm::imulExtended; + using glm::infinitePerspective; + using glm::infinitePerspectiveLH; + using glm::infinitePerspectiveRH; + using glm::intBitsToFloat; + using glm::intermediate; + using glm::interpolate; + using glm::intersectLineSphere; + using glm::intersectLineTriangle; + using glm::intersectRayPlane; + using glm::intersectRaySphere; + using glm::intersectRayTriangle; + using glm::inverse; + using glm::inversesqrt; + using glm::iround; + using glm::isCompNull; + using glm::isIdentity; + using glm::isNormalized; + using glm::isNull; + using glm::isOrthogonal; + using glm::isdenormal; + using glm::isfinite; + using glm::isinf; + using glm::isnan; + using glm::l1Norm; + using glm::l2Norm; + using glm::lMaxNorm; + using glm::ldexp; + using glm::leftHanded; + using glm::length; + using glm::length2; + using glm::lerp; + using glm::lessThan; + using glm::lessThanEqual; + using glm::linearGradient; + using glm::linearInterpolation; + using glm::ln_ln_two; + using glm::ln_ten; + using glm::ln_two; + using glm::log; + using glm::log2; + using glm::lookAt; + using glm::lookAtLH; + using glm::lookAtRH; + using glm::lowestBitValue; + using glm::luminosity; + using glm::lxNorm; + using glm::make_mat2; + using glm::make_mat2x2; + using glm::make_mat2x3; + using glm::make_mat2x4; + using glm::make_mat3; + using glm::make_mat3x2; + using glm::make_mat3x3; + using glm::make_mat3x4; + using glm::make_mat4; + using glm::make_mat4x2; + using glm::make_mat4x3; + using glm::make_mat4x4; + using glm::make_quat; + using glm::make_vec1; + using glm::make_vec2; + using glm::make_vec3; + using glm::make_vec4; + using glm::mask; + using glm::mat2x4_cast; + using glm::mat3_cast; + using glm::mat3x4_cast; + using glm::mat4_cast; + using glm::matrixCompMult; + using glm::matrixCross3; + using glm::matrixCross4; + using glm::max; + using glm::min; + using glm::mirrorClamp; + using glm::mirrorRepeat; + using glm::mix; + using glm::mixedProduct; + using glm::mod; + using glm::modf; + using glm::nlz; + using glm::normalize; + using 
glm::normalizeDot; + using glm::notEqual; + using glm::not_; + using glm::YCoCg2rgb; + using glm::YCoCgR2rgb; + using glm::one; + using glm::one_over_pi; + using glm::one_over_root_two; + using glm::one_over_two_pi; + using glm::openBounded; + using glm::orientate2; + using glm::orientate3; + using glm::orientate4; + using glm::orientation; + using glm::orientedAngle; + using glm::ortho; + using glm::orthoLH; + using glm::orthoLH_NO; + using glm::orthoLH_ZO; + using glm::orthoNO; + using glm::orthoRH; + using glm::orthoRH_NO; + using glm::orthoRH_ZO; + using glm::orthoZO; + using glm::orthonormalize; + using glm::outerProduct; + using glm::packDouble2x32; + using glm::packHalf2x16; + using glm::packSnorm2x16; + using glm::packSnorm4x8; + using glm::packUnorm2x16; + using glm::packUnorm4x8; + using glm::perp; + using glm::perspective; + using glm::perspectiveFov; + using glm::perspectiveFovLH; + using glm::perspectiveFovLH_NO; + using glm::perspectiveFovLH_ZO; + using glm::perspectiveFovNO; + using glm::perspectiveFovRH; + using glm::perspectiveFovRH_NO; + using glm::perspectiveFovRH_ZO; + using glm::perspectiveFovZO; + using glm::perspectiveLH; + using glm::perspectiveLH_NO; + using glm::perspectiveLH_ZO; + using glm::perspectiveNO; + using glm::perspectiveRH; + using glm::perspectiveRH_NO; + using glm::perspectiveRH_ZO; + using glm::perspectiveZO; + using glm::pi; + using glm::pickMatrix; + using glm::pitch; + using glm::polar; + using glm::pow; + using glm::pow2; + using glm::pow3; + using glm::pow4; + using glm::powerOfTwoAbove; + using glm::powerOfTwoBelow; + using glm::powerOfTwoNearest; + using glm::proj; + using glm::proj2D; + using glm::proj3D; + using glm::project; + using glm::projectNO; + using glm::projectZO; + using glm::qr_decompose; + using glm::quadraticEaseIn; + using glm::quadraticEaseInOut; + using glm::quadraticEaseOut; + using glm::quarter_pi; + using glm::quarticEaseIn; + using glm::quarticEaseInOut; + using glm::quarticEaseOut; + using glm::quatLookAt; + using glm::quatLookAtLH; + using glm::quatLookAtRH; + using glm::quat_cast; + using glm::quat_identity; + using glm::quinticEaseIn; + using glm::quinticEaseInOut; + using glm::quinticEaseOut; + using glm::radialGradient; + using glm::radians; + using glm::recompose; + using glm::reflect; + using glm::refract; + using glm::repeat; + using glm::rgb2YCoCg; + using glm::rgb2YCoCgR; + using glm::rgbColor; + using glm::rightHanded; + using glm::roll; + using glm::root_five; + using glm::root_half_pi; + using glm::root_ln_four; + using glm::root_pi; + using glm::root_three; + using glm::root_two; + using glm::root_two_pi; + using glm::rotate; + using glm::rotateNormalizedAxis; + using glm::rotateX; + using glm::rotateY; + using glm::rotateZ; + using glm::rotation; + using glm::round; + using glm::roundEven; + using glm::rowMajor2; + using glm::rowMajor3; + using glm::rowMajor4; + using glm::rq_decompose; + using glm::saturation; + using glm::scale; + using glm::scaleBias; + using glm::shearX2D; + using glm::shearX3D; + using glm::shearY2D; + using glm::shearY3D; + using glm::shearZ3D; + using glm::shortMix; + using glm::sign; + using glm::sin; + using glm::sineEaseIn; + using glm::sineEaseInOut; + using glm::sineEaseOut; + using glm::sinh; + using glm::slerp; + using glm::smoothstep; + using glm::sortEigenvalues; + using glm::sqrt; + using glm::squad; + using glm::step; + using glm::tan; + using glm::tanh; + using glm::third; + using glm::three_over_two_pi; + using glm::translate; + using glm::transpose; + using 
glm::triangleNormal; + using glm::trunc; + using glm::tweakedInfinitePerspective; + using glm::two_over_pi; + using glm::two_over_root_pi; + using glm::two_pi; + using glm::two_thirds; + using glm::uaddCarry; + using glm::uintBitsToFloat; + using glm::umulExtended; + using glm::unProject; + using glm::unProjectNO; + using glm::unProjectZO; + using glm::unpackDouble2x32; + using glm::unpackHalf2x16; + using glm::unpackSnorm2x16; + using glm::unpackSnorm4x8; + using glm::unpackUnorm2x16; + using glm::unpackUnorm4x8; + using glm::uround; + using glm::usubBorrow; + using glm::value_ptr; + using glm::wrapAngle; + using glm::wxyz; + using glm::yaw; + using glm::yawPitchRoll; + using glm::zero; + } +# endif +} + +#if defined(_MSC_VER) // Workaround +// Partial template specialization doesn't need to be exported explicitly, but this may not work otherwise on MSVC. +export namespace std { + using std::hash; // See GLM_GTX_hash +} +#endif diff --git a/includes/glm/glm.hpp b/includes/glm/glm.hpp new file mode 100644 index 0000000..8b37545 --- /dev/null +++ b/includes/glm/glm.hpp @@ -0,0 +1,137 @@ +/// @ref core +/// @file glm/glm.hpp +/// +/// @mainpage OpenGL Mathematics (GLM) +/// - Website: glm.g-truc.net +/// - GLM API documentation +/// - GLM Manual +/// +/// @defgroup core Core features +/// +/// @brief Features that implement in C++ the GLSL specification as closely as possible. +/// +/// The GLM core consists of C++ types that mirror GLSL types and +/// C++ functions that mirror the GLSL functions. +/// +/// The best documentation for GLM Core is the current GLSL specification, +/// version 4.2 +/// (pdf file). +/// +/// GLM core functionalities require to be included to be used. +/// +/// +/// @defgroup core_vector Vector types +/// +/// Vector types of two to four components with an exhaustive set of operators. +/// +/// @ingroup core +/// +/// +/// @defgroup core_vector_precision Vector types with precision qualifiers +/// +/// @brief Vector types with precision qualifiers which may result in various precision in term of ULPs +/// +/// GLSL allows defining qualifiers for particular variables. +/// With OpenGL's GLSL, these qualifiers have no effect; they are there for compatibility, +/// with OpenGL ES's GLSL, these qualifiers do have an effect. +/// +/// C++ has no language equivalent to qualifier qualifiers. So GLM provides the next-best thing: +/// a number of typedefs that use a particular qualifier. +/// +/// None of these types make any guarantees about the actual qualifier used. +/// +/// @ingroup core +/// +/// +/// @defgroup core_matrix Matrix types +/// +/// Matrix types of with C columns and R rows where C and R are values between 2 to 4 included. +/// These types have exhaustive sets of operators. +/// +/// @ingroup core +/// +/// +/// @defgroup core_matrix_precision Matrix types with precision qualifiers +/// +/// @brief Matrix types with precision qualifiers which may result in various precision in term of ULPs +/// +/// GLSL allows defining qualifiers for particular variables. +/// With OpenGL's GLSL, these qualifiers have no effect; they are there for compatibility, +/// with OpenGL ES's GLSL, these qualifiers do have an effect. +/// +/// C++ has no language equivalent to qualifier qualifiers. So GLM provides the next-best thing: +/// a number of typedefs that use a particular qualifier. +/// +/// None of these types make any guarantees about the actual qualifier used. 
+/// +/// @ingroup core +/// +/// +/// @defgroup ext Stable extensions +/// +/// @brief Additional features not specified by GLSL specification. +/// +/// EXT extensions are fully tested and documented. +/// +/// Even if it's highly unrecommended, it's possible to include all the extensions at once by +/// including . Otherwise, each extension needs to be included a specific file. +/// +/// +/// @defgroup gtc Recommended extensions +/// +/// @brief Additional features not specified by GLSL specification. +/// +/// GTC extensions aim to be stable with tests and documentation. +/// +/// Even if it's highly unrecommended, it's possible to include all the extensions at once by +/// including . Otherwise, each extension needs to be included a specific file. +/// +/// +/// @defgroup gtx Experimental extensions +/// +/// @brief Experimental features not specified by GLSL specification. +/// +/// Experimental extensions are useful functions and types, but the development of +/// their API and functionality is not necessarily stable. They can change +/// substantially between versions. Backwards compatibility is not much of an issue +/// for them. +/// +/// Even if it's highly unrecommended, it's possible to include all the extensions +/// at once by including . Otherwise, each extension needs to be +/// included a specific file. +/// + +#include "detail/_fixes.hpp" + +#include "detail/setup.hpp" + +#pragma once + +#include +#include +#include +#include +#include +#include "fwd.hpp" + +#include "vec2.hpp" +#include "vec3.hpp" +#include "vec4.hpp" +#include "mat2x2.hpp" +#include "mat2x3.hpp" +#include "mat2x4.hpp" +#include "mat3x2.hpp" +#include "mat3x3.hpp" +#include "mat3x4.hpp" +#include "mat4x2.hpp" +#include "mat4x3.hpp" +#include "mat4x4.hpp" + +#include "trigonometric.hpp" +#include "exponential.hpp" +#include "common.hpp" +#include "packing.hpp" +#include "geometric.hpp" +#include "matrix.hpp" +#include "vector_relational.hpp" +#include "integer.hpp" diff --git a/includes/glm/gtc/bitfield.hpp b/includes/glm/gtc/bitfield.hpp new file mode 100644 index 0000000..084fbe7 --- /dev/null +++ b/includes/glm/gtc/bitfield.hpp @@ -0,0 +1,266 @@ +/// @ref gtc_bitfield +/// @file glm/gtc/bitfield.hpp +/// +/// @see core (dependence) +/// @see gtc_bitfield (dependence) +/// +/// @defgroup gtc_bitfield GLM_GTC_bitfield +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Allow to perform bit operations on integer values + +#include "../detail/setup.hpp" + +#pragma once + +// Dependencies +#include "../ext/scalar_int_sized.hpp" +#include "../ext/scalar_uint_sized.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/_vectorize.hpp" +#include "type_precision.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_bitfield extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_bitfield + /// @{ + + /// Build a mask of 'count' bits + /// + /// @see gtc_bitfield + template + GLM_FUNC_DECL genIUType mask(genIUType Bits); + + /// Build a mask of 'count' bits + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed and unsigned integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_bitfield + template + GLM_FUNC_DECL vec mask(vec const& v); + + /// Rotate all bits to the right. All the bits dropped in the right side are inserted back on the left side. 
+ /// + /// @see gtc_bitfield + template + GLM_FUNC_DECL genIUType bitfieldRotateRight(genIUType In, int Shift); + + /// Rotate all bits to the right. All the bits dropped in the right side are inserted back on the left side. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed and unsigned integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_bitfield + template + GLM_FUNC_DECL vec bitfieldRotateRight(vec const& In, int Shift); + + /// Rotate all bits to the left. All the bits dropped in the left side are inserted back on the right side. + /// + /// @see gtc_bitfield + template + GLM_FUNC_DECL genIUType bitfieldRotateLeft(genIUType In, int Shift); + + /// Rotate all bits to the left. All the bits dropped in the left side are inserted back on the right side. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed and unsigned integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_bitfield + template + GLM_FUNC_DECL vec bitfieldRotateLeft(vec const& In, int Shift); + + /// Set to 1 a range of bits. + /// + /// @see gtc_bitfield + template + GLM_FUNC_DECL genIUType bitfieldFillOne(genIUType Value, int FirstBit, int BitCount); + + /// Set to 1 a range of bits. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed and unsigned integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_bitfield + template + GLM_FUNC_DECL vec bitfieldFillOne(vec const& Value, int FirstBit, int BitCount); + + /// Set to 0 a range of bits. + /// + /// @see gtc_bitfield + template + GLM_FUNC_DECL genIUType bitfieldFillZero(genIUType Value, int FirstBit, int BitCount); + + /// Set to 0 a range of bits. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Signed and unsigned integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_bitfield + template + GLM_FUNC_DECL vec bitfieldFillZero(vec const& Value, int FirstBit, int BitCount); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of x followed by the first bit of y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int16 bitfieldInterleave(int8 x, int8 y); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of x followed by the first bit of y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint16 bitfieldInterleave(uint8 x, uint8 y); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of v.x followed by the first bit of v.y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint16 bitfieldInterleave(u8vec2 const& v); + + /// Deinterleaves the bits of x. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL glm::u8vec2 bitfieldDeinterleave(glm::uint16 x); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of x followed by the first bit of y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int32 bitfieldInterleave(int16 x, int16 y); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of x followed by the first bit of y. 
+ /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint32 bitfieldInterleave(uint16 x, uint16 y); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of v.x followed by the first bit of v.y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint32 bitfieldInterleave(u16vec2 const& v); + + /// Deinterleaves the bits of x. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL glm::u16vec2 bitfieldDeinterleave(glm::uint32 x); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of x followed by the first bit of y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int64 bitfieldInterleave(int32 x, int32 y); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of x followed by the first bit of y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint64 bitfieldInterleave(uint32 x, uint32 y); + + /// Interleaves the bits of x and y. + /// The first bit is the first bit of v.x followed by the first bit of v.y. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint64 bitfieldInterleave(u32vec2 const& v); + + /// Deinterleaves the bits of x. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL glm::u32vec2 bitfieldDeinterleave(glm::uint64 x); + + /// Interleaves the bits of x, y and z. + /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int32 bitfieldInterleave(int8 x, int8 y, int8 z); + + /// Interleaves the bits of x, y and z. + /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z); + + /// Interleaves the bits of x, y and z. + /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int64 bitfieldInterleave(int16 x, int16 y, int16 z); + + /// Interleaves the bits of x, y and z. + /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z); + + /// Interleaves the bits of x, y and z. + /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int64 bitfieldInterleave(int32 x, int32 y, int32 z); + + /// Interleaves the bits of x, y and z. + /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint64 bitfieldInterleave(uint32 x, uint32 y, uint32 z); + + /// Interleaves the bits of x, y, z and w. 
+ /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int32 bitfieldInterleave(int8 x, int8 y, int8 z, int8 w); + + /// Interleaves the bits of x, y, z and w. + /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z, uint8 w); + + /// Interleaves the bits of x, y, z and w. + /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL int64 bitfieldInterleave(int16 x, int16 y, int16 z, int16 w); + + /// Interleaves the bits of x, y, z and w. + /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. + /// The other bits are interleaved following the previous sequence. + /// + /// @see gtc_bitfield + GLM_FUNC_DECL uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z, uint16 w); + + /// @} +} //namespace glm + +#include "bitfield.inl" diff --git a/includes/glm/gtc/bitfield.inl b/includes/glm/gtc/bitfield.inl new file mode 100644 index 0000000..cbfd388 --- /dev/null +++ b/includes/glm/gtc/bitfield.inl @@ -0,0 +1,635 @@ +/// @ref gtc_bitfield + +#include "../simd/integer.h" + +namespace glm{ +namespace detail +{ + template + GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y); + + template + GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y, PARAM z); + + template + GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y, PARAM z, PARAM w); + + template<> + GLM_FUNC_QUALIFIER glm::uint16 bitfieldInterleave(glm::uint8 x, glm::uint8 y) + { + glm::uint16 REG1(x); + glm::uint16 REG2(y); + + REG1 = ((REG1 << 4) | REG1) & static_cast(0x0F0F); + REG2 = ((REG2 << 4) | REG2) & static_cast(0x0F0F); + + REG1 = ((REG1 << 2) | REG1) & static_cast(0x3333); + REG2 = ((REG2 << 2) | REG2) & static_cast(0x3333); + + REG1 = ((REG1 << 1) | REG1) & static_cast(0x5555); + REG2 = ((REG2 << 1) | REG2) & static_cast(0x5555); + + return REG1 | static_cast(REG2 << 1); + } + + template<> + GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint16 x, glm::uint16 y) + { + glm::uint32 REG1(x); + glm::uint32 REG2(y); + + REG1 = ((REG1 << 8) | REG1) & static_cast(0x00FF00FF); + REG2 = ((REG2 << 8) | REG2) & static_cast(0x00FF00FF); + + REG1 = ((REG1 << 4) | REG1) & static_cast(0x0F0F0F0F); + REG2 = ((REG2 << 4) | REG2) & static_cast(0x0F0F0F0F); + + REG1 = ((REG1 << 2) | REG1) & static_cast(0x33333333); + REG2 = ((REG2 << 2) | REG2) & static_cast(0x33333333); + + REG1 = ((REG1 << 1) | REG1) & static_cast(0x55555555); + REG2 = ((REG2 << 1) | REG2) & static_cast(0x55555555); + + return REG1 | (REG2 << 1); + } + + template<> + GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint32 x, glm::uint32 y) + { + glm::uint64 REG1(x); + glm::uint64 REG2(y); + + REG1 = ((REG1 << 16) | REG1) & static_cast(0x0000FFFF0000FFFFull); + REG2 = ((REG2 << 16) | REG2) & static_cast(0x0000FFFF0000FFFFull); + + REG1 = ((REG1 << 8) | REG1) & static_cast(0x00FF00FF00FF00FFull); + REG2 = ((REG2 << 8) | REG2) & static_cast(0x00FF00FF00FF00FFull); + + REG1 = ((REG1 << 4) | REG1) & 
static_cast(0x0F0F0F0F0F0F0F0Full); + REG2 = ((REG2 << 4) | REG2) & static_cast(0x0F0F0F0F0F0F0F0Full); + + REG1 = ((REG1 << 2) | REG1) & static_cast(0x3333333333333333ull); + REG2 = ((REG2 << 2) | REG2) & static_cast(0x3333333333333333ull); + + REG1 = ((REG1 << 1) | REG1) & static_cast(0x5555555555555555ull); + REG2 = ((REG2 << 1) | REG2) & static_cast(0x5555555555555555ull); + + return REG1 | (REG2 << 1); + } + + template<> + GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint8 x, glm::uint8 y, glm::uint8 z) + { + glm::uint32 REG1(x); + glm::uint32 REG2(y); + glm::uint32 REG3(z); + + REG1 = ((REG1 << 16) | REG1) & static_cast(0xFF0000FFu); + REG2 = ((REG2 << 16) | REG2) & static_cast(0xFF0000FFu); + REG3 = ((REG3 << 16) | REG3) & static_cast(0xFF0000FFu); + + REG1 = ((REG1 << 8) | REG1) & static_cast(0x0F00F00Fu); + REG2 = ((REG2 << 8) | REG2) & static_cast(0x0F00F00Fu); + REG3 = ((REG3 << 8) | REG3) & static_cast(0x0F00F00Fu); + + REG1 = ((REG1 << 4) | REG1) & static_cast(0xC30C30C3u); + REG2 = ((REG2 << 4) | REG2) & static_cast(0xC30C30C3u); + REG3 = ((REG3 << 4) | REG3) & static_cast(0xC30C30C3u); + + REG1 = ((REG1 << 2) | REG1) & static_cast(0x49249249u); + REG2 = ((REG2 << 2) | REG2) & static_cast(0x49249249u); + REG3 = ((REG3 << 2) | REG3) & static_cast(0x49249249u); + + return REG1 | (REG2 << 1) | (REG3 << 2); + } + + template<> + GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint16 x, glm::uint16 y, glm::uint16 z) + { + glm::uint64 REG1(x); + glm::uint64 REG2(y); + glm::uint64 REG3(z); + + REG1 = ((REG1 << 32) | REG1) & static_cast(0xFFFF00000000FFFFull); + REG2 = ((REG2 << 32) | REG2) & static_cast(0xFFFF00000000FFFFull); + REG3 = ((REG3 << 32) | REG3) & static_cast(0xFFFF00000000FFFFull); + + REG1 = ((REG1 << 16) | REG1) & static_cast(0x00FF0000FF0000FFull); + REG2 = ((REG2 << 16) | REG2) & static_cast(0x00FF0000FF0000FFull); + REG3 = ((REG3 << 16) | REG3) & static_cast(0x00FF0000FF0000FFull); + + REG1 = ((REG1 << 8) | REG1) & static_cast(0xF00F00F00F00F00Full); + REG2 = ((REG2 << 8) | REG2) & static_cast(0xF00F00F00F00F00Full); + REG3 = ((REG3 << 8) | REG3) & static_cast(0xF00F00F00F00F00Full); + + REG1 = ((REG1 << 4) | REG1) & static_cast(0x30C30C30C30C30C3ull); + REG2 = ((REG2 << 4) | REG2) & static_cast(0x30C30C30C30C30C3ull); + REG3 = ((REG3 << 4) | REG3) & static_cast(0x30C30C30C30C30C3ull); + + REG1 = ((REG1 << 2) | REG1) & static_cast(0x9249249249249249ull); + REG2 = ((REG2 << 2) | REG2) & static_cast(0x9249249249249249ull); + REG3 = ((REG3 << 2) | REG3) & static_cast(0x9249249249249249ull); + + return REG1 | (REG2 << 1) | (REG3 << 2); + } + + template<> + GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint32 x, glm::uint32 y, glm::uint32 z) + { + glm::uint64 REG1(x); + glm::uint64 REG2(y); + glm::uint64 REG3(z); + + REG1 = ((REG1 << 32) | REG1) & static_cast(0xFFFF00000000FFFFull); + REG2 = ((REG2 << 32) | REG2) & static_cast(0xFFFF00000000FFFFull); + REG3 = ((REG3 << 32) | REG3) & static_cast(0xFFFF00000000FFFFull); + + REG1 = ((REG1 << 16) | REG1) & static_cast(0x00FF0000FF0000FFull); + REG2 = ((REG2 << 16) | REG2) & static_cast(0x00FF0000FF0000FFull); + REG3 = ((REG3 << 16) | REG3) & static_cast(0x00FF0000FF0000FFull); + + REG1 = ((REG1 << 8) | REG1) & static_cast(0xF00F00F00F00F00Full); + REG2 = ((REG2 << 8) | REG2) & static_cast(0xF00F00F00F00F00Full); + REG3 = ((REG3 << 8) | REG3) & static_cast(0xF00F00F00F00F00Full); + + REG1 = ((REG1 << 4) | REG1) & static_cast(0x30C30C30C30C30C3ull); + REG2 = ((REG2 << 4) | REG2) & 
static_cast(0x30C30C30C30C30C3ull); + REG3 = ((REG3 << 4) | REG3) & static_cast(0x30C30C30C30C30C3ull); + + REG1 = ((REG1 << 2) | REG1) & static_cast(0x9249249249249249ull); + REG2 = ((REG2 << 2) | REG2) & static_cast(0x9249249249249249ull); + REG3 = ((REG3 << 2) | REG3) & static_cast(0x9249249249249249ull); + + return REG1 | (REG2 << 1) | (REG3 << 2); + } + + template<> + GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint8 x, glm::uint8 y, glm::uint8 z, glm::uint8 w) + { + glm::uint32 REG1(x); + glm::uint32 REG2(y); + glm::uint32 REG3(z); + glm::uint32 REG4(w); + + REG1 = ((REG1 << 12) | REG1) & static_cast(0x000F000Fu); + REG2 = ((REG2 << 12) | REG2) & static_cast(0x000F000Fu); + REG3 = ((REG3 << 12) | REG3) & static_cast(0x000F000Fu); + REG4 = ((REG4 << 12) | REG4) & static_cast(0x000F000Fu); + + REG1 = ((REG1 << 6) | REG1) & static_cast(0x03030303u); + REG2 = ((REG2 << 6) | REG2) & static_cast(0x03030303u); + REG3 = ((REG3 << 6) | REG3) & static_cast(0x03030303u); + REG4 = ((REG4 << 6) | REG4) & static_cast(0x03030303u); + + REG1 = ((REG1 << 3) | REG1) & static_cast(0x11111111u); + REG2 = ((REG2 << 3) | REG2) & static_cast(0x11111111u); + REG3 = ((REG3 << 3) | REG3) & static_cast(0x11111111u); + REG4 = ((REG4 << 3) | REG4) & static_cast(0x11111111u); + + return REG1 | (REG2 << 1) | (REG3 << 2) | (REG4 << 3); + } + + template<> + GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint16 x, glm::uint16 y, glm::uint16 z, glm::uint16 w) + { + glm::uint64 REG1(x); + glm::uint64 REG2(y); + glm::uint64 REG3(z); + glm::uint64 REG4(w); + + REG1 = ((REG1 << 24) | REG1) & static_cast(0x000000FF000000FFull); + REG2 = ((REG2 << 24) | REG2) & static_cast(0x000000FF000000FFull); + REG3 = ((REG3 << 24) | REG3) & static_cast(0x000000FF000000FFull); + REG4 = ((REG4 << 24) | REG4) & static_cast(0x000000FF000000FFull); + + REG1 = ((REG1 << 12) | REG1) & static_cast(0x000F000F000F000Full); + REG2 = ((REG2 << 12) | REG2) & static_cast(0x000F000F000F000Full); + REG3 = ((REG3 << 12) | REG3) & static_cast(0x000F000F000F000Full); + REG4 = ((REG4 << 12) | REG4) & static_cast(0x000F000F000F000Full); + + REG1 = ((REG1 << 6) | REG1) & static_cast(0x0303030303030303ull); + REG2 = ((REG2 << 6) | REG2) & static_cast(0x0303030303030303ull); + REG3 = ((REG3 << 6) | REG3) & static_cast(0x0303030303030303ull); + REG4 = ((REG4 << 6) | REG4) & static_cast(0x0303030303030303ull); + + REG1 = ((REG1 << 3) | REG1) & static_cast(0x1111111111111111ull); + REG2 = ((REG2 << 3) | REG2) & static_cast(0x1111111111111111ull); + REG3 = ((REG3 << 3) | REG3) & static_cast(0x1111111111111111ull); + REG4 = ((REG4 << 3) | REG4) & static_cast(0x1111111111111111ull); + + return REG1 | (REG2 << 1) | (REG3 << 2) | (REG4 << 3); + } +}//namespace detail + +#if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wsign-compare" +#endif + + template + GLM_FUNC_QUALIFIER genIUType mask(genIUType Bits) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'mask' accepts only integer values"); + + return Bits >= static_cast(sizeof(genIUType) * 8) ? 
~static_cast(0) : (static_cast(1) << Bits) - static_cast(1); + } + +#if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +#endif + + template + GLM_FUNC_QUALIFIER vec mask(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'mask' accepts only integer values"); + + return detail::functor1::call(mask, v); + } + + template + GLM_FUNC_QUALIFIER genIType bitfieldRotateRight(genIType In, int Shift) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldRotateRight' accepts only integer values"); + + int const BitSize = static_cast(sizeof(genIType) * 8); + return (In << static_cast(Shift)) | (In >> static_cast(BitSize - Shift)); + } + + template + GLM_FUNC_QUALIFIER vec bitfieldRotateRight(vec const& In, int Shift) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldRotateRight' accepts only integer values"); + + int const BitSize = static_cast(sizeof(T) * 8); + return (In << static_cast(Shift)) | (In >> static_cast(BitSize - Shift)); + } + + template + GLM_FUNC_QUALIFIER genIType bitfieldRotateLeft(genIType In, int Shift) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldRotateLeft' accepts only integer values"); + + int const BitSize = static_cast(sizeof(genIType) * 8); + return (In >> static_cast(Shift)) | (In << static_cast(BitSize - Shift)); + } + + template + GLM_FUNC_QUALIFIER vec bitfieldRotateLeft(vec const& In, int Shift) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldRotateLeft' accepts only integer values"); + + int const BitSize = static_cast(sizeof(T) * 8); + return (In >> static_cast(Shift)) | (In << static_cast(BitSize - Shift)); + } + + template + GLM_FUNC_QUALIFIER genIUType bitfieldFillOne(genIUType Value, int FirstBit, int BitCount) + { + return Value | static_cast(mask(BitCount) << FirstBit); + } + + template + GLM_FUNC_QUALIFIER vec bitfieldFillOne(vec const& Value, int FirstBit, int BitCount) + { + return Value | static_cast(mask(BitCount) << FirstBit); + } + + template + GLM_FUNC_QUALIFIER genIUType bitfieldFillZero(genIUType Value, int FirstBit, int BitCount) + { + return Value & static_cast(~(mask(BitCount) << FirstBit)); + } + + template + GLM_FUNC_QUALIFIER vec bitfieldFillZero(vec const& Value, int FirstBit, int BitCount) + { + return Value & static_cast(~(mask(BitCount) << FirstBit)); + } + + GLM_FUNC_QUALIFIER int16 bitfieldInterleave(int8 x, int8 y) + { + union sign8 + { + int8 i; + uint8 u; + } sign_x, sign_y; + + union sign16 + { + int16 i; + uint16 u; + } result; + + sign_x.i = x; + sign_y.i = y; + result.u = bitfieldInterleave(sign_x.u, sign_y.u); + + return result.i; + } + + GLM_FUNC_QUALIFIER uint16 bitfieldInterleave(uint8 x, uint8 y) + { + return detail::bitfieldInterleave(x, y); + } + + GLM_FUNC_QUALIFIER uint16 bitfieldInterleave(u8vec2 const& v) + { + return detail::bitfieldInterleave(v.x, v.y); + } + + GLM_FUNC_QUALIFIER u8vec2 bitfieldDeinterleave(glm::uint16 x) + { + uint16 REG1(x); + uint16 REG2(x >>= 1); + + REG1 = REG1 & static_cast(0x5555); + REG2 = REG2 & static_cast(0x5555); + + REG1 = ((REG1 >> 1) | REG1) & static_cast(0x3333); + REG2 = ((REG2 >> 1) | REG2) & static_cast(0x3333); + + REG1 = ((REG1 >> 2) | REG1) & static_cast(0x0F0F); + REG2 = ((REG2 >> 2) | REG2) & static_cast(0x0F0F); + + REG1 = ((REG1 >> 4) | REG1) & static_cast(0x00FF); + REG2 = ((REG2 >> 4) | REG2) & static_cast(0x00FF); + + REG1 = ((REG1 >> 8) | REG1) & static_cast(0xFFFF); + REG2 = ((REG2 >> 8) | REG2) & static_cast(0xFFFF); + + return glm::u8vec2(REG1, REG2); + } + + 
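The interleave/deinterleave routines above implement Morton (Z-order) encoding: each operand's bits are spread apart with a shift-or-mask ladder and then merged bit by bit. A minimal round-trip sketch, not part of the patch, assuming the bundled gtc/bitfield.hpp header is on the include path (input values are arbitrary):

    #include <cassert>
    #include <glm/gtc/bitfield.hpp>

    int main()
    {
        glm::uint8 x = 0xA7, y = 0x3C;

        // Bit i of x lands at bit 2*i, bit i of y at bit 2*i + 1.
        glm::uint16 morton = glm::bitfieldInterleave(x, y);

        // bitfieldDeinterleave undoes the spread and recovers the pair.
        glm::u8vec2 back = glm::bitfieldDeinterleave(morton);
        assert(back.x == x && back.y == y);
        return 0;
    }
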
GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int16 x, int16 y) + { + union sign16 + { + int16 i; + uint16 u; + } sign_x, sign_y; + + union sign32 + { + int32 i; + uint32 u; + } result; + + sign_x.i = x; + sign_y.i = y; + result.u = bitfieldInterleave(sign_x.u, sign_y.u); + + return result.i; + } + + GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint16 x, uint16 y) + { + return detail::bitfieldInterleave(x, y); + } + + GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(u16vec2 const& v) + { + return detail::bitfieldInterleave(v.x, v.y); + } + + GLM_FUNC_QUALIFIER glm::u16vec2 bitfieldDeinterleave(glm::uint32 x) + { + glm::uint32 REG1(x); + glm::uint32 REG2(x >>= 1); + + REG1 = REG1 & static_cast(0x55555555); + REG2 = REG2 & static_cast(0x55555555); + + REG1 = ((REG1 >> 1) | REG1) & static_cast(0x33333333); + REG2 = ((REG2 >> 1) | REG2) & static_cast(0x33333333); + + REG1 = ((REG1 >> 2) | REG1) & static_cast(0x0F0F0F0F); + REG2 = ((REG2 >> 2) | REG2) & static_cast(0x0F0F0F0F); + + REG1 = ((REG1 >> 4) | REG1) & static_cast(0x00FF00FF); + REG2 = ((REG2 >> 4) | REG2) & static_cast(0x00FF00FF); + + REG1 = ((REG1 >> 8) | REG1) & static_cast(0x0000FFFF); + REG2 = ((REG2 >> 8) | REG2) & static_cast(0x0000FFFF); + + return glm::u16vec2(REG1, REG2); + } + + GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int32 x, int32 y) + { + union sign32 + { + int32 i; + uint32 u; + } sign_x, sign_y; + + union sign64 + { + int64 i; + uint64 u; + } result; + + sign_x.i = x; + sign_y.i = y; + result.u = bitfieldInterleave(sign_x.u, sign_y.u); + + return result.i; + } + + GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint32 x, uint32 y) + { + return detail::bitfieldInterleave(x, y); + } + + GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(u32vec2 const& v) + { + return detail::bitfieldInterleave(v.x, v.y); + } + + GLM_FUNC_QUALIFIER glm::u32vec2 bitfieldDeinterleave(glm::uint64 x) + { + glm::uint64 REG1(x); + glm::uint64 REG2(x >>= 1); + + REG1 = REG1 & static_cast(0x5555555555555555ull); + REG2 = REG2 & static_cast(0x5555555555555555ull); + + REG1 = ((REG1 >> 1) | REG1) & static_cast(0x3333333333333333ull); + REG2 = ((REG2 >> 1) | REG2) & static_cast(0x3333333333333333ull); + + REG1 = ((REG1 >> 2) | REG1) & static_cast(0x0F0F0F0F0F0F0F0Full); + REG2 = ((REG2 >> 2) | REG2) & static_cast(0x0F0F0F0F0F0F0F0Full); + + REG1 = ((REG1 >> 4) | REG1) & static_cast(0x00FF00FF00FF00FFull); + REG2 = ((REG2 >> 4) | REG2) & static_cast(0x00FF00FF00FF00FFull); + + REG1 = ((REG1 >> 8) | REG1) & static_cast(0x0000FFFF0000FFFFull); + REG2 = ((REG2 >> 8) | REG2) & static_cast(0x0000FFFF0000FFFFull); + + REG1 = ((REG1 >> 16) | REG1) & static_cast(0x00000000FFFFFFFFull); + REG2 = ((REG2 >> 16) | REG2) & static_cast(0x00000000FFFFFFFFull); + + return glm::u32vec2(REG1, REG2); + } + + GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int8 x, int8 y, int8 z) + { + union sign8 + { + int8 i; + uint8 u; + } sign_x, sign_y, sign_z; + + union sign32 + { + int32 i; + uint32 u; + } result; + + sign_x.i = x; + sign_y.i = y; + sign_z.i = z; + result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u); + + return result.i; + } + + GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z) + { + return detail::bitfieldInterleave(x, y, z); + } + + GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(u8vec3 const& v) + { + return detail::bitfieldInterleave(v.x, v.y, v.z); + } + + GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int16 x, int16 y, int16 z) + { + union sign16 + { + int16 i; + uint16 u; + } sign_x, sign_y, sign_z; + + union sign64 + { + int64 i; + 
uint64 u; + } result; + + sign_x.i = x; + sign_y.i = y; + sign_z.i = z; + result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u); + + return result.i; + } + + GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z) + { + return detail::bitfieldInterleave(x, y, z); + } + + GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u16vec3 const& v) + { + return detail::bitfieldInterleave(v.x, v.y, v.z); + } + + GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int32 x, int32 y, int32 z) + { + union sign16 + { + int32 i; + uint32 u; + } sign_x, sign_y, sign_z; + + union sign64 + { + int64 i; + uint64 u; + } result; + + sign_x.i = x; + sign_y.i = y; + sign_z.i = z; + result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u); + + return result.i; + } + + GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint32 x, uint32 y, uint32 z) + { + return detail::bitfieldInterleave(x, y, z); + } + + GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u32vec3 const& v) + { + return detail::bitfieldInterleave(v.x, v.y, v.z); + } + + GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int8 x, int8 y, int8 z, int8 w) + { + union sign8 + { + int8 i; + uint8 u; + } sign_x, sign_y, sign_z, sign_w; + + union sign32 + { + int32 i; + uint32 u; + } result; + + sign_x.i = x; + sign_y.i = y; + sign_z.i = z; + sign_w.i = w; + result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u, sign_w.u); + + return result.i; + } + + GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z, uint8 w) + { + return detail::bitfieldInterleave(x, y, z, w); + } + + GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(u8vec4 const& v) + { + return detail::bitfieldInterleave(v.x, v.y, v.z, v.w); + } + + GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int16 x, int16 y, int16 z, int16 w) + { + union sign16 + { + int16 i; + uint16 u; + } sign_x, sign_y, sign_z, sign_w; + + union sign64 + { + int64 i; + uint64 u; + } result; + + sign_x.i = x; + sign_y.i = y; + sign_z.i = z; + sign_w.i = w; + result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u, sign_w.u); + + return result.i; + } + + GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z, uint16 w) + { + return detail::bitfieldInterleave(x, y, z, w); + } + + GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u16vec4 const& v) + { + return detail::bitfieldInterleave(v.x, v.y, v.z, v.w); + } +}//namespace glm diff --git a/includes/glm/gtc/color_space.hpp b/includes/glm/gtc/color_space.hpp new file mode 100644 index 0000000..cffd9f0 --- /dev/null +++ b/includes/glm/gtc/color_space.hpp @@ -0,0 +1,56 @@ +/// @ref gtc_color_space +/// @file glm/gtc/color_space.hpp +/// +/// @see core (dependence) +/// @see gtc_color_space (dependence) +/// +/// @defgroup gtc_color_space GLM_GTC_color_space +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Allow to perform bit operations on integer values + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../exponential.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_color_space extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_color_space + /// @{ + + /// Convert a linear color to sRGB color using a standard gamma correction. + /// IEC 61966-2-1:1999 / Rec. 
709 specification https://www.w3.org/Graphics/Color/srgb + template + GLM_FUNC_DECL vec convertLinearToSRGB(vec const& ColorLinear); + + /// Convert a linear color to sRGB color using a custom gamma correction. + /// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb + template + GLM_FUNC_DECL vec convertLinearToSRGB(vec const& ColorLinear, T Gamma); + + /// Convert a sRGB color to linear color using a standard gamma correction. + /// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb + template + GLM_FUNC_DECL vec convertSRGBToLinear(vec const& ColorSRGB); + + /// Convert a sRGB color to linear color using a custom gamma correction. + // IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb + template + GLM_FUNC_DECL vec convertSRGBToLinear(vec const& ColorSRGB, T Gamma); + + /// @} +} //namespace glm + +#include "color_space.inl" diff --git a/includes/glm/gtc/color_space.inl b/includes/glm/gtc/color_space.inl new file mode 100644 index 0000000..2a90004 --- /dev/null +++ b/includes/glm/gtc/color_space.inl @@ -0,0 +1,84 @@ +/// @ref gtc_color_space + +namespace glm{ +namespace detail +{ + template + struct compute_rgbToSrgb + { + GLM_FUNC_QUALIFIER static vec call(vec const& ColorRGB, T GammaCorrection) + { + vec const ClampedColor(clamp(ColorRGB, static_cast(0), static_cast(1))); + + return mix( + pow(ClampedColor, vec(GammaCorrection)) * static_cast(1.055) - static_cast(0.055), + ClampedColor * static_cast(12.92), + lessThan(ClampedColor, vec(static_cast(0.0031308)))); + } + }; + + template + struct compute_rgbToSrgb<4, T, Q> + { + GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<4, T, Q> const& ColorRGB, T GammaCorrection) + { + return vec<4, T, Q>(compute_rgbToSrgb<3, T, Q>::call(vec<3, T, Q>(ColorRGB), GammaCorrection), ColorRGB.w); + } + }; + + template + struct compute_srgbToRgb + { + GLM_FUNC_QUALIFIER static vec call(vec const& ColorSRGB, T Gamma) + { + return mix( + pow((ColorSRGB + static_cast(0.055)) * static_cast(0.94786729857819905213270142180095), vec(Gamma)), + ColorSRGB * static_cast(0.07739938080495356037151702786378), + lessThanEqual(ColorSRGB, vec(static_cast(0.04045)))); + } + }; + + template + struct compute_srgbToRgb<4, T, Q> + { + GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<4, T, Q> const& ColorSRGB, T Gamma) + { + return vec<4, T, Q>(compute_srgbToRgb<3, T, Q>::call(vec<3, T, Q>(ColorSRGB), Gamma), ColorSRGB.w); + } + }; +}//namespace detail + + template + GLM_FUNC_QUALIFIER vec convertLinearToSRGB(vec const& ColorLinear) + { + return detail::compute_rgbToSrgb::call(ColorLinear, static_cast(0.41666)); + } + + // Based on Ian Taylor http://chilliant.blogspot.fr/2012/08/srgb-approximations-for-hlsl.html + template<> + GLM_FUNC_QUALIFIER vec<3, float, lowp> convertLinearToSRGB(vec<3, float, lowp> const& ColorLinear) + { + vec<3, float, lowp> S1 = sqrt(ColorLinear); + vec<3, float, lowp> S2 = sqrt(S1); + vec<3, float, lowp> S3 = sqrt(S2); + return 0.662002687f * S1 + 0.684122060f * S2 - 0.323583601f * S3 - 0.0225411470f * ColorLinear; + } + + template + GLM_FUNC_QUALIFIER vec convertLinearToSRGB(vec const& ColorLinear, T Gamma) + { + return detail::compute_rgbToSrgb::call(ColorLinear, static_cast(1) / Gamma); + } + + template + GLM_FUNC_QUALIFIER vec convertSRGBToLinear(vec const& ColorSRGB) + { + return detail::compute_srgbToRgb::call(ColorSRGB, static_cast(2.4)); + } + + template + GLM_FUNC_QUALIFIER vec convertSRGBToLinear(vec const& ColorSRGB, T Gamma) + { + return 
detail::compute_srgbToRgb::call(ColorSRGB, Gamma); + } +}//namespace glm diff --git a/includes/glm/gtc/constants.hpp b/includes/glm/gtc/constants.hpp new file mode 100644 index 0000000..6a1f37d --- /dev/null +++ b/includes/glm/gtc/constants.hpp @@ -0,0 +1,170 @@ +/// @ref gtc_constants +/// @file glm/gtc/constants.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtc_constants GLM_GTC_constants +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Provide a list of constants and precomputed useful values. + +#pragma once + +// Dependencies +#include "../ext/scalar_constants.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_constants extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_constants + /// @{ + + /// Return 0. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType zero(); + + /// Return 1. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType one(); + + /// Return pi * 2. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType two_pi(); + + /// Return unit-circle circumference, or pi * 2. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType tau(); + + /// Return square root of pi. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType root_pi(); + + /// Return pi / 2. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType half_pi(); + + /// Return pi / 2 * 3. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType three_over_two_pi(); + + /// Return pi / 4. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType quarter_pi(); + + /// Return 1 / pi. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_pi(); + + /// Return 1 / (pi * 2). + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_two_pi(); + + /// Return 2 / pi. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType two_over_pi(); + + /// Return 4 / pi. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType four_over_pi(); + + /// Return 2 / sqrt(pi). + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType two_over_root_pi(); + + /// Return 1 / sqrt(2). + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_root_two(); + + /// Return sqrt(pi / 2). + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType root_half_pi(); + + /// Return sqrt(2 * pi). + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType root_two_pi(); + + /// Return sqrt(ln(4)). + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType root_ln_four(); + + /// Return e constant. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType e(); + + /// Return Euler's constant. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType euler(); + + /// Return sqrt(2). + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType root_two(); + + /// Return sqrt(3). + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType root_three(); + + /// Return sqrt(5). + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType root_five(); + + /// Return ln(2). + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType ln_two(); + + /// Return ln(10). 
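For context, the constants declared in this header are nullary function templates rather than global variables; a small usage sketch, not part of the patch (the radius is arbitrary):

    #include <cstdio>
    #include <glm/gtc/constants.hpp>

    int main()
    {
        float circumference = glm::two_pi<float>() * 1.5f; // 2 * pi * r for r = 1.5
        double phi = glm::golden_ratio<double>();          // ~1.618033988749895
        std::printf("%f %f\n", circumference, phi);
        return 0;
    }
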
+ /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType ln_ten(); + + /// Return ln(ln(2)). + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType ln_ln_two(); + + /// Return 1 / 3. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType third(); + + /// Return 2 / 3. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType two_thirds(); + + /// Return the golden ratio constant. + /// @see gtc_constants + template + GLM_FUNC_DECL GLM_CONSTEXPR genType golden_ratio(); + + /// @} +} //namespace glm + +#include "constants.inl" diff --git a/includes/glm/gtc/constants.inl b/includes/glm/gtc/constants.inl new file mode 100644 index 0000000..e9d3776 --- /dev/null +++ b/includes/glm/gtc/constants.inl @@ -0,0 +1,173 @@ +/// @ref gtc_constants + +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType zero() + { + return genType(0); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one() + { + return genType(1); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_pi() + { + return genType(6.28318530717958647692528676655900576); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType tau() + { + return two_pi(); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_pi() + { + return genType(1.772453850905516027); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType half_pi() + { + return genType(1.57079632679489661923132169163975144); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType three_over_two_pi() + { + return genType(4.71238898038468985769396507491925432); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType quarter_pi() + { + return genType(0.785398163397448309615660845819875721); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_pi() + { + return genType(0.318309886183790671537767526745028724); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_two_pi() + { + return genType(0.159154943091895335768883763372514362); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_over_pi() + { + return genType(0.636619772367581343075535053490057448); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType four_over_pi() + { + return genType(1.273239544735162686151070106980114898); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_over_root_pi() + { + return genType(1.12837916709551257389615890312154517); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_root_two() + { + return genType(0.707106781186547524400844362104849039); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_half_pi() + { + return genType(1.253314137315500251); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_two_pi() + { + return genType(2.506628274631000502); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_ln_four() + { + return genType(1.17741002251547469); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType e() + { + return genType(2.71828182845904523536); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType euler() + { + return genType(0.577215664901532860606); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_two() + { + return genType(1.41421356237309504880168872420969808); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_three() + { + return genType(1.73205080756887729352744634150587236); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_five() + { + return 
genType(2.23606797749978969640917366873127623); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_two() + { + return genType(0.693147180559945309417232121458176568); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_ten() + { + return genType(2.30258509299404568401799145468436421); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_ln_two() + { + return genType(-0.3665129205816643); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType third() + { + return genType(0.3333333333333333333333333333333333333333); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_thirds() + { + return genType(0.666666666666666666666666666666666666667); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType golden_ratio() + { + return genType(1.61803398874989484820458683436563811); + } + +} //namespace glm diff --git a/includes/glm/gtc/epsilon.hpp b/includes/glm/gtc/epsilon.hpp new file mode 100644 index 0000000..640439b --- /dev/null +++ b/includes/glm/gtc/epsilon.hpp @@ -0,0 +1,60 @@ +/// @ref gtc_epsilon +/// @file glm/gtc/epsilon.hpp +/// +/// @see core (dependence) +/// @see gtc_quaternion (dependence) +/// +/// @defgroup gtc_epsilon GLM_GTC_epsilon +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Comparison functions for a user defined epsilon values. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_epsilon extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_epsilon + /// @{ + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is satisfied. + /// + /// @see gtc_epsilon + template + GLM_FUNC_DECL vec epsilonEqual(vec const& x, vec const& y, T const& epsilon); + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is satisfied. + /// + /// @see gtc_epsilon + template + GLM_FUNC_DECL bool epsilonEqual(genType const& x, genType const& y, genType const& epsilon); + + /// Returns the component-wise comparison of |x - y| < epsilon. + /// True if this expression is not satisfied. + /// + /// @see gtc_epsilon + template + GLM_FUNC_DECL vec epsilonNotEqual(vec const& x, vec const& y, T const& epsilon); + + /// Returns the component-wise comparison of |x - y| >= epsilon. + /// True if this expression is not satisfied. 
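A short sketch of the epsilon comparisons declared here, not part of the patch (the tolerance is arbitrary):

    #include <glm/glm.hpp>
    #include <glm/gtc/epsilon.hpp>

    bool nearlyEqual(glm::vec3 const& a, glm::vec3 const& b)
    {
        // Component-wise |a - b| < 0.001, reduced to a single bool with all().
        return glm::all(glm::epsilonEqual(a, b, 0.001f));
    }
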
+ /// + /// @see gtc_epsilon + template + GLM_FUNC_DECL bool epsilonNotEqual(genType const& x, genType const& y, genType const& epsilon); + + /// @} +}//namespace glm + +#include "epsilon.inl" diff --git a/includes/glm/gtc/epsilon.inl b/includes/glm/gtc/epsilon.inl new file mode 100644 index 0000000..508b9f8 --- /dev/null +++ b/includes/glm/gtc/epsilon.inl @@ -0,0 +1,80 @@ +/// @ref gtc_epsilon + +// Dependency: +#include "../vector_relational.hpp" +#include "../common.hpp" + +namespace glm +{ + template<> + GLM_FUNC_QUALIFIER bool epsilonEqual + ( + float const& x, + float const& y, + float const& epsilon + ) + { + return abs(x - y) < epsilon; + } + + template<> + GLM_FUNC_QUALIFIER bool epsilonEqual + ( + double const& x, + double const& y, + double const& epsilon + ) + { + return abs(x - y) < epsilon; + } + + template + GLM_FUNC_QUALIFIER vec epsilonEqual(vec const& x, vec const& y, T const& epsilon) + { + return lessThan(abs(x - y), vec(epsilon)); + } + + template + GLM_FUNC_QUALIFIER vec epsilonEqual(vec const& x, vec const& y, vec const& epsilon) + { + return lessThan(abs(x - y), vec(epsilon)); + } + + template<> + GLM_FUNC_QUALIFIER bool epsilonNotEqual(float const& x, float const& y, float const& epsilon) + { + return abs(x - y) >= epsilon; + } + + template<> + GLM_FUNC_QUALIFIER bool epsilonNotEqual(double const& x, double const& y, double const& epsilon) + { + return abs(x - y) >= epsilon; + } + + template + GLM_FUNC_QUALIFIER vec epsilonNotEqual(vec const& x, vec const& y, T const& epsilon) + { + return greaterThanEqual(abs(x - y), vec(epsilon)); + } + + template + GLM_FUNC_QUALIFIER vec epsilonNotEqual(vec const& x, vec const& y, vec const& epsilon) + { + return greaterThanEqual(abs(x - y), vec(epsilon)); + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> epsilonEqual(qua const& x, qua const& y, T const& epsilon) + { + vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w); + return lessThan(abs(v), vec<4, T, Q>(epsilon)); + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> epsilonNotEqual(qua const& x, qua const& y, T const& epsilon) + { + vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w); + return greaterThanEqual(abs(v), vec<4, T, Q>(epsilon)); + } +}//namespace glm diff --git a/includes/glm/gtc/integer.hpp b/includes/glm/gtc/integer.hpp new file mode 100644 index 0000000..cff08dc --- /dev/null +++ b/includes/glm/gtc/integer.hpp @@ -0,0 +1,43 @@ +/// @ref gtc_integer +/// @file glm/gtc/integer.hpp +/// +/// @see core (dependence) +/// @see gtc_integer (dependence) +/// +/// @defgroup gtc_integer GLM_GTC_integer +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// @brief Allow to perform bit operations on integer values + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../common.hpp" +#include "../integer.hpp" +#include "../exponential.hpp" +#include "../ext/scalar_common.hpp" +#include "../ext/vector_common.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_integer extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_integer + /// @{ + + /// Returns the log2 of x for integer values. Useful to compute mipmap count from the texture size. 
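The mipmap-count use case mentioned above looks roughly like the following sketch, which is not part of the patch (the texture size is illustrative):

    #include <glm/glm.hpp>
    #include <glm/gtc/integer.hpp>

    int mipLevels(glm::ivec2 size) // e.g. {1024, 768}
    {
        glm::ivec2 levels = glm::log2(size);     // floor(log2) per component
        return glm::max(levels.x, levels.y) + 1; // 11 levels for 1024x768
    }
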
+ /// @see gtc_integer + template + GLM_FUNC_DECL vec log2(vec const& v); + + /// @} +} //namespace glm + +#include "integer.inl" diff --git a/includes/glm/gtc/integer.inl b/includes/glm/gtc/integer.inl new file mode 100644 index 0000000..5f66dfe --- /dev/null +++ b/includes/glm/gtc/integer.inl @@ -0,0 +1,33 @@ +/// @ref gtc_integer + +namespace glm{ +namespace detail +{ + template + struct compute_log2 + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + //Equivalent to return findMSB(vec); but save one function call in ASM with VC + //return findMSB(vec); + return vec(detail::compute_findMSB_vec::call(v)); + } + }; + +# if GLM_HAS_BITSCAN_WINDOWS + template + struct compute_log2<4, int, Q, false, Aligned> + { + GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v) + { + vec<4, int, Q> Result; + _BitScanReverse(reinterpret_cast(&Result.x), v.x); + _BitScanReverse(reinterpret_cast(&Result.y), v.y); + _BitScanReverse(reinterpret_cast(&Result.z), v.z); + _BitScanReverse(reinterpret_cast(&Result.w), v.w); + return Result; + } + }; +# endif//GLM_HAS_BITSCAN_WINDOWS +}//namespace detail +}//namespace glm diff --git a/includes/glm/gtc/matrix_access.hpp b/includes/glm/gtc/matrix_access.hpp new file mode 100644 index 0000000..4935ba7 --- /dev/null +++ b/includes/glm/gtc/matrix_access.hpp @@ -0,0 +1,60 @@ +/// @ref gtc_matrix_access +/// @file glm/gtc/matrix_access.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtc_matrix_access GLM_GTC_matrix_access +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Defines functions to access rows or columns of a matrix easily. + +#pragma once + +// Dependency: +#include "../detail/setup.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_matrix_access extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_matrix_access + /// @{ + + /// Get a specific row of a matrix. + /// @see gtc_matrix_access + template + GLM_FUNC_DECL typename genType::row_type row( + genType const& m, + length_t index); + + /// Set a specific row to a matrix. + /// @see gtc_matrix_access + template + GLM_FUNC_DECL genType row( + genType const& m, + length_t index, + typename genType::row_type const& x); + + /// Get a specific column of a matrix. + /// @see gtc_matrix_access + template + GLM_FUNC_DECL typename genType::col_type column( + genType const& m, + length_t index); + + /// Set a specific column to a matrix. 
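A small sketch of the row/column accessors declared here, not part of the patch (the helper names are illustrative):

    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_access.hpp>

    // GLM matrices are column-major, so the translation of an affine
    // transform lives in column 3.
    glm::mat4 setTranslation(glm::mat4 const& m, glm::vec3 const& t)
    {
        return glm::column(m, 3, glm::vec4(t, 1.0f));
    }

    glm::vec4 secondRow(glm::mat4 const& m)
    {
        return glm::row(m, 1); // gathers m[i][1] across all columns
    }
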
+ /// @see gtc_matrix_access + template + GLM_FUNC_DECL genType column( + genType const& m, + length_t index, + typename genType::col_type const& x); + + /// @} +}//namespace glm + +#include "matrix_access.inl" diff --git a/includes/glm/gtc/matrix_access.inl b/includes/glm/gtc/matrix_access.inl new file mode 100644 index 0000000..09fcc10 --- /dev/null +++ b/includes/glm/gtc/matrix_access.inl @@ -0,0 +1,62 @@ +/// @ref gtc_matrix_access + +namespace glm +{ + template + GLM_FUNC_QUALIFIER genType row + ( + genType const& m, + length_t index, + typename genType::row_type const& x + ) + { + assert(index >= 0 && index < m[0].length()); + + genType Result = m; + for(length_t i = 0; i < m.length(); ++i) + Result[i][index] = x[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER typename genType::row_type row + ( + genType const& m, + length_t index + ) + { + assert(index >= 0 && index < m[0].length()); + + typename genType::row_type Result(0); + for(length_t i = 0; i < m.length(); ++i) + Result[i] = m[i][index]; + return Result; + } + + template + GLM_FUNC_QUALIFIER genType column + ( + genType const& m, + length_t index, + typename genType::col_type const& x + ) + { + assert(index >= 0 && index < m.length()); + + genType Result = m; + Result[index] = x; + return Result; + } + + template + GLM_FUNC_QUALIFIER typename genType::col_type column + ( + genType const& m, + length_t index + ) + { + assert(index >= 0 && index < m.length()); + + return m[index]; + } +}//namespace glm diff --git a/includes/glm/gtc/matrix_integer.hpp b/includes/glm/gtc/matrix_integer.hpp new file mode 100644 index 0000000..d7ebdc7 --- /dev/null +++ b/includes/glm/gtc/matrix_integer.hpp @@ -0,0 +1,433 @@ +/// @ref gtc_matrix_integer +/// @file glm/gtc/matrix_integer.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtc_matrix_integer GLM_GTC_matrix_integer +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Defines a number of matrices with integer types. + +#pragma once + +// Dependency: +#include "../mat2x2.hpp" +#include "../mat2x3.hpp" +#include "../mat2x4.hpp" +#include "../mat3x2.hpp" +#include "../mat3x3.hpp" +#include "../mat3x4.hpp" +#include "../mat4x2.hpp" +#include "../mat4x3.hpp" +#include "../mat4x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_matrix_integer extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_matrix_integer + /// @{ + + /// High-qualifier signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, highp> highp_imat2; + + /// High-qualifier signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, highp> highp_imat3; + + /// High-qualifier signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, highp> highp_imat4; + + /// High-qualifier signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, highp> highp_imat2x2; + + /// High-qualifier signed integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, int, highp> highp_imat2x3; + + /// High-qualifier signed integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, int, highp> highp_imat2x4; + + /// High-qualifier signed integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, int, highp> highp_imat3x2; + + /// High-qualifier signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, highp> highp_imat3x3; + + /// High-qualifier signed integer 3x4 matrix. 
+ /// @see gtc_matrix_integer + typedef mat<3, 4, int, highp> highp_imat3x4; + + /// High-qualifier signed integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, int, highp> highp_imat4x2; + + /// High-qualifier signed integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, int, highp> highp_imat4x3; + + /// High-qualifier signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, highp> highp_imat4x4; + + + /// Medium-qualifier signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, mediump> mediump_imat2; + + /// Medium-qualifier signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, mediump> mediump_imat3; + + /// Medium-qualifier signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, mediump> mediump_imat4; + + + /// Medium-qualifier signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, mediump> mediump_imat2x2; + + /// Medium-qualifier signed integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, int, mediump> mediump_imat2x3; + + /// Medium-qualifier signed integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, int, mediump> mediump_imat2x4; + + /// Medium-qualifier signed integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, int, mediump> mediump_imat3x2; + + /// Medium-qualifier signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, mediump> mediump_imat3x3; + + /// Medium-qualifier signed integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, int, mediump> mediump_imat3x4; + + /// Medium-qualifier signed integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, int, mediump> mediump_imat4x2; + + /// Medium-qualifier signed integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, int, mediump> mediump_imat4x3; + + /// Medium-qualifier signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, mediump> mediump_imat4x4; + + + /// Low-qualifier signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, lowp> lowp_imat2; + + /// Low-qualifier signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, lowp> lowp_imat3; + + /// Low-qualifier signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, lowp> lowp_imat4; + + + /// Low-qualifier signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, lowp> lowp_imat2x2; + + /// Low-qualifier signed integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, int, lowp> lowp_imat2x3; + + /// Low-qualifier signed integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, int, lowp> lowp_imat2x4; + + /// Low-qualifier signed integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, int, lowp> lowp_imat3x2; + + /// Low-qualifier signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, lowp> lowp_imat3x3; + + /// Low-qualifier signed integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, int, lowp> lowp_imat3x4; + + /// Low-qualifier signed integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, int, lowp> lowp_imat4x2; + + /// Low-qualifier signed integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, int, lowp> lowp_imat4x3; + + /// Low-qualifier signed integer 4x4 matrix. 
+ /// @see gtc_matrix_integer + typedef mat<4, 4, int, lowp> lowp_imat4x4; + + + /// High-qualifier unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, highp> highp_umat2; + + /// High-qualifier unsigned integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, uint, highp> highp_umat3; + + /// High-qualifier unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, highp> highp_umat4; + + /// High-qualifier unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, highp> highp_umat2x2; + + /// High-qualifier unsigned integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, uint, highp> highp_umat2x3; + + /// High-qualifier unsigned integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, uint, highp> highp_umat2x4; + + /// High-qualifier unsigned integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, uint, highp> highp_umat3x2; + + /// High-qualifier unsigned integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, uint, highp> highp_umat3x3; + + /// High-qualifier unsigned integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, uint, highp> highp_umat3x4; + + /// High-qualifier unsigned integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, uint, highp> highp_umat4x2; + + /// High-qualifier unsigned integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, uint, highp> highp_umat4x3; + + /// High-qualifier unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, highp> highp_umat4x4; + + + /// Medium-qualifier unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, mediump> mediump_umat2; + + /// Medium-qualifier unsigned integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, uint, mediump> mediump_umat3; + + /// Medium-qualifier unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, mediump> mediump_umat4; + + + /// Medium-qualifier unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, mediump> mediump_umat2x2; + + /// Medium-qualifier unsigned integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, uint, mediump> mediump_umat2x3; + + /// Medium-qualifier unsigned integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, uint, mediump> mediump_umat2x4; + + /// Medium-qualifier unsigned integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, uint, mediump> mediump_umat3x2; + + /// Medium-qualifier unsigned integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, uint, mediump> mediump_umat3x3; + + /// Medium-qualifier unsigned integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, uint, mediump> mediump_umat3x4; + + /// Medium-qualifier unsigned integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, uint, mediump> mediump_umat4x2; + + /// Medium-qualifier unsigned integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, uint, mediump> mediump_umat4x3; + + /// Medium-qualifier unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, mediump> mediump_umat4x4; + + + /// Low-qualifier unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, lowp> lowp_umat2; + + /// Low-qualifier unsigned integer 3x3 matrix. 
+ /// @see gtc_matrix_integer + typedef mat<3, 3, uint, lowp> lowp_umat3; + + /// Low-qualifier unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, lowp> lowp_umat4; + + + /// Low-qualifier unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, lowp> lowp_umat2x2; + + /// Low-qualifier unsigned integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, uint, lowp> lowp_umat2x3; + + /// Low-qualifier unsigned integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, uint, lowp> lowp_umat2x4; + + /// Low-qualifier unsigned integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, uint, lowp> lowp_umat3x2; + + /// Low-qualifier unsigned integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, uint, lowp> lowp_umat3x3; + + /// Low-qualifier unsigned integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, uint, lowp> lowp_umat3x4; + + /// Low-qualifier unsigned integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, uint, lowp> lowp_umat4x2; + + /// Low-qualifier unsigned integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, uint, lowp> lowp_umat4x3; + + /// Low-qualifier unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, lowp> lowp_umat4x4; + + + + /// Signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, defaultp> imat2; + + /// Signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, defaultp> imat3; + + /// Signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, defaultp> imat4; + + /// Signed integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, int, defaultp> imat2x2; + + /// Signed integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, int, defaultp> imat2x3; + + /// Signed integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, int, defaultp> imat2x4; + + /// Signed integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, int, defaultp> imat3x2; + + /// Signed integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, int, defaultp> imat3x3; + + /// Signed integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, int, defaultp> imat3x4; + + /// Signed integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, int, defaultp> imat4x2; + + /// Signed integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, int, defaultp> imat4x3; + + /// Signed integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, int, defaultp> imat4x4; + + + + /// Unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, defaultp> umat2; + + /// Unsigned integer 3x3 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 3, uint, defaultp> umat3; + + /// Unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, defaultp> umat4; + + /// Unsigned integer 2x2 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 2, uint, defaultp> umat2x2; + + /// Unsigned integer 2x3 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 3, uint, defaultp> umat2x3; + + /// Unsigned integer 2x4 matrix. + /// @see gtc_matrix_integer + typedef mat<2, 4, uint, defaultp> umat2x4; + + /// Unsigned integer 3x2 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 2, uint, defaultp> umat3x2; + + /// Unsigned integer 3x3 matrix. 
+ /// @see gtc_matrix_integer + typedef mat<3, 3, uint, defaultp> umat3x3; + + /// Unsigned integer 3x4 matrix. + /// @see gtc_matrix_integer + typedef mat<3, 4, uint, defaultp> umat3x4; + + /// Unsigned integer 4x2 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 2, uint, defaultp> umat4x2; + + /// Unsigned integer 4x3 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 3, uint, defaultp> umat4x3; + + /// Unsigned integer 4x4 matrix. + /// @see gtc_matrix_integer + typedef mat<4, 4, uint, defaultp> umat4x4; + + /// @} +}//namespace glm diff --git a/includes/glm/gtc/matrix_inverse.hpp b/includes/glm/gtc/matrix_inverse.hpp new file mode 100644 index 0000000..75d53f2 --- /dev/null +++ b/includes/glm/gtc/matrix_inverse.hpp @@ -0,0 +1,50 @@ +/// @ref gtc_matrix_inverse +/// @file glm/gtc/matrix_inverse.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtc_matrix_inverse GLM_GTC_matrix_inverse +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Defines additional matrix inverting functions. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../matrix.hpp" +#include "../mat2x2.hpp" +#include "../mat3x3.hpp" +#include "../mat4x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_matrix_inverse extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_matrix_inverse + /// @{ + + /// Fast matrix inverse for affine matrix. + /// + /// @param m Input matrix to invert. + /// @tparam genType Squared floating-point matrix: half, float or double. Inverse of matrix based of half-qualifier floating point value is highly inaccurate. + /// @see gtc_matrix_inverse + template + GLM_FUNC_DECL genType affineInverse(genType const& m); + + /// Compute the inverse transpose of a matrix. + /// + /// @param m Input matrix to invert transpose. + /// @tparam genType Squared floating-point matrix: half, float or double. Inverse of matrix based of half-qualifier floating point value is highly inaccurate. 
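A typical use of these two helpers, sketched here for illustration only (not part of the patch):

    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_inverse.hpp>

    void lightingSetup(glm::mat4 const& modelView)
    {
        // Cheap inverse, valid only because a model-view matrix is affine.
        glm::mat4 invMV = glm::affineInverse(modelView);

        // Normal matrix: inverse transpose of the upper-left 3x3 block.
        glm::mat3 normalMatrix = glm::inverseTranspose(glm::mat3(modelView));

        (void)invMV;
        (void)normalMatrix;
    }
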
+ /// @see gtc_matrix_inverse + template + GLM_FUNC_DECL genType inverseTranspose(genType const& m); + + /// @} +}//namespace glm + +#include "matrix_inverse.inl" diff --git a/includes/glm/gtc/matrix_inverse.inl b/includes/glm/gtc/matrix_inverse.inl new file mode 100644 index 0000000..c004b9e --- /dev/null +++ b/includes/glm/gtc/matrix_inverse.inl @@ -0,0 +1,118 @@ +/// @ref gtc_matrix_inverse + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> affineInverse(mat<3, 3, T, Q> const& m) + { + mat<2, 2, T, Q> const Inv(inverse(mat<2, 2, T, Q>(m))); + + return mat<3, 3, T, Q>( + vec<3, T, Q>(Inv[0], static_cast(0)), + vec<3, T, Q>(Inv[1], static_cast(0)), + vec<3, T, Q>(-Inv * vec<2, T, Q>(m[2]), static_cast(1))); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> affineInverse(mat<4, 4, T, Q> const& m) + { + mat<3, 3, T, Q> const Inv(inverse(mat<3, 3, T, Q>(m))); + + return mat<4, 4, T, Q>( + vec<4, T, Q>(Inv[0], static_cast(0)), + vec<4, T, Q>(Inv[1], static_cast(0)), + vec<4, T, Q>(Inv[2], static_cast(0)), + vec<4, T, Q>(-Inv * vec<3, T, Q>(m[3]), static_cast(1))); + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, Q> inverseTranspose(mat<2, 2, T, Q> const& m) + { + T Determinant = m[0][0] * m[1][1] - m[1][0] * m[0][1]; + + mat<2, 2, T, Q> Inverse( + + m[1][1] / Determinant, + - m[0][1] / Determinant, + - m[1][0] / Determinant, + + m[0][0] / Determinant); + + return Inverse; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> inverseTranspose(mat<3, 3, T, Q> const& m) + { + T Determinant = + + m[0][0] * (m[1][1] * m[2][2] - m[1][2] * m[2][1]) + - m[0][1] * (m[1][0] * m[2][2] - m[1][2] * m[2][0]) + + m[0][2] * (m[1][0] * m[2][1] - m[1][1] * m[2][0]); + + mat<3, 3, T, Q> Inverse; + Inverse[0][0] = + (m[1][1] * m[2][2] - m[2][1] * m[1][2]); + Inverse[0][1] = - (m[1][0] * m[2][2] - m[2][0] * m[1][2]); + Inverse[0][2] = + (m[1][0] * m[2][1] - m[2][0] * m[1][1]); + Inverse[1][0] = - (m[0][1] * m[2][2] - m[2][1] * m[0][2]); + Inverse[1][1] = + (m[0][0] * m[2][2] - m[2][0] * m[0][2]); + Inverse[1][2] = - (m[0][0] * m[2][1] - m[2][0] * m[0][1]); + Inverse[2][0] = + (m[0][1] * m[1][2] - m[1][1] * m[0][2]); + Inverse[2][1] = - (m[0][0] * m[1][2] - m[1][0] * m[0][2]); + Inverse[2][2] = + (m[0][0] * m[1][1] - m[1][0] * m[0][1]); + Inverse /= Determinant; + + return Inverse; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> inverseTranspose(mat<4, 4, T, Q> const& m) + { + T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + T SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; + T SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; + T SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; + T SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; + T SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; + T SubFactor11 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; + T SubFactor12 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; + T SubFactor13 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; + T SubFactor14 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; + T SubFactor15 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; + T SubFactor16 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; + T SubFactor17 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + mat<4, 4, T, Q> Inverse; + Inverse[0][0] = + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] 
* SubFactor02); + Inverse[0][1] = - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04); + Inverse[0][2] = + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05); + Inverse[0][3] = - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05); + + Inverse[1][0] = - (m[0][1] * SubFactor00 - m[0][2] * SubFactor01 + m[0][3] * SubFactor02); + Inverse[1][1] = + (m[0][0] * SubFactor00 - m[0][2] * SubFactor03 + m[0][3] * SubFactor04); + Inverse[1][2] = - (m[0][0] * SubFactor01 - m[0][1] * SubFactor03 + m[0][3] * SubFactor05); + Inverse[1][3] = + (m[0][0] * SubFactor02 - m[0][1] * SubFactor04 + m[0][2] * SubFactor05); + + Inverse[2][0] = + (m[0][1] * SubFactor06 - m[0][2] * SubFactor07 + m[0][3] * SubFactor08); + Inverse[2][1] = - (m[0][0] * SubFactor06 - m[0][2] * SubFactor09 + m[0][3] * SubFactor10); + Inverse[2][2] = + (m[0][0] * SubFactor07 - m[0][1] * SubFactor09 + m[0][3] * SubFactor11); + Inverse[2][3] = - (m[0][0] * SubFactor08 - m[0][1] * SubFactor10 + m[0][2] * SubFactor11); + + Inverse[3][0] = - (m[0][1] * SubFactor12 - m[0][2] * SubFactor13 + m[0][3] * SubFactor14); + Inverse[3][1] = + (m[0][0] * SubFactor12 - m[0][2] * SubFactor15 + m[0][3] * SubFactor16); + Inverse[3][2] = - (m[0][0] * SubFactor13 - m[0][1] * SubFactor15 + m[0][3] * SubFactor17); + Inverse[3][3] = + (m[0][0] * SubFactor14 - m[0][1] * SubFactor16 + m[0][2] * SubFactor17); + + T Determinant = + + m[0][0] * Inverse[0][0] + + m[0][1] * Inverse[0][1] + + m[0][2] * Inverse[0][2] + + m[0][3] * Inverse[0][3]; + + Inverse /= Determinant; + + return Inverse; + } +}//namespace glm diff --git a/includes/glm/gtc/matrix_transform.hpp b/includes/glm/gtc/matrix_transform.hpp new file mode 100644 index 0000000..612418f --- /dev/null +++ b/includes/glm/gtc/matrix_transform.hpp @@ -0,0 +1,36 @@ +/// @ref gtc_matrix_transform +/// @file glm/gtc/matrix_transform.hpp +/// +/// @see core (dependence) +/// @see gtx_transform +/// @see gtx_transform2 +/// +/// @defgroup gtc_matrix_transform GLM_GTC_matrix_transform +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Defines functions that generate common transformation matrices. +/// +/// The matrices generated by this extension use standard OpenGL fixed-function +/// conventions. For example, the lookAt function generates a transform from world +/// space into the specific eye space that the projective matrix functions +/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility +/// specifications defines the particular layout of this eye space. 
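The convention described in the file comment above composes as Projection * View * Model; a minimal sketch, not part of the patch (camera placement, field of view and object offset are arbitrary):

    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_transform.hpp>

    glm::mat4 buildMVP()
    {
        // World -> eye space: camera at (4, 3, 3) looking at the origin, +Y up.
        glm::mat4 View = glm::lookAt(glm::vec3(4.0f, 3.0f, 3.0f),
                                     glm::vec3(0.0f),
                                     glm::vec3(0.0f, 1.0f, 0.0f));

        // Eye -> clip space: 45 degree vertical FOV, 4:3 aspect, near/far planes.
        glm::mat4 Projection = glm::perspective(glm::radians(45.0f), 4.0f / 3.0f, 0.1f, 100.0f);

        // Model -> world space: nudge the object along +X.
        glm::mat4 Model = glm::translate(glm::mat4(1.0f), glm::vec3(0.5f, 0.0f, 0.0f));

        return Projection * View * Model;
    }
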
+ +#pragma once + +// Dependencies +#include "../mat4x4.hpp" +#include "../vec2.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" +#include "../ext/matrix_projection.hpp" +#include "../ext/matrix_clip_space.hpp" +#include "../ext/matrix_transform.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_matrix_transform extension included") +#endif + +#include "matrix_transform.inl" diff --git a/includes/glm/gtc/matrix_transform.inl b/includes/glm/gtc/matrix_transform.inl new file mode 100644 index 0000000..15b46bc --- /dev/null +++ b/includes/glm/gtc/matrix_transform.inl @@ -0,0 +1,3 @@ +#include "../geometric.hpp" +#include "../trigonometric.hpp" +#include "../matrix.hpp" diff --git a/includes/glm/gtc/noise.hpp b/includes/glm/gtc/noise.hpp new file mode 100644 index 0000000..ab1772e --- /dev/null +++ b/includes/glm/gtc/noise.hpp @@ -0,0 +1,61 @@ +/// @ref gtc_noise +/// @file glm/gtc/noise.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtc_noise GLM_GTC_noise +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Defines 2D, 3D and 4D procedural noise functions +/// Based on the work of Stefan Gustavson and Ashima Arts on "webgl-noise": +/// https://github.com/ashima/webgl-noise +/// Following Stefan Gustavson's paper "Simplex noise demystified": +/// http://www.itn.liu.se/~stegu/simplexnoise/simplexnoise.pdf + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/_noise.hpp" +#include "../geometric.hpp" +#include "../common.hpp" +#include "../vector_relational.hpp" +#include "../vec2.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_noise extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_noise + /// @{ + + /// Classic perlin noise. + /// @see gtc_noise + template + GLM_FUNC_DECL T perlin( + vec const& p); + + /// Periodic perlin noise. + /// @see gtc_noise + template + GLM_FUNC_DECL T perlin( + vec const& p, + vec const& rep); + + /// Simplex noise. 
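A small sketch showing the three noise entry points together, not part of the patch (frequencies and weights are arbitrary):

    #include <glm/glm.hpp>
    #include <glm/gtc/noise.hpp>

    float heightAt(float x, float y)
    {
        glm::vec2 p(x * 0.05f, y * 0.05f);              // scale controls feature size
        float base   = glm::perlin(p);                  // classic Perlin, roughly in [-1, 1]
        float tiled  = glm::perlin(p, glm::vec2(8.0f)); // periodic variant, repeats every 8 units
        float detail = glm::simplex(p * 4.0f);          // simplex noise for finer detail
        return base + 0.25f * tiled + 0.125f * detail;
    }
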
+ /// @see gtc_noise + template + GLM_FUNC_DECL T simplex( + vec const& p); + + /// @} +}//namespace glm + +#include "noise.inl" diff --git a/includes/glm/gtc/noise.inl b/includes/glm/gtc/noise.inl new file mode 100644 index 0000000..a1cf399 --- /dev/null +++ b/includes/glm/gtc/noise.inl @@ -0,0 +1,807 @@ +/// @ref gtc_noise +/// +// Based on the work of Stefan Gustavson and Ashima Arts on "webgl-noise": +// https://github.com/ashima/webgl-noise +// Following Stefan Gustavson's paper "Simplex noise demystified": +// http://www.itn.liu.se/~stegu/simplexnoise/simplexnoise.pdf + +namespace glm{ +namespace detail +{ + template + GLM_FUNC_QUALIFIER vec<4, T, Q> grad4(T const& j, vec<4, T, Q> const& ip) + { + vec<3, T, Q> pXYZ = floor(fract(vec<3, T, Q>(j) * vec<3, T, Q>(ip)) * T(7)) * ip[2] - T(1); + T pW = static_cast(1.5) - dot(abs(pXYZ), vec<3, T, Q>(1)); + vec<4, T, Q> s = vec<4, T, Q>(lessThan(vec<4, T, Q>(pXYZ, pW), vec<4, T, Q>(0.0))); + pXYZ = pXYZ + (vec<3, T, Q>(s) * T(2) - T(1)) * s.w; + return vec<4, T, Q>(pXYZ, pW); + } +}//namespace detail + + // Classic Perlin noise + template + GLM_FUNC_QUALIFIER T perlin(vec<2, T, Q> const& Position) + { + vec<4, T, Q> Pi = glm::floor(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) + vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); + vec<4, T, Q> Pf = glm::fract(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) - vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); + Pi = mod(Pi, vec<4, T, Q>(289)); // To avoid truncation effects in permutation + vec<4, T, Q> ix(Pi.x, Pi.z, Pi.x, Pi.z); + vec<4, T, Q> iy(Pi.y, Pi.y, Pi.w, Pi.w); + vec<4, T, Q> fx(Pf.x, Pf.z, Pf.x, Pf.z); + vec<4, T, Q> fy(Pf.y, Pf.y, Pf.w, Pf.w); + + vec<4, T, Q> i = detail::permute(detail::permute(ix) + iy); + + vec<4, T, Q> gx = static_cast(2) * glm::fract(i / T(41)) - T(1); + vec<4, T, Q> gy = glm::abs(gx) - T(0.5); + vec<4, T, Q> tx = glm::floor(gx + T(0.5)); + gx = gx - tx; + + vec<2, T, Q> g00(gx.x, gy.x); + vec<2, T, Q> g10(gx.y, gy.y); + vec<2, T, Q> g01(gx.z, gy.z); + vec<2, T, Q> g11(gx.w, gy.w); + + vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11))); + g00 *= norm.x; + g01 *= norm.y; + g10 *= norm.z; + g11 *= norm.w; + + T n00 = dot(g00, vec<2, T, Q>(fx.x, fy.x)); + T n10 = dot(g10, vec<2, T, Q>(fx.y, fy.y)); + T n01 = dot(g01, vec<2, T, Q>(fx.z, fy.z)); + T n11 = dot(g11, vec<2, T, Q>(fx.w, fy.w)); + + vec<2, T, Q> fade_xy = detail::fade(vec<2, T, Q>(Pf.x, Pf.y)); + vec<2, T, Q> n_x = mix(vec<2, T, Q>(n00, n01), vec<2, T, Q>(n10, n11), fade_xy.x); + T n_xy = mix(n_x.x, n_x.y, fade_xy.y); + return T(2.3) * n_xy; + } + + // Classic Perlin noise + template + GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& Position) + { + vec<3, T, Q> Pi0 = floor(Position); // Integer part for indexing + vec<3, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1 + Pi0 = detail::mod289(Pi0); + Pi1 = detail::mod289(Pi1); + vec<3, T, Q> Pf0 = fract(Position); // Fractional part for interpolation + vec<3, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 + vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x); + vec<4, T, Q> iy = vec<4, T, Q>(vec<2, T, Q>(Pi0.y), vec<2, T, Q>(Pi1.y)); + vec<4, T, Q> iz0(Pi0.z); + vec<4, T, Q> iz1(Pi1.z); + + vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); + vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); + vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); + + vec<4, T, Q> gx0 = ixy0 * T(1.0 / 7.0); + vec<4, T, Q> gy0 = fract(floor(gx0) * T(1.0 / 7.0)) - T(0.5); + gx0 = fract(gx0); + vec<4, T, Q> gz0 = vec<4, 
T, Q>(0.5) - abs(gx0) - abs(gy0); + vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0.0)); + gx0 -= sz0 * (step(T(0), gx0) - T(0.5)); + gy0 -= sz0 * (step(T(0), gy0) - T(0.5)); + + vec<4, T, Q> gx1 = ixy1 * T(1.0 / 7.0); + vec<4, T, Q> gy1 = fract(floor(gx1) * T(1.0 / 7.0)) - T(0.5); + gx1 = fract(gx1); + vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1); + vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(0.0)); + gx1 -= sz1 * (step(T(0), gx1) - T(0.5)); + gy1 -= sz1 * (step(T(0), gy1) - T(0.5)); + + vec<3, T, Q> g000(gx0.x, gy0.x, gz0.x); + vec<3, T, Q> g100(gx0.y, gy0.y, gz0.y); + vec<3, T, Q> g010(gx0.z, gy0.z, gz0.z); + vec<3, T, Q> g110(gx0.w, gy0.w, gz0.w); + vec<3, T, Q> g001(gx1.x, gy1.x, gz1.x); + vec<3, T, Q> g101(gx1.y, gy1.y, gz1.y); + vec<3, T, Q> g011(gx1.z, gy1.z, gz1.z); + vec<3, T, Q> g111(gx1.w, gy1.w, gz1.w); + + vec<4, T, Q> norm0 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110))); + g000 *= norm0.x; + g010 *= norm0.y; + g100 *= norm0.z; + g110 *= norm0.w; + vec<4, T, Q> norm1 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111))); + g001 *= norm1.x; + g011 *= norm1.y; + g101 *= norm1.z; + g111 *= norm1.w; + + T n000 = dot(g000, Pf0); + T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z)); + T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z)); + T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z)); + T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z)); + T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z)); + T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z)); + T n111 = dot(g111, Pf1); + + vec<3, T, Q> fade_xyz = detail::fade(Pf0); + vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z); + vec<2, T, Q> n_yz = mix(vec<2, T, Q>(n_z.x, n_z.y), vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y); + T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x); + return T(2.2) * n_xyz; + } + /* + // Classic Perlin noise + template + GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& P) + { + vec<3, T, Q> Pi0 = floor(P); // Integer part for indexing + vec<3, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1 + Pi0 = mod(Pi0, T(289)); + Pi1 = mod(Pi1, T(289)); + vec<3, T, Q> Pf0 = fract(P); // Fractional part for interpolation + vec<3, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 + vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x); + vec<4, T, Q> iy(Pi0.y, Pi0.y, Pi1.y, Pi1.y); + vec<4, T, Q> iz0(Pi0.z); + vec<4, T, Q> iz1(Pi1.z); + + vec<4, T, Q> ixy = permute(permute(ix) + iy); + vec<4, T, Q> ixy0 = permute(ixy + iz0); + vec<4, T, Q> ixy1 = permute(ixy + iz1); + + vec<4, T, Q> gx0 = ixy0 / T(7); + vec<4, T, Q> gy0 = fract(floor(gx0) / T(7)) - T(0.5); + gx0 = fract(gx0); + vec<4, T, Q> gz0 = vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0); + vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0.0)); + gx0 -= sz0 * (step(0.0, gx0) - T(0.5)); + gy0 -= sz0 * (step(0.0, gy0) - T(0.5)); + + vec<4, T, Q> gx1 = ixy1 / T(7); + vec<4, T, Q> gy1 = fract(floor(gx1) / T(7)) - T(0.5); + gx1 = fract(gx1); + vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1); + vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(0.0)); + gx1 -= sz1 * (step(T(0), gx1) - T(0.5)); + gy1 -= sz1 * (step(T(0), gy1) - T(0.5)); + + vec<3, T, Q> g000(gx0.x, gy0.x, gz0.x); + vec<3, T, Q> g100(gx0.y, gy0.y, gz0.y); + vec<3, T, Q> g010(gx0.z, gy0.z, gz0.z); + vec<3, T, Q> g110(gx0.w, gy0.w, gz0.w); + vec<3, T, Q> g001(gx1.x, gy1.x, gz1.x); + vec<3, T, Q> g101(gx1.y, gy1.y, gz1.y); + vec<3, T, Q> g011(gx1.z, gy1.z, 
gz1.z); + vec<3, T, Q> g111(gx1.w, gy1.w, gz1.w); + + vec<4, T, Q> norm0 = taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110))); + g000 *= norm0.x; + g010 *= norm0.y; + g100 *= norm0.z; + g110 *= norm0.w; + vec<4, T, Q> norm1 = taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111))); + g001 *= norm1.x; + g011 *= norm1.y; + g101 *= norm1.z; + g111 *= norm1.w; + + T n000 = dot(g000, Pf0); + T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z)); + T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z)); + T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z)); + T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z)); + T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z)); + T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z)); + T n111 = dot(g111, Pf1); + + vec<3, T, Q> fade_xyz = fade(Pf0); + vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z); + vec<2, T, Q> n_yz = mix( + vec<2, T, Q>(n_z.x, n_z.y), + vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y); + T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x); + return T(2.2) * n_xyz; + } + */ + // Classic Perlin noise + template + GLM_FUNC_QUALIFIER T perlin(vec<4, T, Q> const& Position) + { + vec<4, T, Q> Pi0 = floor(Position); // Integer part for indexing + vec<4, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1 + Pi0 = mod(Pi0, vec<4, T, Q>(289)); + Pi1 = mod(Pi1, vec<4, T, Q>(289)); + vec<4, T, Q> Pf0 = fract(Position); // Fractional part for interpolation + vec<4, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 + vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x); + vec<4, T, Q> iy(Pi0.y, Pi0.y, Pi1.y, Pi1.y); + vec<4, T, Q> iz0(Pi0.z); + vec<4, T, Q> iz1(Pi1.z); + vec<4, T, Q> iw0(Pi0.w); + vec<4, T, Q> iw1(Pi1.w); + + vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); + vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); + vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); + vec<4, T, Q> ixy00 = detail::permute(ixy0 + iw0); + vec<4, T, Q> ixy01 = detail::permute(ixy0 + iw1); + vec<4, T, Q> ixy10 = detail::permute(ixy1 + iw0); + vec<4, T, Q> ixy11 = detail::permute(ixy1 + iw1); + + vec<4, T, Q> gx00 = ixy00 / T(7); + vec<4, T, Q> gy00 = floor(gx00) / T(7); + vec<4, T, Q> gz00 = floor(gy00) / T(6); + gx00 = fract(gx00) - T(0.5); + gy00 = fract(gy00) - T(0.5); + gz00 = fract(gz00) - T(0.5); + vec<4, T, Q> gw00 = vec<4, T, Q>(0.75) - abs(gx00) - abs(gy00) - abs(gz00); + vec<4, T, Q> sw00 = step(gw00, vec<4, T, Q>(0.0)); + gx00 -= sw00 * (step(T(0), gx00) - T(0.5)); + gy00 -= sw00 * (step(T(0), gy00) - T(0.5)); + + vec<4, T, Q> gx01 = ixy01 / T(7); + vec<4, T, Q> gy01 = floor(gx01) / T(7); + vec<4, T, Q> gz01 = floor(gy01) / T(6); + gx01 = fract(gx01) - T(0.5); + gy01 = fract(gy01) - T(0.5); + gz01 = fract(gz01) - T(0.5); + vec<4, T, Q> gw01 = vec<4, T, Q>(0.75) - abs(gx01) - abs(gy01) - abs(gz01); + vec<4, T, Q> sw01 = step(gw01, vec<4, T, Q>(0.0)); + gx01 -= sw01 * (step(T(0), gx01) - T(0.5)); + gy01 -= sw01 * (step(T(0), gy01) - T(0.5)); + + vec<4, T, Q> gx10 = ixy10 / T(7); + vec<4, T, Q> gy10 = floor(gx10) / T(7); + vec<4, T, Q> gz10 = floor(gy10) / T(6); + gx10 = fract(gx10) - T(0.5); + gy10 = fract(gy10) - T(0.5); + gz10 = fract(gz10) - T(0.5); + vec<4, T, Q> gw10 = vec<4, T, Q>(0.75) - abs(gx10) - abs(gy10) - abs(gz10); + vec<4, T, Q> sw10 = step(gw10, vec<4, T, Q>(0)); + gx10 -= sw10 * (step(T(0), gx10) - T(0.5)); + gy10 -= sw10 * (step(T(0), gy10) - T(0.5)); + + vec<4, T, Q> gx11 = ixy11 / T(7); + vec<4, T, Q> gy11 = 
floor(gx11) / T(7); + vec<4, T, Q> gz11 = floor(gy11) / T(6); + gx11 = fract(gx11) - T(0.5); + gy11 = fract(gy11) - T(0.5); + gz11 = fract(gz11) - T(0.5); + vec<4, T, Q> gw11 = vec<4, T, Q>(0.75) - abs(gx11) - abs(gy11) - abs(gz11); + vec<4, T, Q> sw11 = step(gw11, vec<4, T, Q>(0.0)); + gx11 -= sw11 * (step(T(0), gx11) - T(0.5)); + gy11 -= sw11 * (step(T(0), gy11) - T(0.5)); + + vec<4, T, Q> g0000(gx00.x, gy00.x, gz00.x, gw00.x); + vec<4, T, Q> g1000(gx00.y, gy00.y, gz00.y, gw00.y); + vec<4, T, Q> g0100(gx00.z, gy00.z, gz00.z, gw00.z); + vec<4, T, Q> g1100(gx00.w, gy00.w, gz00.w, gw00.w); + vec<4, T, Q> g0010(gx10.x, gy10.x, gz10.x, gw10.x); + vec<4, T, Q> g1010(gx10.y, gy10.y, gz10.y, gw10.y); + vec<4, T, Q> g0110(gx10.z, gy10.z, gz10.z, gw10.z); + vec<4, T, Q> g1110(gx10.w, gy10.w, gz10.w, gw10.w); + vec<4, T, Q> g0001(gx01.x, gy01.x, gz01.x, gw01.x); + vec<4, T, Q> g1001(gx01.y, gy01.y, gz01.y, gw01.y); + vec<4, T, Q> g0101(gx01.z, gy01.z, gz01.z, gw01.z); + vec<4, T, Q> g1101(gx01.w, gy01.w, gz01.w, gw01.w); + vec<4, T, Q> g0011(gx11.x, gy11.x, gz11.x, gw11.x); + vec<4, T, Q> g1011(gx11.y, gy11.y, gz11.y, gw11.y); + vec<4, T, Q> g0111(gx11.z, gy11.z, gz11.z, gw11.z); + vec<4, T, Q> g1111(gx11.w, gy11.w, gz11.w, gw11.w); + + vec<4, T, Q> norm00 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0000, g0000), dot(g0100, g0100), dot(g1000, g1000), dot(g1100, g1100))); + g0000 *= norm00.x; + g0100 *= norm00.y; + g1000 *= norm00.z; + g1100 *= norm00.w; + + vec<4, T, Q> norm01 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0001, g0001), dot(g0101, g0101), dot(g1001, g1001), dot(g1101, g1101))); + g0001 *= norm01.x; + g0101 *= norm01.y; + g1001 *= norm01.z; + g1101 *= norm01.w; + + vec<4, T, Q> norm10 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0010, g0010), dot(g0110, g0110), dot(g1010, g1010), dot(g1110, g1110))); + g0010 *= norm10.x; + g0110 *= norm10.y; + g1010 *= norm10.z; + g1110 *= norm10.w; + + vec<4, T, Q> norm11 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0011, g0011), dot(g0111, g0111), dot(g1011, g1011), dot(g1111, g1111))); + g0011 *= norm11.x; + g0111 *= norm11.y; + g1011 *= norm11.z; + g1111 *= norm11.w; + + T n0000 = dot(g0000, Pf0); + T n1000 = dot(g1000, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf0.w)); + T n0100 = dot(g0100, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf0.w)); + T n1100 = dot(g1100, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf0.w)); + T n0010 = dot(g0010, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf0.w)); + T n1010 = dot(g1010, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf0.w)); + T n0110 = dot(g0110, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf0.w)); + T n1110 = dot(g1110, vec<4, T, Q>(Pf1.x, Pf1.y, Pf1.z, Pf0.w)); + T n0001 = dot(g0001, vec<4, T, Q>(Pf0.x, Pf0.y, Pf0.z, Pf1.w)); + T n1001 = dot(g1001, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf1.w)); + T n0101 = dot(g0101, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf1.w)); + T n1101 = dot(g1101, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf1.w)); + T n0011 = dot(g0011, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf1.w)); + T n1011 = dot(g1011, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf1.w)); + T n0111 = dot(g0111, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf1.w)); + T n1111 = dot(g1111, Pf1); + + vec<4, T, Q> fade_xyzw = detail::fade(Pf0); + vec<4, T, Q> n_0w = mix(vec<4, T, Q>(n0000, n1000, n0100, n1100), vec<4, T, Q>(n0001, n1001, n0101, n1101), fade_xyzw.w); + vec<4, T, Q> n_1w = mix(vec<4, T, Q>(n0010, n1010, n0110, n1110), vec<4, T, Q>(n0011, n1011, n0111, n1111), fade_xyzw.w); + vec<4, T, Q> n_zw = mix(n_0w, n_1w, fade_xyzw.z); + vec<2, T, Q> n_yzw = mix(vec<2, T, Q>(n_zw.x, n_zw.y), vec<2, T, Q>(n_zw.z, n_zw.w), 
fade_xyzw.y); + T n_xyzw = mix(n_yzw.x, n_yzw.y, fade_xyzw.x); + return T(2.2) * n_xyzw; + } + + // Classic Perlin noise, periodic variant + template + GLM_FUNC_QUALIFIER T perlin(vec<2, T, Q> const& Position, vec<2, T, Q> const& rep) + { + vec<4, T, Q> Pi = floor(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) + vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); + vec<4, T, Q> Pf = fract(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) - vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); + Pi = mod(Pi, vec<4, T, Q>(rep.x, rep.y, rep.x, rep.y)); // To create noise with explicit period + Pi = mod(Pi, vec<4, T, Q>(289)); // To avoid truncation effects in permutation + vec<4, T, Q> ix(Pi.x, Pi.z, Pi.x, Pi.z); + vec<4, T, Q> iy(Pi.y, Pi.y, Pi.w, Pi.w); + vec<4, T, Q> fx(Pf.x, Pf.z, Pf.x, Pf.z); + vec<4, T, Q> fy(Pf.y, Pf.y, Pf.w, Pf.w); + + vec<4, T, Q> i = detail::permute(detail::permute(ix) + iy); + + vec<4, T, Q> gx = static_cast(2) * fract(i / T(41)) - T(1); + vec<4, T, Q> gy = abs(gx) - T(0.5); + vec<4, T, Q> tx = floor(gx + T(0.5)); + gx = gx - tx; + + vec<2, T, Q> g00(gx.x, gy.x); + vec<2, T, Q> g10(gx.y, gy.y); + vec<2, T, Q> g01(gx.z, gy.z); + vec<2, T, Q> g11(gx.w, gy.w); + + vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11))); + g00 *= norm.x; + g01 *= norm.y; + g10 *= norm.z; + g11 *= norm.w; + + T n00 = dot(g00, vec<2, T, Q>(fx.x, fy.x)); + T n10 = dot(g10, vec<2, T, Q>(fx.y, fy.y)); + T n01 = dot(g01, vec<2, T, Q>(fx.z, fy.z)); + T n11 = dot(g11, vec<2, T, Q>(fx.w, fy.w)); + + vec<2, T, Q> fade_xy = detail::fade(vec<2, T, Q>(Pf.x, Pf.y)); + vec<2, T, Q> n_x = mix(vec<2, T, Q>(n00, n01), vec<2, T, Q>(n10, n11), fade_xy.x); + T n_xy = mix(n_x.x, n_x.y, fade_xy.y); + return T(2.3) * n_xy; + } + + // Classic Perlin noise, periodic variant + template + GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& Position, vec<3, T, Q> const& rep) + { + vec<3, T, Q> Pi0 = mod(floor(Position), rep); // Integer part, modulo period + vec<3, T, Q> Pi1 = mod(Pi0 + vec<3, T, Q>(T(1)), rep); // Integer part + 1, mod period + Pi0 = mod(Pi0, vec<3, T, Q>(289)); + Pi1 = mod(Pi1, vec<3, T, Q>(289)); + vec<3, T, Q> Pf0 = fract(Position); // Fractional part for interpolation + vec<3, T, Q> Pf1 = Pf0 - vec<3, T, Q>(T(1)); // Fractional part - 1.0 + vec<4, T, Q> ix = vec<4, T, Q>(Pi0.x, Pi1.x, Pi0.x, Pi1.x); + vec<4, T, Q> iy = vec<4, T, Q>(Pi0.y, Pi0.y, Pi1.y, Pi1.y); + vec<4, T, Q> iz0(Pi0.z); + vec<4, T, Q> iz1(Pi1.z); + + vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); + vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); + vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); + + vec<4, T, Q> gx0 = ixy0 / T(7); + vec<4, T, Q> gy0 = fract(floor(gx0) / T(7)) - T(0.5); + gx0 = fract(gx0); + vec<4, T, Q> gz0 = vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0); + vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0)); + gx0 -= sz0 * (step(T(0), gx0) - T(0.5)); + gy0 -= sz0 * (step(T(0), gy0) - T(0.5)); + + vec<4, T, Q> gx1 = ixy1 / T(7); + vec<4, T, Q> gy1 = fract(floor(gx1) / T(7)) - T(0.5); + gx1 = fract(gx1); + vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1); + vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(T(0))); + gx1 -= sz1 * (step(T(0), gx1) - T(0.5)); + gy1 -= sz1 * (step(T(0), gy1) - T(0.5)); + + vec<3, T, Q> g000 = vec<3, T, Q>(gx0.x, gy0.x, gz0.x); + vec<3, T, Q> g100 = vec<3, T, Q>(gx0.y, gy0.y, gz0.y); + vec<3, T, Q> g010 = vec<3, T, Q>(gx0.z, gy0.z, gz0.z); + vec<3, T, Q> g110 = vec<3, T, Q>(gx0.w, gy0.w, gz0.w); + vec<3, T, Q> g001 = vec<3, T, 
Q>(gx1.x, gy1.x, gz1.x); + vec<3, T, Q> g101 = vec<3, T, Q>(gx1.y, gy1.y, gz1.y); + vec<3, T, Q> g011 = vec<3, T, Q>(gx1.z, gy1.z, gz1.z); + vec<3, T, Q> g111 = vec<3, T, Q>(gx1.w, gy1.w, gz1.w); + + vec<4, T, Q> norm0 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110))); + g000 *= norm0.x; + g010 *= norm0.y; + g100 *= norm0.z; + g110 *= norm0.w; + vec<4, T, Q> norm1 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111))); + g001 *= norm1.x; + g011 *= norm1.y; + g101 *= norm1.z; + g111 *= norm1.w; + + T n000 = dot(g000, Pf0); + T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z)); + T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z)); + T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z)); + T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z)); + T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z)); + T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z)); + T n111 = dot(g111, Pf1); + + vec<3, T, Q> fade_xyz = detail::fade(Pf0); + vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z); + vec<2, T, Q> n_yz = mix(vec<2, T, Q>(n_z.x, n_z.y), vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y); + T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x); + return T(2.2) * n_xyz; + } + + // Classic Perlin noise, periodic version + template + GLM_FUNC_QUALIFIER T perlin(vec<4, T, Q> const& Position, vec<4, T, Q> const& rep) + { + vec<4, T, Q> Pi0 = mod(floor(Position), rep); // Integer part modulo rep + vec<4, T, Q> Pi1 = mod(Pi0 + T(1), rep); // Integer part + 1 mod rep + vec<4, T, Q> Pf0 = fract(Position); // Fractional part for interpolation + vec<4, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 + vec<4, T, Q> ix = vec<4, T, Q>(Pi0.x, Pi1.x, Pi0.x, Pi1.x); + vec<4, T, Q> iy = vec<4, T, Q>(Pi0.y, Pi0.y, Pi1.y, Pi1.y); + vec<4, T, Q> iz0(Pi0.z); + vec<4, T, Q> iz1(Pi1.z); + vec<4, T, Q> iw0(Pi0.w); + vec<4, T, Q> iw1(Pi1.w); + + vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); + vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); + vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); + vec<4, T, Q> ixy00 = detail::permute(ixy0 + iw0); + vec<4, T, Q> ixy01 = detail::permute(ixy0 + iw1); + vec<4, T, Q> ixy10 = detail::permute(ixy1 + iw0); + vec<4, T, Q> ixy11 = detail::permute(ixy1 + iw1); + + vec<4, T, Q> gx00 = ixy00 / T(7); + vec<4, T, Q> gy00 = floor(gx00) / T(7); + vec<4, T, Q> gz00 = floor(gy00) / T(6); + gx00 = fract(gx00) - T(0.5); + gy00 = fract(gy00) - T(0.5); + gz00 = fract(gz00) - T(0.5); + vec<4, T, Q> gw00 = vec<4, T, Q>(0.75) - abs(gx00) - abs(gy00) - abs(gz00); + vec<4, T, Q> sw00 = step(gw00, vec<4, T, Q>(0)); + gx00 -= sw00 * (step(T(0), gx00) - T(0.5)); + gy00 -= sw00 * (step(T(0), gy00) - T(0.5)); + + vec<4, T, Q> gx01 = ixy01 / T(7); + vec<4, T, Q> gy01 = floor(gx01) / T(7); + vec<4, T, Q> gz01 = floor(gy01) / T(6); + gx01 = fract(gx01) - T(0.5); + gy01 = fract(gy01) - T(0.5); + gz01 = fract(gz01) - T(0.5); + vec<4, T, Q> gw01 = vec<4, T, Q>(0.75) - abs(gx01) - abs(gy01) - abs(gz01); + vec<4, T, Q> sw01 = step(gw01, vec<4, T, Q>(0.0)); + gx01 -= sw01 * (step(T(0), gx01) - T(0.5)); + gy01 -= sw01 * (step(T(0), gy01) - T(0.5)); + + vec<4, T, Q> gx10 = ixy10 / T(7); + vec<4, T, Q> gy10 = floor(gx10) / T(7); + vec<4, T, Q> gz10 = floor(gy10) / T(6); + gx10 = fract(gx10) - T(0.5); + gy10 = fract(gy10) - T(0.5); + gz10 = fract(gz10) - T(0.5); + vec<4, T, Q> gw10 = vec<4, T, Q>(0.75) - abs(gx10) - abs(gy10) - abs(gz10); + vec<4, T, Q> sw10 = 
step(gw10, vec<4, T, Q>(0.0)); + gx10 -= sw10 * (step(T(0), gx10) - T(0.5)); + gy10 -= sw10 * (step(T(0), gy10) - T(0.5)); + + vec<4, T, Q> gx11 = ixy11 / T(7); + vec<4, T, Q> gy11 = floor(gx11) / T(7); + vec<4, T, Q> gz11 = floor(gy11) / T(6); + gx11 = fract(gx11) - T(0.5); + gy11 = fract(gy11) - T(0.5); + gz11 = fract(gz11) - T(0.5); + vec<4, T, Q> gw11 = vec<4, T, Q>(0.75) - abs(gx11) - abs(gy11) - abs(gz11); + vec<4, T, Q> sw11 = step(gw11, vec<4, T, Q>(T(0))); + gx11 -= sw11 * (step(T(0), gx11) - T(0.5)); + gy11 -= sw11 * (step(T(0), gy11) - T(0.5)); + + vec<4, T, Q> g0000(gx00.x, gy00.x, gz00.x, gw00.x); + vec<4, T, Q> g1000(gx00.y, gy00.y, gz00.y, gw00.y); + vec<4, T, Q> g0100(gx00.z, gy00.z, gz00.z, gw00.z); + vec<4, T, Q> g1100(gx00.w, gy00.w, gz00.w, gw00.w); + vec<4, T, Q> g0010(gx10.x, gy10.x, gz10.x, gw10.x); + vec<4, T, Q> g1010(gx10.y, gy10.y, gz10.y, gw10.y); + vec<4, T, Q> g0110(gx10.z, gy10.z, gz10.z, gw10.z); + vec<4, T, Q> g1110(gx10.w, gy10.w, gz10.w, gw10.w); + vec<4, T, Q> g0001(gx01.x, gy01.x, gz01.x, gw01.x); + vec<4, T, Q> g1001(gx01.y, gy01.y, gz01.y, gw01.y); + vec<4, T, Q> g0101(gx01.z, gy01.z, gz01.z, gw01.z); + vec<4, T, Q> g1101(gx01.w, gy01.w, gz01.w, gw01.w); + vec<4, T, Q> g0011(gx11.x, gy11.x, gz11.x, gw11.x); + vec<4, T, Q> g1011(gx11.y, gy11.y, gz11.y, gw11.y); + vec<4, T, Q> g0111(gx11.z, gy11.z, gz11.z, gw11.z); + vec<4, T, Q> g1111(gx11.w, gy11.w, gz11.w, gw11.w); + + vec<4, T, Q> norm00 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0000, g0000), dot(g0100, g0100), dot(g1000, g1000), dot(g1100, g1100))); + g0000 *= norm00.x; + g0100 *= norm00.y; + g1000 *= norm00.z; + g1100 *= norm00.w; + + vec<4, T, Q> norm01 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0001, g0001), dot(g0101, g0101), dot(g1001, g1001), dot(g1101, g1101))); + g0001 *= norm01.x; + g0101 *= norm01.y; + g1001 *= norm01.z; + g1101 *= norm01.w; + + vec<4, T, Q> norm10 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0010, g0010), dot(g0110, g0110), dot(g1010, g1010), dot(g1110, g1110))); + g0010 *= norm10.x; + g0110 *= norm10.y; + g1010 *= norm10.z; + g1110 *= norm10.w; + + vec<4, T, Q> norm11 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0011, g0011), dot(g0111, g0111), dot(g1011, g1011), dot(g1111, g1111))); + g0011 *= norm11.x; + g0111 *= norm11.y; + g1011 *= norm11.z; + g1111 *= norm11.w; + + T n0000 = dot(g0000, Pf0); + T n1000 = dot(g1000, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf0.w)); + T n0100 = dot(g0100, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf0.w)); + T n1100 = dot(g1100, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf0.w)); + T n0010 = dot(g0010, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf0.w)); + T n1010 = dot(g1010, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf0.w)); + T n0110 = dot(g0110, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf0.w)); + T n1110 = dot(g1110, vec<4, T, Q>(Pf1.x, Pf1.y, Pf1.z, Pf0.w)); + T n0001 = dot(g0001, vec<4, T, Q>(Pf0.x, Pf0.y, Pf0.z, Pf1.w)); + T n1001 = dot(g1001, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf1.w)); + T n0101 = dot(g0101, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf1.w)); + T n1101 = dot(g1101, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf1.w)); + T n0011 = dot(g0011, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf1.w)); + T n1011 = dot(g1011, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf1.w)); + T n0111 = dot(g0111, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf1.w)); + T n1111 = dot(g1111, Pf1); + + vec<4, T, Q> fade_xyzw = detail::fade(Pf0); + vec<4, T, Q> n_0w = mix(vec<4, T, Q>(n0000, n1000, n0100, n1100), vec<4, T, Q>(n0001, n1001, n0101, n1101), fade_xyzw.w); + vec<4, T, Q> n_1w = mix(vec<4, T, Q>(n0010, n1010, n0110, n1110), vec<4, T, 
Q>(n0011, n1011, n0111, n1111), fade_xyzw.w); + vec<4, T, Q> n_zw = mix(n_0w, n_1w, fade_xyzw.z); + vec<2, T, Q> n_yzw = mix(vec<2, T, Q>(n_zw.x, n_zw.y), vec<2, T, Q>(n_zw.z, n_zw.w), fade_xyzw.y); + T n_xyzw = mix(n_yzw.x, n_yzw.y, fade_xyzw.x); + return T(2.2) * n_xyzw; + } + + template + GLM_FUNC_QUALIFIER T simplex(glm::vec<2, T, Q> const& v) + { + vec<4, T, Q> const C = vec<4, T, Q>( + T( 0.211324865405187), // (3.0 - sqrt(3.0)) / 6.0 + T( 0.366025403784439), // 0.5 * (sqrt(3.0) - 1.0) + T(-0.577350269189626), // -1.0 + 2.0 * C.x + T( 0.024390243902439)); // 1.0 / 41.0 + + // First corner + vec<2, T, Q> i = floor(v + dot(v, vec<2, T, Q>(C[1]))); + vec<2, T, Q> x0 = v - i + dot(i, vec<2, T, Q>(C[0])); + + // Other corners + //i1.x = step( x0.y, x0.x ); // x0.x > x0.y ? 1.0 : 0.0 + //i1.y = 1.0 - i1.x; + vec<2, T, Q> i1 = (x0.x > x0.y) ? vec<2, T, Q>(1, 0) : vec<2, T, Q>(0, 1); + // x0 = x0 - 0.0 + 0.0 * C.xx ; + // x1 = x0 - i1 + 1.0 * C.xx ; + // x2 = x0 - 1.0 + 2.0 * C.xx ; + vec<4, T, Q> x12 = vec<4, T, Q>(x0.x, x0.y, x0.x, x0.y) + vec<4, T, Q>(C.x, C.x, C.z, C.z); + x12 = vec<4, T, Q>(vec<2, T, Q>(x12) - i1, x12.z, x12.w); + + // Permutations + i = mod(i, vec<2, T, Q>(289)); // Avoid truncation effects in permutation + vec<3, T, Q> p = detail::permute( + detail::permute(i.y + vec<3, T, Q>(T(0), i1.y, T(1))) + + i.x + vec<3, T, Q>(T(0), i1.x, T(1))); + + vec<3, T, Q> m = max(vec<3, T, Q>(0.5) - vec<3, T, Q>( + dot(x0, x0), + dot(vec<2, T, Q>(x12.x, x12.y), vec<2, T, Q>(x12.x, x12.y)), + dot(vec<2, T, Q>(x12.z, x12.w), vec<2, T, Q>(x12.z, x12.w))), vec<3, T, Q>(0)); + m = m * m ; + m = m * m ; + + // Gradients: 41 points uniformly over a line, mapped onto a diamond. + // The ring size 17*17 = 289 is close to a multiple of 41 (41*7 = 287) + + vec<3, T, Q> x = static_cast(2) * fract(p * C.w) - T(1); + vec<3, T, Q> h = abs(x) - T(0.5); + vec<3, T, Q> ox = floor(x + T(0.5)); + vec<3, T, Q> a0 = x - ox; + + // Normalise gradients implicitly by scaling m + // Inlined for speed: m *= taylorInvSqrt( a0*a0 + h*h ); + m *= static_cast(1.79284291400159) - T(0.85373472095314) * (a0 * a0 + h * h); + + // Compute final noise value at P + vec<3, T, Q> g; + g.x = a0.x * x0.x + h.x * x0.y; + //g.yz = a0.yz * x12.xz + h.yz * x12.yw; + g.y = a0.y * x12.x + h.y * x12.y; + g.z = a0.z * x12.z + h.z * x12.w; + return T(130) * dot(m, g); + } + + template + GLM_FUNC_QUALIFIER T simplex(vec<3, T, Q> const& v) + { + vec<2, T, Q> const C(1.0 / 6.0, 1.0 / 3.0); + vec<4, T, Q> const D(0.0, 0.5, 1.0, 2.0); + + // First corner + vec<3, T, Q> i(floor(v + dot(v, vec<3, T, Q>(C.y)))); + vec<3, T, Q> x0(v - i + dot(i, vec<3, T, Q>(C.x))); + + // Other corners + vec<3, T, Q> g(step(vec<3, T, Q>(x0.y, x0.z, x0.x), x0)); + vec<3, T, Q> l(T(1) - g); + vec<3, T, Q> i1(min(g, vec<3, T, Q>(l.z, l.x, l.y))); + vec<3, T, Q> i2(max(g, vec<3, T, Q>(l.z, l.x, l.y))); + + // x0 = x0 - 0.0 + 0.0 * C.xxx; + // x1 = x0 - i1 + 1.0 * C.xxx; + // x2 = x0 - i2 + 2.0 * C.xxx; + // x3 = x0 - 1.0 + 3.0 * C.xxx; + vec<3, T, Q> x1(x0 - i1 + C.x); + vec<3, T, Q> x2(x0 - i2 + C.y); // 2.0*C.x = 1/3 = C.y + vec<3, T, Q> x3(x0 - D.y); // -1.0+3.0*C.x = -0.5 = -D.y + + // Permutations + i = detail::mod289(i); + vec<4, T, Q> p(detail::permute(detail::permute(detail::permute( + i.z + vec<4, T, Q>(T(0), i1.z, i2.z, T(1))) + + i.y + vec<4, T, Q>(T(0), i1.y, i2.y, T(1))) + + i.x + vec<4, T, Q>(T(0), i1.x, i2.x, T(1)))); + + // Gradients: 7x7 points over a square, mapped onto an octahedron. 
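	// Worked through for illustration: with D = (0, 0.5, 1, 2) and n_ = 1/7, the
	// vector ns built below comes out to (2/7, -13/14, 1/7). Each hashed value p
	// is reduced to j = p mod 49, split into grid indices x_ = floor(j / 7) and
	// y_ = j mod 7, then rescaled to x = x_ * 2/7 - 13/14 and y = y_ * 2/7 - 13/14.
	// The remaining coordinate is h = 1 - |x| - |y|; points with h < 0 fall outside
	// the octahedron and are folded back onto it by the s0/s1/sh correction below.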
+ // The ring size 17*17 = 289 is close to a multiple of 49 (49*6 = 294) + T n_ = static_cast(0.142857142857); // 1.0/7.0 + vec<3, T, Q> ns(n_ * vec<3, T, Q>(D.w, D.y, D.z) - vec<3, T, Q>(D.x, D.z, D.x)); + + vec<4, T, Q> j(p - T(49) * floor(p * ns.z * ns.z)); // mod(p,7*7) + + vec<4, T, Q> x_(floor(j * ns.z)); + vec<4, T, Q> y_(floor(j - T(7) * x_)); // mod(j,N) + + vec<4, T, Q> x(x_ * ns.x + ns.y); + vec<4, T, Q> y(y_ * ns.x + ns.y); + vec<4, T, Q> h(T(1) - abs(x) - abs(y)); + + vec<4, T, Q> b0(x.x, x.y, y.x, y.y); + vec<4, T, Q> b1(x.z, x.w, y.z, y.w); + + // vec4 s0 = vec4(lessThan(b0,0.0))*2.0 - 1.0; + // vec4 s1 = vec4(lessThan(b1,0.0))*2.0 - 1.0; + vec<4, T, Q> s0(floor(b0) * T(2) + T(1)); + vec<4, T, Q> s1(floor(b1) * T(2) + T(1)); + vec<4, T, Q> sh(-step(h, vec<4, T, Q>(0.0))); + + vec<4, T, Q> a0 = vec<4, T, Q>(b0.x, b0.z, b0.y, b0.w) + vec<4, T, Q>(s0.x, s0.z, s0.y, s0.w) * vec<4, T, Q>(sh.x, sh.x, sh.y, sh.y); + vec<4, T, Q> a1 = vec<4, T, Q>(b1.x, b1.z, b1.y, b1.w) + vec<4, T, Q>(s1.x, s1.z, s1.y, s1.w) * vec<4, T, Q>(sh.z, sh.z, sh.w, sh.w); + + vec<3, T, Q> p0(a0.x, a0.y, h.x); + vec<3, T, Q> p1(a0.z, a0.w, h.y); + vec<3, T, Q> p2(a1.x, a1.y, h.z); + vec<3, T, Q> p3(a1.z, a1.w, h.w); + + // Normalise gradients + vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(p0, p0), dot(p1, p1), dot(p2, p2), dot(p3, p3))); + p0 *= norm.x; + p1 *= norm.y; + p2 *= norm.z; + p3 *= norm.w; + + // Mix final noise value + vec<4, T, Q> m = max(T(0.6) - vec<4, T, Q>(dot(x0, x0), dot(x1, x1), dot(x2, x2), dot(x3, x3)), vec<4, T, Q>(0)); + m = m * m; + return T(42) * dot(m * m, vec<4, T, Q>(dot(p0, x0), dot(p1, x1), dot(p2, x2), dot(p3, x3))); + } + + template + GLM_FUNC_QUALIFIER T simplex(vec<4, T, Q> const& v) + { + vec<4, T, Q> const C( + 0.138196601125011, // (5 - sqrt(5))/20 G4 + 0.276393202250021, // 2 * G4 + 0.414589803375032, // 3 * G4 + -0.447213595499958); // -1 + 4 * G4 + + // (sqrt(5) - 1)/4 = F4, used once below + T const F4 = static_cast(0.309016994374947451); + + // First corner + vec<4, T, Q> i = floor(v + dot(v, vec<4, T, Q>(F4))); + vec<4, T, Q> x0 = v - i + dot(i, vec<4, T, Q>(C.x)); + + // Other corners + + // Rank sorting originally contributed by Bill Licea-Kane, AMD (formerly ATI) + vec<4, T, Q> i0; + vec<3, T, Q> isX = step(vec<3, T, Q>(x0.y, x0.z, x0.w), vec<3, T, Q>(x0.x)); + vec<3, T, Q> isYZ = step(vec<3, T, Q>(x0.z, x0.w, x0.w), vec<3, T, Q>(x0.y, x0.y, x0.z)); + // i0.x = dot(isX, vec3(1.0)); + //i0.x = isX.x + isX.y + isX.z; + //i0.yzw = static_cast(1) - isX; + i0 = vec<4, T, Q>(isX.x + isX.y + isX.z, T(1) - isX); + // i0.y += dot(isYZ.xy, vec2(1.0)); + i0.y += isYZ.x + isYZ.y; + //i0.zw += 1.0 - vec<2, T, Q>(isYZ.x, isYZ.y); + i0.z += static_cast(1) - isYZ.x; + i0.w += static_cast(1) - isYZ.y; + i0.z += isYZ.z; + i0.w += static_cast(1) - isYZ.z; + + // i0 now contains the unique values 0,1,2,3 in each channel + vec<4, T, Q> i3 = clamp(i0, T(0), T(1)); + vec<4, T, Q> i2 = clamp(i0 - T(1), T(0), T(1)); + vec<4, T, Q> i1 = clamp(i0 - T(2), T(0), T(1)); + + // x0 = x0 - 0.0 + 0.0 * C.xxxx + // x1 = x0 - i1 + 0.0 * C.xxxx + // x2 = x0 - i2 + 0.0 * C.xxxx + // x3 = x0 - i3 + 0.0 * C.xxxx + // x4 = x0 - 1.0 + 4.0 * C.xxxx + vec<4, T, Q> x1 = x0 - i1 + C.x; + vec<4, T, Q> x2 = x0 - i2 + C.y; + vec<4, T, Q> x3 = x0 - i3 + C.z; + vec<4, T, Q> x4 = x0 + C.w; + + // Permutations + i = mod(i, vec<4, T, Q>(289)); + T j0 = detail::permute(detail::permute(detail::permute(detail::permute(i.w) + i.z) + i.y) + i.x); + vec<4, T, Q> j1 = 
detail::permute(detail::permute(detail::permute(detail::permute( + i.w + vec<4, T, Q>(i1.w, i2.w, i3.w, T(1))) + + i.z + vec<4, T, Q>(i1.z, i2.z, i3.z, T(1))) + + i.y + vec<4, T, Q>(i1.y, i2.y, i3.y, T(1))) + + i.x + vec<4, T, Q>(i1.x, i2.x, i3.x, T(1))); + + // Gradients: 7x7x6 points over a cube, mapped onto a 4-cross polytope + // 7*7*6 = 294, which is close to the ring size 17*17 = 289. + vec<4, T, Q> ip = vec<4, T, Q>(T(1) / T(294), T(1) / T(49), T(1) / T(7), T(0)); + + vec<4, T, Q> p0 = detail::grad4(j0, ip); + vec<4, T, Q> p1 = detail::grad4(j1.x, ip); + vec<4, T, Q> p2 = detail::grad4(j1.y, ip); + vec<4, T, Q> p3 = detail::grad4(j1.z, ip); + vec<4, T, Q> p4 = detail::grad4(j1.w, ip); + + // Normalise gradients + vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(p0, p0), dot(p1, p1), dot(p2, p2), dot(p3, p3))); + p0 *= norm.x; + p1 *= norm.y; + p2 *= norm.z; + p3 *= norm.w; + p4 *= detail::taylorInvSqrt(dot(p4, p4)); + + // Mix contributions from the five corners + vec<3, T, Q> m0 = max(T(0.6) - vec<3, T, Q>(dot(x0, x0), dot(x1, x1), dot(x2, x2)), vec<3, T, Q>(0)); + vec<2, T, Q> m1 = max(T(0.6) - vec<2, T, Q>(dot(x3, x3), dot(x4, x4) ), vec<2, T, Q>(0)); + m0 = m0 * m0; + m1 = m1 * m1; + return T(49) * + (dot(m0 * m0, vec<3, T, Q>(dot(p0, x0), dot(p1, x1), dot(p2, x2))) + + dot(m1 * m1, vec<2, T, Q>(dot(p3, x3), dot(p4, x4)))); + } +}//namespace glm diff --git a/includes/glm/gtc/packing.hpp b/includes/glm/gtc/packing.hpp new file mode 100644 index 0000000..8e416b3 --- /dev/null +++ b/includes/glm/gtc/packing.hpp @@ -0,0 +1,728 @@ +/// @ref gtc_packing +/// @file glm/gtc/packing.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtc_packing GLM_GTC_packing +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// This extension provides a set of function to convert vertors to packed +/// formats. + +#pragma once + +// Dependency: +#include "type_precision.hpp" +#include "../ext/vector_packing.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_packing extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_packing + /// @{ + + /// First, converts the normalized floating-point value v into a 8-bit integer value. + /// Then, the results are packed into the returned 8-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packUnorm1x8: round(clamp(c, 0, +1) * 255.0) + /// + /// @see gtc_packing + /// @see uint16 packUnorm2x8(vec2 const& v) + /// @see uint32 packUnorm4x8(vec4 const& v) + /// @see GLSL packUnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint8 packUnorm1x8(float v); + + /// Convert a single 8-bit integer to a normalized floating-point value. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackUnorm4x8: f / 255.0 + /// + /// @see gtc_packing + /// @see vec2 unpackUnorm2x8(uint16 p) + /// @see vec4 unpackUnorm4x8(uint32 p) + /// @see GLSL unpackUnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL float unpackUnorm1x8(uint8 p); + + /// First, converts each component of the normalized floating-point value v into 8-bit integer values. + /// Then, the results are packed into the returned 16-bit unsigned integer. 
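As a quick illustration of the 8-bit unorm conversions declared above, a minimal sketch (assuming the vendored headers are visible on the include path as <glm/...>; the expected values follow directly from the round(clamp(c, 0, 1) * 255.0) rule and the low-byte-first packing order documented here):

#include <glm/gtc/packing.hpp>
#include <cstdio>

int main()
{
	// packUnorm1x8: round(clamp(0.5, 0, 1) * 255.0) = round(127.5) = 128
	glm::uint8 p1 = glm::packUnorm1x8(0.5f);
	float f1 = glm::unpackUnorm1x8(p1);              // 128 / 255.0, roughly 0.502

	// packUnorm2x8: x lands in the low byte, y in the high byte, so (0, 1) -> 0xFF00
	glm::uint16 p2 = glm::packUnorm2x8(glm::vec2(0.0f, 1.0f));
	glm::vec2 v2 = glm::unpackUnorm2x8(p2);          // (0.0, 1.0)

	std::printf("%u %.3f 0x%04X (%.1f, %.1f)\n",
		static_cast<unsigned>(p1), f1, static_cast<unsigned>(p2), v2.x, v2.y);
	return 0;
}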
+ /// + /// The conversion for component c of v to fixed point is done as follows: + /// packUnorm2x8: round(clamp(c, 0, +1) * 255.0) + /// + /// The first component of the vector will be written to the least significant bits of the output; + /// the last component will be written to the most significant bits. + /// + /// @see gtc_packing + /// @see uint8 packUnorm1x8(float const& v) + /// @see uint32 packUnorm4x8(vec4 const& v) + /// @see GLSL packUnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint16 packUnorm2x8(vec2 const& v); + + /// First, unpacks a single 16-bit unsigned integer p into a pair of 8-bit unsigned integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned two-component vector. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackUnorm4x8: f / 255.0 + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see gtc_packing + /// @see float unpackUnorm1x8(uint8 v) + /// @see vec4 unpackUnorm4x8(uint32 p) + /// @see GLSL unpackUnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec2 unpackUnorm2x8(uint16 p); + + /// First, converts the normalized floating-point value v into 8-bit integer value. + /// Then, the results are packed into the returned 8-bit unsigned integer. + /// + /// The conversion to fixed point is done as follows: + /// packSnorm1x8: round(clamp(s, -1, +1) * 127.0) + /// + /// @see gtc_packing + /// @see uint16 packSnorm2x8(vec2 const& v) + /// @see uint32 packSnorm4x8(vec4 const& v) + /// @see GLSL packSnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint8 packSnorm1x8(float s); + + /// First, unpacks a single 8-bit unsigned integer p into a single 8-bit signed integers. + /// Then, the value is converted to a normalized floating-point value to generate the returned scalar. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackSnorm1x8: clamp(f / 127.0, -1, +1) + /// + /// @see gtc_packing + /// @see vec2 unpackSnorm2x8(uint16 p) + /// @see vec4 unpackSnorm4x8(uint32 p) + /// @see GLSL unpackSnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL float unpackSnorm1x8(uint8 p); + + /// First, converts each component of the normalized floating-point value v into 8-bit integer values. + /// Then, the results are packed into the returned 16-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packSnorm2x8: round(clamp(c, -1, +1) * 127.0) + /// + /// The first component of the vector will be written to the least significant bits of the output; + /// the last component will be written to the most significant bits. + /// + /// @see gtc_packing + /// @see uint8 packSnorm1x8(float const& v) + /// @see uint32 packSnorm4x8(vec4 const& v) + /// @see GLSL packSnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint16 packSnorm2x8(vec2 const& v); + + /// First, unpacks a single 16-bit unsigned integer p into a pair of 8-bit signed integers. 
+ /// Then, each component is converted to a normalized floating-point value to generate the returned two-component vector. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackSnorm2x8: clamp(f / 127.0, -1, +1) + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see gtc_packing + /// @see float unpackSnorm1x8(uint8 p) + /// @see vec4 unpackSnorm4x8(uint32 p) + /// @see GLSL unpackSnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec2 unpackSnorm2x8(uint16 p); + + /// First, converts the normalized floating-point value v into a 16-bit integer value. + /// Then, the results are packed into the returned 16-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packUnorm1x16: round(clamp(c, 0, +1) * 65535.0) + /// + /// @see gtc_packing + /// @see uint16 packSnorm1x16(float const& v) + /// @see uint64 packSnorm4x16(vec4 const& v) + /// @see GLSL packUnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint16 packUnorm1x16(float v); + + /// First, unpacks a single 16-bit unsigned integer p into a of 16-bit unsigned integers. + /// Then, the value is converted to a normalized floating-point value to generate the returned scalar. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackUnorm1x16: f / 65535.0 + /// + /// @see gtc_packing + /// @see vec2 unpackUnorm2x16(uint32 p) + /// @see vec4 unpackUnorm4x16(uint64 p) + /// @see GLSL unpackUnorm2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL float unpackUnorm1x16(uint16 p); + + /// First, converts each component of the normalized floating-point value v into 16-bit integer values. + /// Then, the results are packed into the returned 64-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packUnorm4x16: round(clamp(c, 0, +1) * 65535.0) + /// + /// The first component of the vector will be written to the least significant bits of the output; + /// the last component will be written to the most significant bits. + /// + /// @see gtc_packing + /// @see uint16 packUnorm1x16(float const& v) + /// @see uint32 packUnorm2x16(vec2 const& v) + /// @see GLSL packUnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint64 packUnorm4x16(vec4 const& v); + + /// First, unpacks a single 64-bit unsigned integer p into four 16-bit unsigned integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackUnormx4x16: f / 65535.0 + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. 
+ /// + /// @see gtc_packing + /// @see float unpackUnorm1x16(uint16 p) + /// @see vec2 unpackUnorm2x16(uint32 p) + /// @see GLSL unpackUnorm2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec4 unpackUnorm4x16(uint64 p); + + /// First, converts the normalized floating-point value v into 16-bit integer value. + /// Then, the results are packed into the returned 16-bit unsigned integer. + /// + /// The conversion to fixed point is done as follows: + /// packSnorm1x8: round(clamp(s, -1, +1) * 32767.0) + /// + /// @see gtc_packing + /// @see uint32 packSnorm2x16(vec2 const& v) + /// @see uint64 packSnorm4x16(vec4 const& v) + /// @see GLSL packSnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint16 packSnorm1x16(float v); + + /// First, unpacks a single 16-bit unsigned integer p into a single 16-bit signed integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned scalar. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackSnorm1x16: clamp(f / 32767.0, -1, +1) + /// + /// @see gtc_packing + /// @see vec2 unpackSnorm2x16(uint32 p) + /// @see vec4 unpackSnorm4x16(uint64 p) + /// @see GLSL unpackSnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL float unpackSnorm1x16(uint16 p); + + /// First, converts each component of the normalized floating-point value v into 16-bit integer values. + /// Then, the results are packed into the returned 64-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packSnorm2x8: round(clamp(c, -1, +1) * 32767.0) + /// + /// The first component of the vector will be written to the least significant bits of the output; + /// the last component will be written to the most significant bits. + /// + /// @see gtc_packing + /// @see uint16 packSnorm1x16(float const& v) + /// @see uint32 packSnorm2x16(vec2 const& v) + /// @see GLSL packSnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint64 packSnorm4x16(vec4 const& v); + + /// First, unpacks a single 64-bit unsigned integer p into four 16-bit signed integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackSnorm4x16: clamp(f / 32767.0, -1, +1) + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see gtc_packing + /// @see float unpackSnorm1x16(uint16 p) + /// @see vec2 unpackSnorm2x16(uint32 p) + /// @see GLSL unpackSnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec4 unpackSnorm4x16(uint64 p); + + /// Returns an unsigned integer obtained by converting the components of a floating-point scalar + /// to the 16-bit floating-point representation found in the OpenGL Specification, + /// and then packing this 16-bit value into a 16-bit unsigned integer. 
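For example, since 1.0f and -2.0f are exactly representable in binary16, the packed values are the standard IEEE-754 half encodings (a small sketch, under the same include-path assumption as the GLM headers themselves):

#include <glm/gtc/packing.hpp>
#include <cassert>

int main()
{
	// 1.0f in binary16 is 0x3C00: sign 0, biased exponent 15, mantissa 0.
	glm::uint16 h = glm::packHalf1x16(1.0f);
	assert(h == 0x3C00);
	assert(glm::unpackHalf1x16(h) == 1.0f);    // exactly representable, so it round-trips

	// -2.0f is 0xC000: sign 1, biased exponent 16, mantissa 0.
	assert(glm::packHalf1x16(-2.0f) == 0xC000);
	return 0;
}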
+ /// + /// @see gtc_packing + /// @see uint32 packHalf2x16(vec2 const& v) + /// @see uint64 packHalf4x16(vec4 const& v) + /// @see GLSL packHalf2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint16 packHalf1x16(float v); + + /// Returns a floating-point scalar with components obtained by unpacking a 16-bit unsigned integer into a 16-bit value, + /// interpreted as a 16-bit floating-point number according to the OpenGL Specification, + /// and converting it to 32-bit floating-point values. + /// + /// @see gtc_packing + /// @see vec2 unpackHalf2x16(uint32 const& v) + /// @see vec4 unpackHalf4x16(uint64 const& v) + /// @see GLSL unpackHalf2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL float unpackHalf1x16(uint16 v); + + /// Returns an unsigned integer obtained by converting the components of a four-component floating-point vector + /// to the 16-bit floating-point representation found in the OpenGL Specification, + /// and then packing these four 16-bit values into a 64-bit unsigned integer. + /// The first vector component specifies the 16 least-significant bits of the result; + /// the forth component specifies the 16 most-significant bits. + /// + /// @see gtc_packing + /// @see uint16 packHalf1x16(float const& v) + /// @see uint32 packHalf2x16(vec2 const& v) + /// @see GLSL packHalf2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint64 packHalf4x16(vec4 const& v); + + /// Returns a four-component floating-point vector with components obtained by unpacking a 64-bit unsigned integer into four 16-bit values, + /// interpreting those values as 16-bit floating-point numbers according to the OpenGL Specification, + /// and converting them to 32-bit floating-point values. + /// The first component of the vector is obtained from the 16 least-significant bits of v; + /// the forth component is obtained from the 16 most-significant bits of v. + /// + /// @see gtc_packing + /// @see float unpackHalf1x16(uint16 const& v) + /// @see vec2 unpackHalf2x16(uint32 const& v) + /// @see GLSL unpackHalf2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec4 unpackHalf4x16(uint64 p); + + /// Returns an unsigned integer obtained by converting the components of a four-component signed integer vector + /// to the 10-10-10-2-bit signed integer representation found in the OpenGL Specification, + /// and then packing these four values into a 32-bit unsigned integer. + /// The first vector component specifies the 10 least-significant bits of the result; + /// the forth component specifies the 2 most-significant bits. + /// + /// @see gtc_packing + /// @see uint32 packI3x10_1x2(uvec4 const& v) + /// @see uint32 packSnorm3x10_1x2(vec4 const& v) + /// @see uint32 packUnorm3x10_1x2(vec4 const& v) + /// @see ivec4 unpackI3x10_1x2(uint32 const& p) + GLM_FUNC_DECL uint32 packI3x10_1x2(ivec4 const& v); + + /// Unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit signed integers. + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. 
+ /// + /// @see gtc_packing + /// @see uint32 packU3x10_1x2(uvec4 const& v) + /// @see vec4 unpackSnorm3x10_1x2(uint32 const& p); + /// @see uvec4 unpackI3x10_1x2(uint32 const& p); + GLM_FUNC_DECL ivec4 unpackI3x10_1x2(uint32 p); + + /// Returns an unsigned integer obtained by converting the components of a four-component unsigned integer vector + /// to the 10-10-10-2-bit unsigned integer representation found in the OpenGL Specification, + /// and then packing these four values into a 32-bit unsigned integer. + /// The first vector component specifies the 10 least-significant bits of the result; + /// the forth component specifies the 2 most-significant bits. + /// + /// @see gtc_packing + /// @see uint32 packI3x10_1x2(ivec4 const& v) + /// @see uint32 packSnorm3x10_1x2(vec4 const& v) + /// @see uint32 packUnorm3x10_1x2(vec4 const& v) + /// @see ivec4 unpackU3x10_1x2(uint32 const& p) + GLM_FUNC_DECL uint32 packU3x10_1x2(uvec4 const& v); + + /// Unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit unsigned integers. + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see gtc_packing + /// @see uint32 packU3x10_1x2(uvec4 const& v) + /// @see vec4 unpackSnorm3x10_1x2(uint32 const& p); + /// @see uvec4 unpackI3x10_1x2(uint32 const& p); + GLM_FUNC_DECL uvec4 unpackU3x10_1x2(uint32 p); + + /// First, converts the first three components of the normalized floating-point value v into 10-bit signed integer values. + /// Then, converts the forth component of the normalized floating-point value v into 2-bit signed integer values. + /// Then, the results are packed into the returned 32-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packSnorm3x10_1x2(xyz): round(clamp(c, -1, +1) * 511.0) + /// packSnorm3x10_1x2(w): round(clamp(c, -1, +1) * 1.0) + /// + /// The first vector component specifies the 10 least-significant bits of the result; + /// the forth component specifies the 2 most-significant bits. + /// + /// @see gtc_packing + /// @see vec4 unpackSnorm3x10_1x2(uint32 const& p) + /// @see uint32 packUnorm3x10_1x2(vec4 const& v) + /// @see uint32 packU3x10_1x2(uvec4 const& v) + /// @see uint32 packI3x10_1x2(ivec4 const& v) + GLM_FUNC_DECL uint32 packSnorm3x10_1x2(vec4 const& v); + + /// First, unpacks a single 32-bit unsigned integer p into four 16-bit signed integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackSnorm3x10_1x2(xyz): clamp(f / 511.0, -1, +1) + /// unpackSnorm3x10_1x2(w): clamp(f / 511.0, -1, +1) + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see gtc_packing + /// @see uint32 packSnorm3x10_1x2(vec4 const& v) + /// @see vec4 unpackUnorm3x10_1x2(uint32 const& p)) + /// @see uvec4 unpackI3x10_1x2(uint32 const& p) + /// @see uvec4 unpackU3x10_1x2(uint32 const& p) + GLM_FUNC_DECL vec4 unpackSnorm3x10_1x2(uint32 p); + + /// First, converts the first three components of the normalized floating-point value v into 10-bit unsigned integer values. 
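A brief usage sketch for the 10-10-10-2 snorm packing documented above (0 and ±1 are exactly representable in both the 10-bit and the 2-bit fields, so this particular round trip should come back unchanged):

#include <glm/gtc/packing.hpp>
#include <cstdio>

int main()
{
	// Typical use: squeezing a unit normal plus a sign/handedness flag into
	// one 32-bit vertex attribute.
	glm::vec4 n(0.0f, 1.0f, 0.0f, 1.0f);
	glm::uint32 packed = glm::packSnorm3x10_1x2(n);
	glm::vec4 back = glm::unpackSnorm3x10_1x2(packed);   // approximately (0, 1, 0, 1)
	std::printf("(%g, %g, %g, %g)\n", back.x, back.y, back.z, back.w);
	return 0;
}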
+ /// Then, converts the forth component of the normalized floating-point value v into 2-bit signed uninteger values. + /// Then, the results are packed into the returned 32-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packUnorm3x10_1x2(xyz): round(clamp(c, 0, +1) * 1023.0) + /// packUnorm3x10_1x2(w): round(clamp(c, 0, +1) * 3.0) + /// + /// The first vector component specifies the 10 least-significant bits of the result; + /// the forth component specifies the 2 most-significant bits. + /// + /// @see gtc_packing + /// @see vec4 unpackUnorm3x10_1x2(uint32 const& p) + /// @see uint32 packUnorm3x10_1x2(vec4 const& v) + /// @see uint32 packU3x10_1x2(uvec4 const& v) + /// @see uint32 packI3x10_1x2(ivec4 const& v) + GLM_FUNC_DECL uint32 packUnorm3x10_1x2(vec4 const& v); + + /// First, unpacks a single 32-bit unsigned integer p into four 16-bit signed integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackSnorm3x10_1x2(xyz): clamp(f / 1023.0, 0, +1) + /// unpackSnorm3x10_1x2(w): clamp(f / 3.0, 0, +1) + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see gtc_packing + /// @see uint32 packSnorm3x10_1x2(vec4 const& v) + /// @see vec4 unpackInorm3x10_1x2(uint32 const& p)) + /// @see uvec4 unpackI3x10_1x2(uint32 const& p) + /// @see uvec4 unpackU3x10_1x2(uint32 const& p) + GLM_FUNC_DECL vec4 unpackUnorm3x10_1x2(uint32 p); + + /// First, converts the first two components of the normalized floating-point value v into 11-bit signless floating-point values. + /// Then, converts the third component of the normalized floating-point value v into a 10-bit signless floating-point value. + /// Then, the results are packed into the returned 32-bit unsigned integer. + /// + /// The first vector component specifies the 11 least-significant bits of the result; + /// the last component specifies the 10 most-significant bits. + /// + /// @see gtc_packing + /// @see vec3 unpackF2x11_1x10(uint32 const& p) + GLM_FUNC_DECL uint32 packF2x11_1x10(vec3 const& v); + + /// First, unpacks a single 32-bit unsigned integer p into two 11-bit signless floating-point values and one 10-bit signless floating-point value . + /// Then, each component is converted to a normalized floating-point value to generate the returned three-component vector. + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see gtc_packing + /// @see uint32 packF2x11_1x10(vec3 const& v) + GLM_FUNC_DECL vec3 unpackF2x11_1x10(uint32 p); + + + /// First, converts the first two components of the normalized floating-point value v into 11-bit signless floating-point values. + /// Then, converts the third component of the normalized floating-point value v into a 10-bit signless floating-point value. + /// Then, the results are packed into the returned 32-bit unsigned integer. + /// + /// The first vector component specifies the 11 least-significant bits of the result; + /// the last component specifies the 10 most-significant bits. 
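The 11-11-10 packed-float layout above corresponds to the common R11F_G11F_B10F render-target format; a minimal round-trip sketch (the chosen values are small powers of two plus a few mantissa bits, so the narrowing happens to be exact here, while arbitrary inputs are generally rounded):

#include <glm/gtc/packing.hpp>
#include <cstdio>

int main()
{
	// Unsigned packed floats: negative components cannot be represented.
	glm::vec3 hdr(1.0f, 0.25f, 4.5f);
	glm::uint32 packed = glm::packF2x11_1x10(hdr);
	glm::vec3 back = glm::unpackF2x11_1x10(packed);   // (1.0, 0.25, 4.5)
	std::printf("(%g, %g, %g)\n", back.x, back.y, back.z);
	return 0;
}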
+ /// + /// packF3x9_E1x5 allows encoding into RGBE / RGB9E5 format + /// + /// @see gtc_packing + /// @see vec3 unpackF3x9_E1x5(uint32 const& p) + GLM_FUNC_DECL uint32 packF3x9_E1x5(vec3 const& v); + + /// First, unpacks a single 32-bit unsigned integer p into two 11-bit signless floating-point values and one 10-bit signless floating-point value . + /// Then, each component is converted to a normalized floating-point value to generate the returned three-component vector. + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// unpackF3x9_E1x5 allows decoding RGBE / RGB9E5 data + /// + /// @see gtc_packing + /// @see uint32 packF3x9_E1x5(vec3 const& v) + GLM_FUNC_DECL vec3 unpackF3x9_E1x5(uint32 p); + + /// Returns an unsigned integer vector obtained by converting the components of a floating-point vector + /// to the 16-bit floating-point representation found in the OpenGL Specification. + /// The first vector component specifies the 16 least-significant bits of the result; + /// the forth component specifies the 16 most-significant bits. + /// + /// @see gtc_packing + /// @see vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& p) + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + template + GLM_FUNC_DECL vec<4, T, Q> packRGBM(vec<3, T, Q> const& rgb); + + /// Returns a floating-point vector with components obtained by reinterpreting an integer vector as 16-bit floating-point numbers and converting them to 32-bit floating-point values. + /// The first component of the vector is obtained from the 16 least-significant bits of v; + /// the forth component is obtained from the 16 most-significant bits of v. + /// + /// @see gtc_packing + /// @see vec<4, T, Q> packRGBM(vec<3, float, Q> const& v) + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + template + GLM_FUNC_DECL vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& rgbm); + + /// Returns an unsigned integer vector obtained by converting the components of a floating-point vector + /// to the 16-bit floating-point representation found in the OpenGL Specification. + /// The first vector component specifies the 16 least-significant bits of the result; + /// the forth component specifies the 16 most-significant bits. + /// + /// @see gtc_packing + /// @see vec unpackHalf(vec const& p) + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + template + GLM_FUNC_DECL vec packHalf(vec const& v); + + /// Returns a floating-point vector with components obtained by reinterpreting an integer vector as 16-bit floating-point numbers and converting them to 32-bit floating-point values. + /// The first component of the vector is obtained from the 16 least-significant bits of v; + /// the forth component is obtained from the 16 most-significant bits of v. + /// + /// @see gtc_packing + /// @see vec packHalf(vec const& v) + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + template + GLM_FUNC_DECL vec unpackHalf(vec const& p); + + /// Convert each component of the normalized floating-point vector into unsigned integer values. + /// + /// @see gtc_packing + /// @see vec unpackUnorm(vec const& p); + template + GLM_FUNC_DECL vec packUnorm(vec const& v); + + /// Convert a packed integer to a normalized floating-point vector. 
+ /// + /// @see gtc_packing + /// @see vec packUnorm(vec const& v) + template + GLM_FUNC_DECL vec unpackUnorm(vec const& v); + + /// Convert each component of the normalized floating-point vector into signed integer values. + /// + /// @see gtc_packing + /// @see vec unpackSnorm(vec const& p); + template + GLM_FUNC_DECL vec packSnorm(vec const& v); + + /// Convert a packed integer to a normalized floating-point vector. + /// + /// @see gtc_packing + /// @see vec packSnorm(vec const& v) + template + GLM_FUNC_DECL vec unpackSnorm(vec const& v); + + /// Convert each component of the normalized floating-point vector into unsigned integer values. + /// + /// @see gtc_packing + /// @see vec2 unpackUnorm2x4(uint8 p) + GLM_FUNC_DECL uint8 packUnorm2x4(vec2 const& v); + + /// Convert a packed integer to a normalized floating-point vector. + /// + /// @see gtc_packing + /// @see uint8 packUnorm2x4(vec2 const& v) + GLM_FUNC_DECL vec2 unpackUnorm2x4(uint8 p); + + /// Convert each component of the normalized floating-point vector into unsigned integer values. + /// + /// @see gtc_packing + /// @see vec4 unpackUnorm4x4(uint16 p) + GLM_FUNC_DECL uint16 packUnorm4x4(vec4 const& v); + + /// Convert a packed integer to a normalized floating-point vector. + /// + /// @see gtc_packing + /// @see uint16 packUnorm4x4(vec4 const& v) + GLM_FUNC_DECL vec4 unpackUnorm4x4(uint16 p); + + /// Convert each component of the normalized floating-point vector into unsigned integer values. + /// + /// @see gtc_packing + /// @see vec3 unpackUnorm1x5_1x6_1x5(uint16 p) + GLM_FUNC_DECL uint16 packUnorm1x5_1x6_1x5(vec3 const& v); + + /// Convert a packed integer to a normalized floating-point vector. + /// + /// @see gtc_packing + /// @see uint16 packUnorm1x5_1x6_1x5(vec3 const& v) + GLM_FUNC_DECL vec3 unpackUnorm1x5_1x6_1x5(uint16 p); + + /// Convert each component of the normalized floating-point vector into unsigned integer values. + /// + /// @see gtc_packing + /// @see vec4 unpackUnorm3x5_1x1(uint16 p) + GLM_FUNC_DECL uint16 packUnorm3x5_1x1(vec4 const& v); + + /// Convert a packed integer to a normalized floating-point vector. + /// + /// @see gtc_packing + /// @see uint16 packUnorm3x5_1x1(vec4 const& v) + GLM_FUNC_DECL vec4 unpackUnorm3x5_1x1(uint16 p); + + /// Convert each component of the normalized floating-point vector into unsigned integer values. + /// + /// @see gtc_packing + /// @see vec3 unpackUnorm2x3_1x2(uint8 p) + GLM_FUNC_DECL uint8 packUnorm2x3_1x2(vec3 const& v); + + /// Convert a packed integer to a normalized floating-point vector. + /// + /// @see gtc_packing + /// @see uint8 packUnorm2x3_1x2(vec3 const& v) + GLM_FUNC_DECL vec3 unpackUnorm2x3_1x2(uint8 p); + + + + /// Convert each component from an integer vector into a packed integer. + /// + /// @see gtc_packing + /// @see i8vec2 unpackInt2x8(int16 p) + GLM_FUNC_DECL int16 packInt2x8(i8vec2 const& v); + + /// Convert a packed integer into an integer vector. + /// + /// @see gtc_packing + /// @see int16 packInt2x8(i8vec2 const& v) + GLM_FUNC_DECL i8vec2 unpackInt2x8(int16 p); + + /// Convert each component from an integer vector into a packed unsigned integer. + /// + /// @see gtc_packing + /// @see u8vec2 unpackInt2x8(uint16 p) + GLM_FUNC_DECL uint16 packUint2x8(u8vec2 const& v); + + /// Convert a packed integer into an integer vector. + /// + /// @see gtc_packing + /// @see uint16 packInt2x8(u8vec2 const& v) + GLM_FUNC_DECL u8vec2 unpackUint2x8(uint16 p); + + /// Convert each component from an integer vector into a packed integer. 
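The small fixed-point formats above behave the same way; for instance packUnorm1x5_1x6_1x5 is the familiar 5-6-5 (RGB565-style) layout, and saturating every field is independent of the component-to-field order (a short sketch):

#include <glm/gtc/packing.hpp>
#include <cassert>

int main()
{
	// All-ones input saturates the 5-, 6- and 5-bit fields, giving 0xFFFF;
	// all-zeros gives 0, whatever the component-to-field order is.
	assert(glm::packUnorm1x5_1x6_1x5(glm::vec3(1.0f)) == 0xFFFF);
	assert(glm::packUnorm1x5_1x6_1x5(glm::vec3(0.0f)) == 0);

	// 0 and 1 are exactly representable, so this round trip is lossless.
	glm::uint16 p = glm::packUnorm1x5_1x6_1x5(glm::vec3(1.0f, 0.0f, 1.0f));
	glm::vec3 back = glm::unpackUnorm1x5_1x6_1x5(p);
	assert(back == glm::vec3(1.0f, 0.0f, 1.0f));
	return 0;
}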
+ /// + /// @see gtc_packing + /// @see i8vec4 unpackInt4x8(int32 p) + GLM_FUNC_DECL int32 packInt4x8(i8vec4 const& v); + + /// Convert a packed integer into an integer vector. + /// + /// @see gtc_packing + /// @see int32 packInt2x8(i8vec4 const& v) + GLM_FUNC_DECL i8vec4 unpackInt4x8(int32 p); + + /// Convert each component from an integer vector into a packed unsigned integer. + /// + /// @see gtc_packing + /// @see u8vec4 unpackUint4x8(uint32 p) + GLM_FUNC_DECL uint32 packUint4x8(u8vec4 const& v); + + /// Convert a packed integer into an integer vector. + /// + /// @see gtc_packing + /// @see uint32 packUint4x8(u8vec2 const& v) + GLM_FUNC_DECL u8vec4 unpackUint4x8(uint32 p); + + /// Convert each component from an integer vector into a packed integer. + /// + /// @see gtc_packing + /// @see i16vec2 unpackInt2x16(int p) + GLM_FUNC_DECL int packInt2x16(i16vec2 const& v); + + /// Convert a packed integer into an integer vector. + /// + /// @see gtc_packing + /// @see int packInt2x16(i16vec2 const& v) + GLM_FUNC_DECL i16vec2 unpackInt2x16(int p); + + /// Convert each component from an integer vector into a packed integer. + /// + /// @see gtc_packing + /// @see i16vec4 unpackInt4x16(int64 p) + GLM_FUNC_DECL int64 packInt4x16(i16vec4 const& v); + + /// Convert a packed integer into an integer vector. + /// + /// @see gtc_packing + /// @see int64 packInt4x16(i16vec4 const& v) + GLM_FUNC_DECL i16vec4 unpackInt4x16(int64 p); + + /// Convert each component from an integer vector into a packed unsigned integer. + /// + /// @see gtc_packing + /// @see u16vec2 unpackUint2x16(uint p) + GLM_FUNC_DECL uint packUint2x16(u16vec2 const& v); + + /// Convert a packed integer into an integer vector. + /// + /// @see gtc_packing + /// @see uint packUint2x16(u16vec2 const& v) + GLM_FUNC_DECL u16vec2 unpackUint2x16(uint p); + + /// Convert each component from an integer vector into a packed unsigned integer. + /// + /// @see gtc_packing + /// @see u16vec4 unpackUint4x16(uint64 p) + GLM_FUNC_DECL uint64 packUint4x16(u16vec4 const& v); + + /// Convert a packed integer into an integer vector. + /// + /// @see gtc_packing + /// @see uint64 packUint4x16(u16vec4 const& v) + GLM_FUNC_DECL u16vec4 unpackUint4x16(uint64 p); + + /// Convert each component from an integer vector into a packed integer. + /// + /// @see gtc_packing + /// @see i32vec2 unpackInt2x32(int p) + GLM_FUNC_DECL int64 packInt2x32(i32vec2 const& v); + + /// Convert a packed integer into an integer vector. + /// + /// @see gtc_packing + /// @see int packInt2x16(i32vec2 const& v) + GLM_FUNC_DECL i32vec2 unpackInt2x32(int64 p); + + /// Convert each component from an integer vector into a packed unsigned integer. + /// + /// @see gtc_packing + /// @see u32vec2 unpackUint2x32(int p) + GLM_FUNC_DECL uint64 packUint2x32(u32vec2 const& v); + + /// Convert a packed integer into an integer vector. 
+ /// + /// @see gtc_packing + /// @see int packUint2x16(u32vec2 const& v) + GLM_FUNC_DECL u32vec2 unpackUint2x32(uint64 p); + + /// @} +}// namespace glm + +#include "packing.inl" diff --git a/includes/glm/gtc/packing.inl b/includes/glm/gtc/packing.inl new file mode 100644 index 0000000..30462bc --- /dev/null +++ b/includes/glm/gtc/packing.inl @@ -0,0 +1,952 @@ +/// @ref gtc_packing + +#include "../ext/scalar_relational.hpp" +#include "../ext/vector_relational.hpp" +#include "../common.hpp" +#include "../vec2.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" +#include "../detail/type_half.hpp" +#include "type_ptr.hpp" +#include +#include + +namespace glm{ +namespace detail +{ + GLM_FUNC_QUALIFIER glm::uint16 float2half(glm::uint32 f) + { + // 10 bits => EE EEEFFFFF + // 11 bits => EEE EEFFFFFF + // Half bits => SEEEEEFF FFFFFFFF + // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF + + // 0x00007c00 => 00000000 00000000 01111100 00000000 + // 0x000003ff => 00000000 00000000 00000011 11111111 + // 0x38000000 => 00111000 00000000 00000000 00000000 + // 0x7f800000 => 01111111 10000000 00000000 00000000 + // 0x00008000 => 00000000 00000000 10000000 00000000 + return + ((f >> 16) & 0x8000) | // sign + ((((f & 0x7f800000) - 0x38000000) >> 13) & 0x7c00) | // exponential + ((f >> 13) & 0x03ff); // Mantissa + } + + GLM_FUNC_QUALIFIER glm::uint32 float2packed11(glm::uint32 f) + { + // 10 bits => EE EEEFFFFF + // 11 bits => EEE EEFFFFFF + // Half bits => SEEEEEFF FFFFFFFF + // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF + + // 0x000007c0 => 00000000 00000000 00000111 11000000 + // 0x00007c00 => 00000000 00000000 01111100 00000000 + // 0x000003ff => 00000000 00000000 00000011 11111111 + // 0x38000000 => 00111000 00000000 00000000 00000000 + // 0x7f800000 => 01111111 10000000 00000000 00000000 + // 0x00008000 => 00000000 00000000 10000000 00000000 + return + ((((f & 0x7f800000) - 0x38000000) >> 17) & 0x07c0) | // exponential + ((f >> 17) & 0x003f); // Mantissa + } + + GLM_FUNC_QUALIFIER glm::uint32 packed11ToFloat(glm::uint32 p) + { + // 10 bits => EE EEEFFFFF + // 11 bits => EEE EEFFFFFF + // Half bits => SEEEEEFF FFFFFFFF + // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF + + // 0x000007c0 => 00000000 00000000 00000111 11000000 + // 0x00007c00 => 00000000 00000000 01111100 00000000 + // 0x000003ff => 00000000 00000000 00000011 11111111 + // 0x38000000 => 00111000 00000000 00000000 00000000 + // 0x7f800000 => 01111111 10000000 00000000 00000000 + // 0x00008000 => 00000000 00000000 10000000 00000000 + return + ((((p & 0x07c0) << 17) + 0x38000000) & 0x7f800000) | // exponential + ((p & 0x003f) << 17); // Mantissa + } + + GLM_FUNC_QUALIFIER glm::uint32 float2packed10(glm::uint32 f) + { + // 10 bits => EE EEEFFFFF + // 11 bits => EEE EEFFFFFF + // Half bits => SEEEEEFF FFFFFFFF + // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF + + // 0x0000001F => 00000000 00000000 00000000 00011111 + // 0x0000003F => 00000000 00000000 00000000 00111111 + // 0x000003E0 => 00000000 00000000 00000011 11100000 + // 0x000007C0 => 00000000 00000000 00000111 11000000 + // 0x00007C00 => 00000000 00000000 01111100 00000000 + // 0x000003FF => 00000000 00000000 00000011 11111111 + // 0x38000000 => 00111000 00000000 00000000 00000000 + // 0x7f800000 => 01111111 10000000 00000000 00000000 + // 0x00008000 => 00000000 00000000 10000000 00000000 + return + ((((f & 0x7f800000) - 0x38000000) >> 18) & 0x03E0) | // exponential + ((f >> 18) & 0x001f); // Mantissa + } + + GLM_FUNC_QUALIFIER glm::uint32 
packed10ToFloat(glm::uint32 p) + { + // 10 bits => EE EEEFFFFF + // 11 bits => EEE EEFFFFFF + // Half bits => SEEEEEFF FFFFFFFF + // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF + + // 0x0000001F => 00000000 00000000 00000000 00011111 + // 0x0000003F => 00000000 00000000 00000000 00111111 + // 0x000003E0 => 00000000 00000000 00000011 11100000 + // 0x000007C0 => 00000000 00000000 00000111 11000000 + // 0x00007C00 => 00000000 00000000 01111100 00000000 + // 0x000003FF => 00000000 00000000 00000011 11111111 + // 0x38000000 => 00111000 00000000 00000000 00000000 + // 0x7f800000 => 01111111 10000000 00000000 00000000 + // 0x00008000 => 00000000 00000000 10000000 00000000 + return + ((((p & 0x03E0) << 18) + 0x38000000) & 0x7f800000) | // exponential + ((p & 0x001f) << 18); // Mantissa + } + + GLM_FUNC_QUALIFIER glm::uint half2float(glm::uint h) + { + return ((h & 0x8000) << 16) | ((( h & 0x7c00) + 0x1C000) << 13) | ((h & 0x03FF) << 13); + } + + GLM_FUNC_QUALIFIER glm::uint floatTo11bit(float x) + { + if(x == 0.0f) + return 0u; + else if(glm::isnan(x)) + return ~0u; + else if(glm::isinf(x)) + return 0x1Fu << 6u; + + uint Pack = 0u; + memcpy(&Pack, &x, sizeof(Pack)); + return float2packed11(Pack); + } + + GLM_FUNC_QUALIFIER float packed11bitToFloat(glm::uint x) + { + if(x == 0) + return 0.0f; + else if(x == ((1 << 11) - 1)) + return ~0;//NaN + else if(x == (0x1f << 6)) + return ~0;//Inf + + uint Result = packed11ToFloat(x); + + float Temp = 0; + memcpy(&Temp, &Result, sizeof(Temp)); + return Temp; + } + + GLM_FUNC_QUALIFIER glm::uint floatTo10bit(float x) + { + if(x == 0.0f) + return 0u; + else if(glm::isnan(x)) + return ~0u; + else if(glm::isinf(x)) + return 0x1Fu << 5u; + + uint Pack = 0; + memcpy(&Pack, &x, sizeof(Pack)); + return float2packed10(Pack); + } + + GLM_FUNC_QUALIFIER float packed10bitToFloat(glm::uint x) + { + if(x == 0) + return 0.0f; + else if(x == ((1 << 10) - 1)) + return ~0;//NaN + else if(x == (0x1f << 5)) + return ~0;//Inf + + uint Result = packed10ToFloat(x); + + float Temp = 0; + memcpy(&Temp, &Result, sizeof(Temp)); + return Temp; + } + +// GLM_FUNC_QUALIFIER glm::uint f11_f11_f10(float x, float y, float z) +// { +// return ((floatTo11bit(x) & ((1 << 11) - 1)) << 0) | ((floatTo11bit(y) & ((1 << 11) - 1)) << 11) | ((floatTo10bit(z) & ((1 << 10) - 1)) << 22); +// } + +#if GLM_SILENT_WARNINGS == GLM_ENABLE +# if defined(__clang__) +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wpadded" +# endif +#endif + + union u3u3u2 + { + struct Data + { + uint x : 3; + uint y : 3; + uint z : 2; + } data; + uint8 pack; + }; + + union u4u4 + { + struct Data + { + uint x : 4; + uint y : 4; + } data; + uint8 pack; + }; + + union u4u4u4u4 + { + struct Data + { + uint x : 4; + uint y : 4; + uint z : 4; + uint w : 4; + } data; + uint16 pack; + }; + + union u5u6u5 + { + struct Data + { + uint x : 5; + uint y : 6; + uint z : 5; + } data; + uint16 pack; + }; + + union u5u5u5u1 + { + struct Data + { + uint x : 5; + uint y : 5; + uint z : 5; + uint w : 1; + } data; + uint16 pack; + }; + +#if GLM_SILENT_WARNINGS == GLM_ENABLE +# if defined(__clang__) +# pragma clang diagnostic pop +# endif +#endif + + union u10u10u10u2 + { + struct Data + { + uint x : 10; + uint y : 10; + uint z : 10; + uint w : 2; + } data; + uint32 pack; + }; + + union i10i10i10i2 + { + struct Data + { + int x : 10; + int y : 10; + int z : 10; + int w : 2; + } data; + uint32 pack; + }; + + union u9u9u9e5 + { + struct Data + { + uint x : 9; + uint y : 9; + uint z : 9; + uint w : 5; + } data; + uint32 pack; 
+ }; + + template + struct compute_half + {}; + + template + struct compute_half<1, Q> + { + GLM_FUNC_QUALIFIER static vec<1, uint16, Q> pack(vec<1, float, Q> const& v) + { + int16 const Unpack(detail::toFloat16(v.x)); + u16vec1 Packed; + memcpy(value_ptr(Packed), &Unpack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER static vec<1, float, Q> unpack(vec<1, uint16, Q> const& v) + { + i16vec1 Unpack; + memcpy(value_ptr(Unpack), value_ptr(v), sizeof(Unpack)); + return vec<1, float, Q>(detail::toFloat32(v.x)); + } + }; + + template + struct compute_half<2, Q> + { + GLM_FUNC_QUALIFIER static vec<2, uint16, Q> pack(vec<2, float, Q> const& v) + { + vec<2, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y)); + u16vec2 Packed; + memcpy(value_ptr(Packed), value_ptr(Unpack), sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER static vec<2, float, Q> unpack(vec<2, uint16, Q> const& v) + { + i16vec2 Unpack; + memcpy(value_ptr(Unpack), value_ptr(v), sizeof(Unpack)); + return vec<2, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y)); + } + }; + + template + struct compute_half<3, Q> + { + GLM_FUNC_QUALIFIER static vec<3, uint16, Q> pack(vec<3, float, Q> const& v) + { + vec<3, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y), detail::toFloat16(v.z)); + u16vec3 Packed; + memcpy(value_ptr(Packed), value_ptr(Unpack), sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER static vec<3, float, Q> unpack(vec<3, uint16, Q> const& v) + { + i16vec3 Unpack; + memcpy(value_ptr(Unpack), &v, sizeof(Unpack)); + return vec<3, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y), detail::toFloat32(v.z)); + } + }; + + template + struct compute_half<4, Q> + { + GLM_FUNC_QUALIFIER static vec<4, uint16, Q> pack(vec<4, float, Q> const& v) + { + vec<4, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y), detail::toFloat16(v.z), detail::toFloat16(v.w)); + u16vec4 Packed; + memcpy(value_ptr(Packed), value_ptr(Unpack), sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER static vec<4, float, Q> unpack(vec<4, uint16, Q> const& v) + { + i16vec4 Unpack; + memcpy(value_ptr(Unpack), &v, sizeof(Unpack)); + return vec<4, float, Q>(detail::toFloat32(Unpack.x), detail::toFloat32(Unpack.y), detail::toFloat32(Unpack.z), detail::toFloat32(Unpack.w)); + } + }; +}//namespace detail + + GLM_FUNC_QUALIFIER uint8 packUnorm1x8(float v) + { + return static_cast(round(clamp(v, 0.0f, 1.0f) * 255.0f)); + } + + GLM_FUNC_QUALIFIER float unpackUnorm1x8(uint8 p) + { + float const Unpack(p); + return Unpack * static_cast(0.0039215686274509803921568627451); // 1 / 255 + } + + GLM_FUNC_QUALIFIER uint16 packUnorm2x8(vec2 const& v) + { + u8vec2 const Topack(round(clamp(v, 0.0f, 1.0f) * 255.0f)); + + uint16 Unpack = 0; + memcpy(&Unpack, &Topack, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER vec2 unpackUnorm2x8(uint16 p) + { + u8vec2 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return vec2(Unpack) * float(0.0039215686274509803921568627451); // 1 / 255 + } + + GLM_FUNC_QUALIFIER uint8 packSnorm1x8(float v) + { + int8 const Topack(static_cast(round(clamp(v ,-1.0f, 1.0f) * 127.0f))); + uint8 Packed = 0; + memcpy(&Packed, &Topack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER float unpackSnorm1x8(uint8 p) + { + int8 Unpack = 0; + memcpy(&Unpack, &p, sizeof(Unpack)); + return clamp( + static_cast(Unpack) * 0.00787401574803149606299212598425f, // 1.0f / 127.0f + -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER uint16 
packSnorm2x8(vec2 const& v) + { + i8vec2 const Topack(round(clamp(v, -1.0f, 1.0f) * 127.0f)); + uint16 Packed = 0; + memcpy(&Packed, value_ptr(Topack), sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER vec2 unpackSnorm2x8(uint16 p) + { + i8vec2 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return clamp( + vec2(Unpack) * 0.00787401574803149606299212598425f, // 1.0f / 127.0f + -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER uint16 packUnorm1x16(float s) + { + return static_cast(round(clamp(s, 0.0f, 1.0f) * 65535.0f)); + } + + GLM_FUNC_QUALIFIER float unpackUnorm1x16(uint16 p) + { + float const Unpack(p); + return Unpack * 1.5259021896696421759365224689097e-5f; // 1.0 / 65535.0 + } + + GLM_FUNC_QUALIFIER uint64 packUnorm4x16(vec4 const& v) + { + u16vec4 const Topack(round(clamp(v , 0.0f, 1.0f) * 65535.0f)); + uint64 Packed = 0; + memcpy(&Packed, &Topack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER vec4 unpackUnorm4x16(uint64 p) + { + u16vec4 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return vec4(Unpack) * 1.5259021896696421759365224689097e-5f; // 1.0 / 65535.0 + } + + GLM_FUNC_QUALIFIER uint16 packSnorm1x16(float v) + { + int16 const Topack = static_cast(round(clamp(v ,-1.0f, 1.0f) * 32767.0f)); + uint16 Packed = 0; + memcpy(&Packed, &Topack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER float unpackSnorm1x16(uint16 p) + { + int16 Unpack = 0; + memcpy(&Unpack, &p, sizeof(Unpack)); + return clamp( + static_cast(Unpack) * 3.0518509475997192297128208258309e-5f, //1.0f / 32767.0f, + -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER uint64 packSnorm4x16(vec4 const& v) + { + i16vec4 const Topack(round(clamp(v ,-1.0f, 1.0f) * 32767.0f)); + uint64 Packed = 0; + memcpy(&Packed, value_ptr(Topack), sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER vec4 unpackSnorm4x16(uint64 p) + { + i16vec4 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return clamp( + vec4(Unpack) * 3.0518509475997192297128208258309e-5f, //1.0f / 32767.0f, + -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER uint16 packHalf1x16(float v) + { + int16 const Topack(detail::toFloat16(v)); + uint16 Packed = 0; + memcpy(&Packed, &Topack, sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER float unpackHalf1x16(uint16 v) + { + int16 Unpack = 0; + memcpy(&Unpack, &v, sizeof(Unpack)); + return detail::toFloat32(Unpack); + } + + GLM_FUNC_QUALIFIER uint64 packHalf4x16(glm::vec4 const& v) + { + i16vec4 const Unpack( + detail::toFloat16(v.x), + detail::toFloat16(v.y), + detail::toFloat16(v.z), + detail::toFloat16(v.w)); + uint64 Packed = 0; + memcpy(&Packed, value_ptr(Unpack), sizeof(Packed)); + return Packed; + } + + GLM_FUNC_QUALIFIER glm::vec4 unpackHalf4x16(uint64 v) + { + i16vec4 Unpack; + memcpy(value_ptr(Unpack), &v, sizeof(Unpack)); + return vec4( + detail::toFloat32(Unpack.x), + detail::toFloat32(Unpack.y), + detail::toFloat32(Unpack.z), + detail::toFloat32(Unpack.w)); + } + + GLM_FUNC_QUALIFIER uint32 packI3x10_1x2(ivec4 const& v) + { + detail::i10i10i10i2 Result; + Result.data.x = v.x; + Result.data.y = v.y; + Result.data.z = v.z; + Result.data.w = v.w; + return Result.pack; + } + + GLM_FUNC_QUALIFIER ivec4 unpackI3x10_1x2(uint32 v) + { + detail::i10i10i10i2 Unpack; + Unpack.pack = v; + return ivec4( + Unpack.data.x, + Unpack.data.y, + Unpack.data.z, + Unpack.data.w); + } + + GLM_FUNC_QUALIFIER uint32 packU3x10_1x2(uvec4 const& v) + { + detail::u10u10u10u2 Result; + Result.data.x = v.x; + Result.data.y = v.y; + Result.data.z = v.z; + Result.data.w = v.w; + 
return Result.pack; + } + + GLM_FUNC_QUALIFIER uvec4 unpackU3x10_1x2(uint32 v) + { + detail::u10u10u10u2 Unpack; + Unpack.pack = v; + return uvec4( + Unpack.data.x, + Unpack.data.y, + Unpack.data.z, + Unpack.data.w); + } + + GLM_FUNC_QUALIFIER uint32 packSnorm3x10_1x2(vec4 const& v) + { + ivec4 const Pack(round(clamp(v,-1.0f, 1.0f) * vec4(511.f, 511.f, 511.f, 1.f))); + + detail::i10i10i10i2 Result; + Result.data.x = Pack.x; + Result.data.y = Pack.y; + Result.data.z = Pack.z; + Result.data.w = Pack.w; + return Result.pack; + } + + GLM_FUNC_QUALIFIER vec4 unpackSnorm3x10_1x2(uint32 v) + { + detail::i10i10i10i2 Unpack; + Unpack.pack = v; + + vec4 const Result(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w); + + return clamp(Result * vec4(1.f / 511.f, 1.f / 511.f, 1.f / 511.f, 1.f), -1.0f, 1.0f); + } + + GLM_FUNC_QUALIFIER uint32 packUnorm3x10_1x2(vec4 const& v) + { + uvec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec4(1023.f, 1023.f, 1023.f, 3.f))); + + detail::u10u10u10u2 Result; + Result.data.x = Unpack.x; + Result.data.y = Unpack.y; + Result.data.z = Unpack.z; + Result.data.w = Unpack.w; + return Result.pack; + } + + GLM_FUNC_QUALIFIER vec4 unpackUnorm3x10_1x2(uint32 v) + { + vec4 const ScaleFactors(1.0f / 1023.f, 1.0f / 1023.f, 1.0f / 1023.f, 1.0f / 3.f); + + detail::u10u10u10u2 Unpack; + Unpack.pack = v; + return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactors; + } + + GLM_FUNC_QUALIFIER uint32 packF2x11_1x10(vec3 const& v) + { + return + ((detail::floatTo11bit(v.x) & ((1 << 11) - 1)) << 0) | + ((detail::floatTo11bit(v.y) & ((1 << 11) - 1)) << 11) | + ((detail::floatTo10bit(v.z) & ((1 << 10) - 1)) << 22); + } + + GLM_FUNC_QUALIFIER vec3 unpackF2x11_1x10(uint32 v) + { + return vec3( + detail::packed11bitToFloat(v >> 0), + detail::packed11bitToFloat(v >> 11), + detail::packed10bitToFloat(v >> 22)); + } + + GLM_FUNC_QUALIFIER uint32 packF3x9_E1x5(vec3 const& v) + { + float const SharedExpMax = (pow(2.0f, 9.0f - 1.0f) / pow(2.0f, 9.0f)) * pow(2.0f, 31.f - 15.f); + vec3 const Color = clamp(v, 0.0f, SharedExpMax); + float const MaxColor = max(Color.x, max(Color.y, Color.z)); + + float const ExpSharedP = max(-15.f - 1.f, floor(log2(MaxColor))) + 1.0f + 15.f; + float const MaxShared = floor(MaxColor / pow(2.0f, (ExpSharedP - 15.f - 9.f)) + 0.5f); + float const ExpShared = equal(MaxShared, pow(2.0f, 9.0f), epsilon()) ? 
ExpSharedP + 1.0f : ExpSharedP; + + uvec3 const ColorComp(floor(Color / pow(2.f, (ExpShared - 15.f - 9.f)) + 0.5f)); + + detail::u9u9u9e5 Unpack; + Unpack.data.x = ColorComp.x; + Unpack.data.y = ColorComp.y; + Unpack.data.z = ColorComp.z; + Unpack.data.w = uint(ExpShared); + return Unpack.pack; + } + + GLM_FUNC_QUALIFIER vec3 unpackF3x9_E1x5(uint32 v) + { + detail::u9u9u9e5 Unpack; + Unpack.pack = v; + + return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * pow(2.0f, static_cast(Unpack.data.w) - 15.f - 9.f); + } + + // Based on Brian Karis http://graphicrants.blogspot.fr/2009/04/rgbm-color-encoding.html + template + GLM_FUNC_QUALIFIER vec<4, T, Q> packRGBM(vec<3, T, Q> const& rgb) + { + vec<3, T, Q> const Color(rgb * static_cast(1.0 / 6.0)); + T Alpha = clamp(max(max(Color.x, Color.y), max(Color.z, static_cast(1e-6))), static_cast(0), static_cast(1)); + Alpha = ceil(Alpha * static_cast(255.0)) / static_cast(255.0); + return vec<4, T, Q>(Color / Alpha, Alpha); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& rgbm) + { + return vec<3, T, Q>(rgbm.x, rgbm.y, rgbm.z) * rgbm.w * static_cast(6); + } + + template + GLM_FUNC_QUALIFIER vec packHalf(vec const& v) + { + return detail::compute_half::pack(v); + } + + template + GLM_FUNC_QUALIFIER vec unpackHalf(vec const& v) + { + return detail::compute_half::unpack(v); + } + + template + GLM_FUNC_QUALIFIER vec packUnorm(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); + + return vec(round(clamp(v, static_cast(0), static_cast(1)) * static_cast(std::numeric_limits::max()))); + } + + template + GLM_FUNC_QUALIFIER vec unpackUnorm(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); + + return vec(v) * (static_cast(1) / static_cast(std::numeric_limits::max())); + } + + template + GLM_FUNC_QUALIFIER vec packSnorm(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); + + return vec(round(clamp(v , static_cast(-1), static_cast(1)) * static_cast(std::numeric_limits::max()))); + } + + template + GLM_FUNC_QUALIFIER vec unpackSnorm(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); + + return clamp(vec(v) * (static_cast(1) / static_cast(std::numeric_limits::max())), static_cast(-1), static_cast(1)); + } + + GLM_FUNC_QUALIFIER uint8 packUnorm2x4(vec2 const& v) + { + u32vec2 const Unpack(round(clamp(v, 0.0f, 1.0f) * 15.0f)); + detail::u4u4 Result; + Result.data.x = Unpack.x; + Result.data.y = Unpack.y; + return Result.pack; + } + + GLM_FUNC_QUALIFIER vec2 unpackUnorm2x4(uint8 v) + { + float const ScaleFactor(1.f / 15.f); + detail::u4u4 Unpack; + Unpack.pack = v; + return vec2(Unpack.data.x, Unpack.data.y) * ScaleFactor; + } + + GLM_FUNC_QUALIFIER uint16 packUnorm4x4(vec4 const& v) + { + u32vec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * 15.0f)); + detail::u4u4u4u4 Result; + Result.data.x = Unpack.x; + Result.data.y = Unpack.y; + Result.data.z = Unpack.z; + Result.data.w = Unpack.w; + return Result.pack; + } + + GLM_FUNC_QUALIFIER 
vec4 unpackUnorm4x4(uint16 v) + { + float const ScaleFactor(1.f / 15.f); + detail::u4u4u4u4 Unpack; + Unpack.pack = v; + return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactor; + } + + GLM_FUNC_QUALIFIER uint16 packUnorm1x5_1x6_1x5(vec3 const& v) + { + u32vec3 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec3(31.f, 63.f, 31.f))); + detail::u5u6u5 Result; + Result.data.x = Unpack.x; + Result.data.y = Unpack.y; + Result.data.z = Unpack.z; + return Result.pack; + } + + GLM_FUNC_QUALIFIER vec3 unpackUnorm1x5_1x6_1x5(uint16 v) + { + vec3 const ScaleFactor(1.f / 31.f, 1.f / 63.f, 1.f / 31.f); + detail::u5u6u5 Unpack; + Unpack.pack = v; + return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * ScaleFactor; + } + + GLM_FUNC_QUALIFIER uint16 packUnorm3x5_1x1(vec4 const& v) + { + u32vec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec4(31.f, 31.f, 31.f, 1.f))); + detail::u5u5u5u1 Result; + Result.data.x = Unpack.x; + Result.data.y = Unpack.y; + Result.data.z = Unpack.z; + Result.data.w = Unpack.w; + return Result.pack; + } + + GLM_FUNC_QUALIFIER vec4 unpackUnorm3x5_1x1(uint16 v) + { + vec4 const ScaleFactor(1.f / 31.f, 1.f / 31.f, 1.f / 31.f, 1.f); + detail::u5u5u5u1 Unpack; + Unpack.pack = v; + return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactor; + } + + GLM_FUNC_QUALIFIER uint8 packUnorm2x3_1x2(vec3 const& v) + { + u32vec3 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec3(7.f, 7.f, 3.f))); + detail::u3u3u2 Result; + Result.data.x = Unpack.x; + Result.data.y = Unpack.y; + Result.data.z = Unpack.z; + return Result.pack; + } + + GLM_FUNC_QUALIFIER vec3 unpackUnorm2x3_1x2(uint8 v) + { + vec3 const ScaleFactor(1.f / 7.f, 1.f / 7.f, 1.f / 3.f); + detail::u3u3u2 Unpack; + Unpack.pack = v; + return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * ScaleFactor; + } + + GLM_FUNC_QUALIFIER int16 packInt2x8(i8vec2 const& v) + { + int16 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER i8vec2 unpackInt2x8(int16 p) + { + i8vec2 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER uint16 packUint2x8(u8vec2 const& v) + { + uint16 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER u8vec2 unpackUint2x8(uint16 p) + { + u8vec2 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER int32 packInt4x8(i8vec4 const& v) + { + int32 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER i8vec4 unpackInt4x8(int32 p) + { + i8vec4 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER uint32 packUint4x8(u8vec4 const& v) + { + uint32 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER u8vec4 unpackUint4x8(uint32 p) + { + u8vec4 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER int packInt2x16(i16vec2 const& v) + { + int Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER i16vec2 unpackInt2x16(int p) + { + i16vec2 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER int64 packInt4x16(i16vec4 const& v) + { + int64 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER i16vec4 unpackInt4x16(int64 p) + { + i16vec4 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER uint 
packUint2x16(u16vec2 const& v) + { + uint Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER u16vec2 unpackUint2x16(uint p) + { + u16vec2 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER uint64 packUint4x16(u16vec4 const& v) + { + uint64 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER u16vec4 unpackUint4x16(uint64 p) + { + u16vec4 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER int64 packInt2x32(i32vec2 const& v) + { + int64 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER i32vec2 unpackInt2x32(int64 p) + { + i32vec2 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return Unpack; + } + + GLM_FUNC_QUALIFIER uint64 packUint2x32(u32vec2 const& v) + { + uint64 Pack = 0; + memcpy(&Pack, &v, sizeof(Pack)); + return Pack; + } + + GLM_FUNC_QUALIFIER u32vec2 unpackUint2x32(uint64 p) + { + u32vec2 Unpack; + memcpy(value_ptr(Unpack), &p, sizeof(Unpack)); + return Unpack; + } +}//namespace glm + diff --git a/includes/glm/gtc/quaternion.hpp b/includes/glm/gtc/quaternion.hpp new file mode 100644 index 0000000..314449e --- /dev/null +++ b/includes/glm/gtc/quaternion.hpp @@ -0,0 +1,173 @@ +/// @ref gtc_quaternion +/// @file glm/gtc/quaternion.hpp +/// +/// @see core (dependence) +/// @see gtc_constants (dependence) +/// +/// @defgroup gtc_quaternion GLM_GTC_quaternion +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Defines a templated quaternion type and several quaternion operations. + +#pragma once + +// Dependency: +#include "../gtc/constants.hpp" +#include "../gtc/matrix_transform.hpp" +#include "../ext/vector_relational.hpp" +#include "../ext/quaternion_common.hpp" +#include "../ext/quaternion_float.hpp" +#include "../ext/quaternion_float_precision.hpp" +#include "../ext/quaternion_double.hpp" +#include "../ext/quaternion_double_precision.hpp" +#include "../ext/quaternion_relational.hpp" +#include "../ext/quaternion_geometric.hpp" +#include "../ext/quaternion_trigonometric.hpp" +#include "../ext/quaternion_transform.hpp" +#include "../detail/type_mat3x3.hpp" +#include "../detail/type_mat4x4.hpp" +#include "../detail/type_vec3.hpp" +#include "../detail/type_vec4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_quaternion extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_quaternion + /// @{ + + /// Returns euler angles, pitch as x, yaw as y, roll as z. + /// The result is expressed in radians. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL vec<3, T, Q> eulerAngles(qua const& x); + + /// Returns roll value of euler angles expressed in radians. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL T roll(qua const& x); + + /// Returns pitch value of euler angles expressed in radians. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL T pitch(qua const& x); + + /// Returns yaw value of euler angles expressed in radians. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL T yaw(qua const& x); + + /// Converts a quaternion to a 3 * 3 matrix. + /// + /// @tparam T Floating-point scalar types. 
+ /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL mat<3, 3, T, Q> mat3_cast(qua const& x); + + /// Converts a quaternion to a 4 * 4 matrix. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL mat<4, 4, T, Q> mat4_cast(qua const& x); + + /// Converts a pure rotation 3 * 3 matrix to a quaternion. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL qua quat_cast(mat<3, 3, T, Q> const& x); + + /// Converts a pure rotation 4 * 4 matrix to a quaternion. + /// + /// @tparam T Floating-point scalar types. + /// + /// @see gtc_quaternion + template + GLM_FUNC_DECL qua quat_cast(mat<4, 4, T, Q> const& x); + + /// Returns the component-wise comparison result of x < y. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_quaternion_relational + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> lessThan(qua const& x, qua const& y); + + /// Returns the component-wise comparison of result x <= y. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_quaternion_relational + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> lessThanEqual(qua const& x, qua const& y); + + /// Returns the component-wise comparison of result x > y. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_quaternion_relational + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> greaterThan(qua const& x, qua const& y); + + /// Returns the component-wise comparison of result x >= y. + /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_quaternion_relational + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> greaterThanEqual(qua const& x, qua const& y); + + /// Build a look at quaternion based on the default handedness. + /// + /// @param direction Desired forward direction. Needs to be normalized. + /// @param up Up vector, how the camera is oriented. Typically (0, 1, 0). + template + GLM_FUNC_DECL qua quatLookAt( + vec<3, T, Q> const& direction, + vec<3, T, Q> const& up); + + /// Build a right-handed look at quaternion. + /// + /// @param direction Desired forward direction onto which the -z-axis gets mapped. Needs to be normalized. + /// @param up Up vector, how the camera is oriented. Typically (0, 1, 0). + template + GLM_FUNC_DECL qua quatLookAtRH( + vec<3, T, Q> const& direction, + vec<3, T, Q> const& up); + + /// Build a left-handed look at quaternion. + /// + /// @param direction Desired forward direction onto which the +z-axis gets mapped. Needs to be normalized. + /// @param up Up vector, how the camera is oriented. Typically (0, 1, 0). 
+ template + GLM_FUNC_DECL qua quatLookAtLH( + vec<3, T, Q> const& direction, + vec<3, T, Q> const& up); + /// @} +} //namespace glm + +#include "quaternion.inl" diff --git a/includes/glm/gtc/quaternion.inl b/includes/glm/gtc/quaternion.inl new file mode 100644 index 0000000..ea159f2 --- /dev/null +++ b/includes/glm/gtc/quaternion.inl @@ -0,0 +1,208 @@ +#include "../trigonometric.hpp" +#include "../geometric.hpp" +#include "../exponential.hpp" +#include "epsilon.hpp" +#include + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> eulerAngles(qua const& x) + { + return vec<3, T, Q>(pitch(x), yaw(x), roll(x)); + } + + template + GLM_FUNC_QUALIFIER T roll(qua const& q) + { + T const y = static_cast(2) * (q.x * q.y + q.w * q.z); + T const x = q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z; + + if(all(equal(vec<2, T, Q>(x, y), vec<2, T, Q>(0), epsilon()))) //avoid atan2(0,0) - handle singularity - Matiis + return static_cast(0); + + return static_cast(atan(y, x)); + } + + template + GLM_FUNC_QUALIFIER T pitch(qua const& q) + { + //return T(atan(T(2) * (q.y * q.z + q.w * q.x), q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z)); + T const y = static_cast(2) * (q.y * q.z + q.w * q.x); + T const x = q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z; + + if(all(equal(vec<2, T, Q>(x, y), vec<2, T, Q>(0), epsilon()))) //avoid atan2(0,0) - handle singularity - Matiis + return static_cast(static_cast(2) * atan(q.x, q.w)); + + return static_cast(atan(y, x)); + } + + template + GLM_FUNC_QUALIFIER T yaw(qua const& q) + { + return asin(clamp(static_cast(-2) * (q.x * q.z - q.w * q.y), static_cast(-1), static_cast(1))); + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> mat3_cast(qua const& q) + { + mat<3, 3, T, Q> Result(T(1)); + T qxx(q.x * q.x); + T qyy(q.y * q.y); + T qzz(q.z * q.z); + T qxz(q.x * q.z); + T qxy(q.x * q.y); + T qyz(q.y * q.z); + T qwx(q.w * q.x); + T qwy(q.w * q.y); + T qwz(q.w * q.z); + + Result[0][0] = T(1) - T(2) * (qyy + qzz); + Result[0][1] = T(2) * (qxy + qwz); + Result[0][2] = T(2) * (qxz - qwy); + + Result[1][0] = T(2) * (qxy - qwz); + Result[1][1] = T(1) - T(2) * (qxx + qzz); + Result[1][2] = T(2) * (qyz + qwx); + + Result[2][0] = T(2) * (qxz + qwy); + Result[2][1] = T(2) * (qyz - qwx); + Result[2][2] = T(1) - T(2) * (qxx + qyy); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> mat4_cast(qua const& q) + { + return mat<4, 4, T, Q>(mat3_cast(q)); + } + + template + GLM_FUNC_QUALIFIER qua quat_cast(mat<3, 3, T, Q> const& m) + { + T fourXSquaredMinus1 = m[0][0] - m[1][1] - m[2][2]; + T fourYSquaredMinus1 = m[1][1] - m[0][0] - m[2][2]; + T fourZSquaredMinus1 = m[2][2] - m[0][0] - m[1][1]; + T fourWSquaredMinus1 = m[0][0] + m[1][1] + m[2][2]; + + int biggestIndex = 0; + T fourBiggestSquaredMinus1 = fourWSquaredMinus1; + if(fourXSquaredMinus1 > fourBiggestSquaredMinus1) + { + fourBiggestSquaredMinus1 = fourXSquaredMinus1; + biggestIndex = 1; + } + if(fourYSquaredMinus1 > fourBiggestSquaredMinus1) + { + fourBiggestSquaredMinus1 = fourYSquaredMinus1; + biggestIndex = 2; + } + if(fourZSquaredMinus1 > fourBiggestSquaredMinus1) + { + fourBiggestSquaredMinus1 = fourZSquaredMinus1; + biggestIndex = 3; + } + + T biggestVal = sqrt(fourBiggestSquaredMinus1 + static_cast(1)) * static_cast(0.5); + T mult = static_cast(0.25) / biggestVal; + + switch(biggestIndex) + { + case 0: + return qua::wxyz(biggestVal, (m[1][2] - m[2][1]) * mult, (m[2][0] - m[0][2]) * mult, (m[0][1] - m[1][0]) * mult); + case 1: + return qua::wxyz((m[1][2] - m[2][1]) * mult, biggestVal, (m[0][1] + 
m[1][0]) * mult, (m[2][0] + m[0][2]) * mult); + case 2: + return qua::wxyz((m[2][0] - m[0][2]) * mult, (m[0][1] + m[1][0]) * mult, biggestVal, (m[1][2] + m[2][1]) * mult); + case 3: + return qua::wxyz((m[0][1] - m[1][0]) * mult, (m[2][0] + m[0][2]) * mult, (m[1][2] + m[2][1]) * mult, biggestVal); + default: // Silence a -Wswitch-default warning in GCC. Should never actually get here. Assert is just for sanity. + assert(false); + return qua::wxyz(1, 0, 0, 0); + } + } + + template + GLM_FUNC_QUALIFIER qua quat_cast(mat<4, 4, T, Q> const& m4) + { + return quat_cast(mat<3, 3, T, Q>(m4)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> lessThan(qua const& x, qua const& y) + { + vec<4, bool, Q> Result(false, false, false, false); + for(length_t i = 0; i < x.length(); ++i) + Result[i] = x[i] < y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> lessThanEqual(qua const& x, qua const& y) + { + vec<4, bool, Q> Result(false, false, false, false); + for(length_t i = 0; i < x.length(); ++i) + Result[i] = x[i] <= y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> greaterThan(qua const& x, qua const& y) + { + vec<4, bool, Q> Result(false, false, false, false); + for(length_t i = 0; i < x.length(); ++i) + Result[i] = x[i] > y[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> greaterThanEqual(qua const& x, qua const& y) + { + vec<4, bool, Q> Result(false, false, false, false); + for(length_t i = 0; i < x.length(); ++i) + Result[i] = x[i] >= y[i]; + return Result; + } + + + template + GLM_FUNC_QUALIFIER qua quatLookAt(vec<3, T, Q> const& direction, vec<3, T, Q> const& up) + { +# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT + return quatLookAtLH(direction, up); +# else + return quatLookAtRH(direction, up); +# endif + } + + template + GLM_FUNC_QUALIFIER qua quatLookAtRH(vec<3, T, Q> const& direction, vec<3, T, Q> const& up) + { + mat<3, 3, T, Q> Result; + + Result[2] = -direction; + vec<3, T, Q> const& Right = cross(up, Result[2]); + Result[0] = Right * inversesqrt(max(static_cast(0.00001), dot(Right, Right))); + Result[1] = cross(Result[2], Result[0]); + + return quat_cast(Result); + } + + template + GLM_FUNC_QUALIFIER qua quatLookAtLH(vec<3, T, Q> const& direction, vec<3, T, Q> const& up) + { + mat<3, 3, T, Q> Result; + + Result[2] = direction; + vec<3, T, Q> const& Right = cross(up, Result[2]); + Result[0] = Right * inversesqrt(max(static_cast(0.00001), dot(Right, Right))); + Result[1] = cross(Result[2], Result[0]); + + return quat_cast(Result); + } +}//namespace glm + +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "quaternion_simd.inl" +#endif + diff --git a/includes/glm/gtc/quaternion_simd.inl b/includes/glm/gtc/quaternion_simd.inl new file mode 100644 index 0000000..e69de29 diff --git a/includes/glm/gtc/random.hpp b/includes/glm/gtc/random.hpp new file mode 100644 index 0000000..c6485bf --- /dev/null +++ b/includes/glm/gtc/random.hpp @@ -0,0 +1,82 @@ +/// @ref gtc_random +/// @file glm/gtc/random.hpp +/// +/// @see core (dependence) +/// @see gtx_random (extended) +/// +/// @defgroup gtc_random GLM_GTC_random +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Generate random number from various distribution methods. 
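[Editor's aside, not part of the patch] The GTC_random declarations introduced just below are easiest to read alongside a short usage sketch. The snippet here is a minimal, hypothetical example under two assumptions not stated in the diff: GLM's root directory is on the include path, and reproducibility is wanted, so the program seeds std::rand() (which this extension's implementation draws from) with std::srand().

    #include <cstdlib>
    #include <glm/glm.hpp>
    #include <glm/gtc/random.hpp>

    int main()
    {
        std::srand(42u);                                    // GTC_random is built on std::rand()

        float     s = glm::linearRand(0.0f, 1.0f);          // uniform scalar in [0, 1]
        glm::vec3 v = glm::linearRand(glm::vec3(-1.0f),
                                      glm::vec3(1.0f));     // uniform per component in [-1, 1]
        float     g = glm::gaussRand(0.0f, 1.0f);           // gaussian sample, mean 0, deviation 1
        glm::vec3 d = glm::sphericalRand(1.0f);             // point on the unit sphere (a random direction)
        glm::vec2 j = glm::diskRand(0.5f);                  // point inside a disk of radius 0.5

        // Use the values so the calls are not optimized away.
        return (s + v.x + g + d.y + j.x) == 0.0f ? 1 : 0;
    }

A sketch like this also shows the split in the API: scalar overloads (linearRand, gaussRand) mirror their vector counterparts, while the shape-based helpers (circularRand, sphericalRand, diskRand, ballRand) always take a radius and return a fixed-size vector.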
+ +#pragma once + +// Dependency: +#include "../ext/scalar_int_sized.hpp" +#include "../ext/scalar_uint_sized.hpp" +#include "../detail/qualifier.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_random extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_random + /// @{ + + /// Generate random numbers in the interval [Min, Max], according a linear distribution + /// + /// @param Min Minimum value included in the sampling + /// @param Max Maximum value included in the sampling + /// @tparam genType Value type. Currently supported: float or double scalars. + /// @see gtc_random + template + GLM_FUNC_DECL genType linearRand(genType Min, genType Max); + + /// Generate random numbers in the interval [Min, Max], according a linear distribution + /// + /// @param Min Minimum value included in the sampling + /// @param Max Maximum value included in the sampling + /// @tparam T Value type. Currently supported: float or double. + /// + /// @see gtc_random + template + GLM_FUNC_DECL vec linearRand(vec const& Min, vec const& Max); + + /// Generate random numbers in the interval [Min, Max], according a gaussian distribution + /// + /// @see gtc_random + template + GLM_FUNC_DECL genType gaussRand(genType Mean, genType Deviation); + + /// Generate a random 2D vector which coordinates are regularly distributed on a circle of a given radius + /// + /// @see gtc_random + template + GLM_FUNC_DECL vec<2, T, defaultp> circularRand(T Radius); + + /// Generate a random 3D vector which coordinates are regularly distributed on a sphere of a given radius + /// + /// @see gtc_random + template + GLM_FUNC_DECL vec<3, T, defaultp> sphericalRand(T Radius); + + /// Generate a random 2D vector which coordinates are regularly distributed within the area of a disk of a given radius + /// + /// @see gtc_random + template + GLM_FUNC_DECL vec<2, T, defaultp> diskRand(T Radius); + + /// Generate a random 3D vector which coordinates are regularly distributed within the volume of a ball of a given radius + /// + /// @see gtc_random + template + GLM_FUNC_DECL vec<3, T, defaultp> ballRand(T Radius); + + /// @} +}//namespace glm + +#include "random.inl" diff --git a/includes/glm/gtc/random.inl b/includes/glm/gtc/random.inl new file mode 100644 index 0000000..5724368 --- /dev/null +++ b/includes/glm/gtc/random.inl @@ -0,0 +1,303 @@ +#include "../geometric.hpp" +#include "../exponential.hpp" +#include "../trigonometric.hpp" +#include "../detail/type_vec1.hpp" +#include +#include +#include +#include + +namespace glm{ +namespace detail +{ + template + struct compute_rand + { + GLM_FUNC_QUALIFIER static vec call(); + }; + + template + struct compute_rand<1, uint8, P> + { + GLM_FUNC_QUALIFIER static vec<1, uint8, P> call() + { + return vec<1, uint8, P>( + static_cast(std::rand() % std::numeric_limits::max())); + } + }; + + template + struct compute_rand<2, uint8, P> + { + GLM_FUNC_QUALIFIER static vec<2, uint8, P> call() + { + return vec<2, uint8, P>( + std::rand() % std::numeric_limits::max(), + std::rand() % std::numeric_limits::max()); + } + }; + + template + struct compute_rand<3, uint8, P> + { + GLM_FUNC_QUALIFIER static vec<3, uint8, P> call() + { + return vec<3, uint8, P>( + std::rand() % std::numeric_limits::max(), + std::rand() % std::numeric_limits::max(), + std::rand() % std::numeric_limits::max()); + } + }; + + template + struct compute_rand<4, uint8, P> + { + GLM_FUNC_QUALIFIER static vec<4, uint8, P> call() + { + return vec<4, uint8, P>( + std::rand() % 
std::numeric_limits::max(), + std::rand() % std::numeric_limits::max(), + std::rand() % std::numeric_limits::max(), + std::rand() % std::numeric_limits::max()); + } + }; + + template + struct compute_rand + { + GLM_FUNC_QUALIFIER static vec call() + { + return + (vec(compute_rand::call()) << static_cast(8)) | + (vec(compute_rand::call())); + } + }; + + template + struct compute_rand + { + GLM_FUNC_QUALIFIER static vec call() + { + return + (vec(compute_rand::call()) << static_cast(16)) | + (vec(compute_rand::call())); + } + }; + + template + struct compute_rand + { + GLM_FUNC_QUALIFIER static vec call() + { + return + (vec(compute_rand::call()) << static_cast(32)) | + (vec(compute_rand::call())); + } + }; + + template + struct compute_linearRand + { + GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max); + }; + + template + struct compute_linearRand + { + GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) + { + return (vec(compute_rand::call() % vec(Max + static_cast(1) - Min))) + Min; + } + }; + + template + struct compute_linearRand + { + GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) + { + return (compute_rand::call() % (Max + static_cast(1) - Min)) + Min; + } + }; + + template + struct compute_linearRand + { + GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) + { + return (vec(compute_rand::call() % vec(Max + static_cast(1) - Min))) + Min; + } + }; + + template + struct compute_linearRand + { + GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) + { + return (compute_rand::call() % (Max + static_cast(1) - Min)) + Min; + } + }; + + template + struct compute_linearRand + { + GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) + { + return (vec(compute_rand::call() % vec(Max + static_cast(1) - Min))) + Min; + } + }; + + template + struct compute_linearRand + { + GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) + { + return (compute_rand::call() % (Max + static_cast(1) - Min)) + Min; + } + }; + + template + struct compute_linearRand + { + GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) + { + return (vec(compute_rand::call() % vec(Max + static_cast(1) - Min))) + Min; + } + }; + + template + struct compute_linearRand + { + GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) + { + return (compute_rand::call() % (Max + static_cast(1) - Min)) + Min; + } + }; + + template + struct compute_linearRand + { + GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) + { + return vec(compute_rand::call()) / static_cast(std::numeric_limits::max()) * (Max - Min) + Min; + } + }; + + template + struct compute_linearRand + { + GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) + { + return vec(compute_rand::call()) / static_cast(std::numeric_limits::max()) * (Max - Min) + Min; + } + }; + + template + struct compute_linearRand + { + GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) + { + return vec(compute_rand::call()) / static_cast(std::numeric_limits::max()) * (Max - Min) + Min; + } + }; +}//namespace detail + + template + GLM_FUNC_QUALIFIER genType linearRand(genType Min, genType Max) + { + return detail::compute_linearRand<1, genType, highp>::call( + vec<1, genType, highp>(Min), + vec<1, genType, highp>(Max)).x; + } + + template + GLM_FUNC_QUALIFIER vec linearRand(vec const& Min, vec const& Max) + { + return detail::compute_linearRand::call(Min, Max); + } + + template + GLM_FUNC_QUALIFIER genType gaussRand(genType 
Mean, genType Deviation) + { + genType w, x1, x2; + + do + { + x1 = linearRand(genType(-1), genType(1)); + x2 = linearRand(genType(-1), genType(1)); + + w = x1 * x1 + x2 * x2; + } while(w > genType(1)); + + return static_cast(x2 * Deviation * Deviation * sqrt((genType(-2) * log(w)) / w) + Mean); + } + + template + GLM_FUNC_QUALIFIER vec gaussRand(vec const& Mean, vec const& Deviation) + { + return detail::functor2::call(gaussRand, Mean, Deviation); + } + + template + GLM_FUNC_QUALIFIER vec<2, T, defaultp> diskRand(T Radius) + { + assert(Radius > static_cast(0)); + + vec<2, T, defaultp> Result(T(0)); + T LenRadius(T(0)); + + do + { + Result = linearRand( + vec<2, T, defaultp>(-Radius), + vec<2, T, defaultp>(Radius)); + LenRadius = length(Result); + } + while(LenRadius > Radius); + + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, defaultp> ballRand(T Radius) + { + assert(Radius > static_cast(0)); + + vec<3, T, defaultp> Result(T(0)); + T LenRadius(T(0)); + + do + { + Result = linearRand( + vec<3, T, defaultp>(-Radius), + vec<3, T, defaultp>(Radius)); + LenRadius = length(Result); + } + while(LenRadius > Radius); + + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<2, T, defaultp> circularRand(T Radius) + { + assert(Radius > static_cast(0)); + + T a = linearRand(T(0), static_cast(6.283185307179586476925286766559)); + return vec<2, T, defaultp>(glm::cos(a), glm::sin(a)) * Radius; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, defaultp> sphericalRand(T Radius) + { + assert(Radius > static_cast(0)); + + T theta = linearRand(T(0), T(6.283185307179586476925286766559f)); + T phi = std::acos(linearRand(T(-1.0f), T(1.0f))); + + T x = std::sin(phi) * std::cos(theta); + T y = std::sin(phi) * std::sin(theta); + T z = std::cos(phi); + + return vec<3, T, defaultp>(x, y, z) * Radius; + } +}//namespace glm diff --git a/includes/glm/gtc/reciprocal.hpp b/includes/glm/gtc/reciprocal.hpp new file mode 100644 index 0000000..4d0fc91 --- /dev/null +++ b/includes/glm/gtc/reciprocal.hpp @@ -0,0 +1,24 @@ +/// @ref gtc_reciprocal +/// @file glm/gtc/reciprocal.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtc_reciprocal GLM_GTC_reciprocal +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Define secant, cosecant and cotangent functions. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_reciprocal extension included") +#endif + +#include "../ext/scalar_reciprocal.hpp" +#include "../ext/vector_reciprocal.hpp" + diff --git a/includes/glm/gtc/round.hpp b/includes/glm/gtc/round.hpp new file mode 100644 index 0000000..56edbbc --- /dev/null +++ b/includes/glm/gtc/round.hpp @@ -0,0 +1,160 @@ +/// @ref gtc_round +/// @file glm/gtc/round.hpp +/// +/// @see core (dependence) +/// @see gtc_round (dependence) +/// +/// @defgroup gtc_round GLM_GTC_round +/// @ingroup gtc +/// +/// Include to use the features of this extension. 
+/// +/// Rounding value to specific boundings + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/_vectorize.hpp" +#include "../vector_relational.hpp" +#include "../common.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_round extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_round + /// @{ + + /// Return the power of two number which value is just higher the input value, + /// round up to a power of two. + /// + /// @see gtc_round + template + GLM_FUNC_DECL genIUType ceilPowerOfTwo(genIUType v); + + /// Return the power of two number which value is just higher the input value, + /// round up to a power of two. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_round + template + GLM_FUNC_DECL vec ceilPowerOfTwo(vec const& v); + + /// Return the power of two number which value is just lower the input value, + /// round down to a power of two. + /// + /// @see gtc_round + template + GLM_FUNC_DECL genIUType floorPowerOfTwo(genIUType v); + + /// Return the power of two number which value is just lower the input value, + /// round down to a power of two. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_round + template + GLM_FUNC_DECL vec floorPowerOfTwo(vec const& v); + + /// Return the power of two number which value is the closet to the input value. + /// + /// @see gtc_round + template + GLM_FUNC_DECL genIUType roundPowerOfTwo(genIUType v); + + /// Return the power of two number which value is the closet to the input value. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_round + template + GLM_FUNC_DECL vec roundPowerOfTwo(vec const& v); + + /// Higher multiple number of Source. + /// + /// @tparam genType Floating-point or integer scalar or vector types. + /// + /// @param v Source value to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see gtc_round + template + GLM_FUNC_DECL genType ceilMultiple(genType v, genType Multiple); + + /// Higher multiple number of Source. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @param v Source values to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see gtc_round + template + GLM_FUNC_DECL vec ceilMultiple(vec const& v, vec const& Multiple); + + /// Lower multiple number of Source. + /// + /// @tparam genType Floating-point or integer scalar or vector types. + /// + /// @param v Source value to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see gtc_round + template + GLM_FUNC_DECL genType floorMultiple(genType v, genType Multiple); + + /// Lower multiple number of Source. 
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @param v Source values to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see gtc_round + template + GLM_FUNC_DECL vec floorMultiple(vec const& v, vec const& Multiple); + + /// Lower multiple number of Source. + /// + /// @tparam genType Floating-point or integer scalar or vector types. + /// + /// @param v Source value to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see gtc_round + template + GLM_FUNC_DECL genType roundMultiple(genType v, genType Multiple); + + /// Lower multiple number of Source. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @param v Source values to which is applied the function + /// @param Multiple Must be a null or positive value + /// + /// @see gtc_round + template + GLM_FUNC_DECL vec roundMultiple(vec const& v, vec const& Multiple); + + /// @} +} //namespace glm + +#include "round.inl" diff --git a/includes/glm/gtc/round.inl b/includes/glm/gtc/round.inl new file mode 100644 index 0000000..48411e4 --- /dev/null +++ b/includes/glm/gtc/round.inl @@ -0,0 +1,155 @@ +/// @ref gtc_round + +#include "../integer.hpp" +#include "../ext/vector_integer.hpp" + +namespace glm{ +namespace detail +{ + template + struct compute_roundMultiple {}; + + template<> + struct compute_roundMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + if (Source >= genType(0)) + return Source - std::fmod(Source, Multiple); + else + { + genType Tmp = Source + genType(1); + return Tmp - std::fmod(Tmp, Multiple) - Multiple; + } + } + }; + + template<> + struct compute_roundMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + if (Source >= genType(0)) + return Source - Source % Multiple; + else + { + genType Tmp = Source + genType(1); + return Tmp - Tmp % Multiple - Multiple; + } + } + }; + + template<> + struct compute_roundMultiple + { + template + GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) + { + if (Source >= genType(0)) + return Source - Source % Multiple; + else + { + genType Tmp = Source + genType(1); + return Tmp - Tmp % Multiple - Multiple; + } + } + }; +}//namespace detail + + ////////////////// + // ceilPowerOfTwo + + template + GLM_FUNC_QUALIFIER genType ceilPowerOfTwo(genType value) + { + return detail::compute_ceilPowerOfTwo<1, genType, defaultp, std::numeric_limits::is_signed>::call(vec<1, genType, defaultp>(value)).x; + } + + template + GLM_FUNC_QUALIFIER vec ceilPowerOfTwo(vec const& v) + { + return detail::compute_ceilPowerOfTwo::is_signed>::call(v); + } + + /////////////////// + // floorPowerOfTwo + + template + GLM_FUNC_QUALIFIER genType floorPowerOfTwo(genType value) + { + return isPowerOfTwo(value) ? 
value : static_cast(1) << findMSB(value); + } + + template + GLM_FUNC_QUALIFIER vec floorPowerOfTwo(vec const& v) + { + return detail::functor1::call(floorPowerOfTwo, v); + } + + /////////////////// + // roundPowerOfTwo + + template + GLM_FUNC_QUALIFIER genIUType roundPowerOfTwo(genIUType value) + { + if(isPowerOfTwo(value)) + return value; + + genIUType const prev = static_cast(1) << findMSB(value); + genIUType const next = prev << static_cast(1); + return (next - value) < (value - prev) ? next : prev; + } + + template + GLM_FUNC_QUALIFIER vec roundPowerOfTwo(vec const& v) + { + return detail::functor1::call(roundPowerOfTwo, v); + } + + ////////////////////// + // ceilMultiple + + template + GLM_FUNC_QUALIFIER genType ceilMultiple(genType Source, genType Multiple) + { + return detail::compute_ceilMultiple::is_iec559, std::numeric_limits::is_signed>::call(Source, Multiple); + } + + template + GLM_FUNC_QUALIFIER vec ceilMultiple(vec const& Source, vec const& Multiple) + { + return detail::functor2::call(ceilMultiple, Source, Multiple); + } + + ////////////////////// + // floorMultiple + + template + GLM_FUNC_QUALIFIER genType floorMultiple(genType Source, genType Multiple) + { + return detail::compute_floorMultiple::is_iec559, std::numeric_limits::is_signed>::call(Source, Multiple); + } + + template + GLM_FUNC_QUALIFIER vec floorMultiple(vec const& Source, vec const& Multiple) + { + return detail::functor2::call(floorMultiple, Source, Multiple); + } + + ////////////////////// + // roundMultiple + + template + GLM_FUNC_QUALIFIER genType roundMultiple(genType Source, genType Multiple) + { + return detail::compute_roundMultiple::is_iec559, std::numeric_limits::is_signed>::call(Source, Multiple); + } + + template + GLM_FUNC_QUALIFIER vec roundMultiple(vec const& Source, vec const& Multiple) + { + return detail::functor2::call(roundMultiple, Source, Multiple); + } +}//namespace glm diff --git a/includes/glm/gtc/type_aligned.hpp b/includes/glm/gtc/type_aligned.hpp new file mode 100644 index 0000000..5403abf --- /dev/null +++ b/includes/glm/gtc/type_aligned.hpp @@ -0,0 +1,1315 @@ +/// @ref gtc_type_aligned +/// @file glm/gtc/type_aligned.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtc_type_aligned GLM_GTC_type_aligned +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Aligned types allowing SIMD optimizations of vectors and matrices types + +#pragma once + +#if (GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE) +# error "GLM: Aligned gentypes require to enable C++ language extensions. Define GLM_FORCE_ALIGNED_GENTYPES before including GLM headers to use aligned types." +#endif + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_type_aligned extension included") +#endif + +#include "../mat4x4.hpp" +#include "../mat4x3.hpp" +#include "../mat4x2.hpp" +#include "../mat3x4.hpp" +#include "../mat3x3.hpp" +#include "../mat3x2.hpp" +#include "../mat2x4.hpp" +#include "../mat2x3.hpp" +#include "../mat2x2.hpp" +#include "../gtc/vec1.hpp" +#include "../vec2.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" + +namespace glm +{ + /// @addtogroup gtc_type_aligned + /// @{ + + // -- *vec1 -- + + /// 1 component vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<1, float, aligned_highp> aligned_highp_vec1; + + /// 1 component vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
+ typedef vec<1, float, aligned_mediump> aligned_mediump_vec1; + + /// 1 component vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<1, float, aligned_lowp> aligned_lowp_vec1; + + /// 1 component vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<1, double, aligned_highp> aligned_highp_dvec1; + + /// 1 component vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<1, double, aligned_mediump> aligned_mediump_dvec1; + + /// 1 component vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<1, double, aligned_lowp> aligned_lowp_dvec1; + + /// 1 component vector aligned in memory of signed integer numbers. + typedef vec<1, int, aligned_highp> aligned_highp_ivec1; + + /// 1 component vector aligned in memory of signed integer numbers. + typedef vec<1, int, aligned_mediump> aligned_mediump_ivec1; + + /// 1 component vector aligned in memory of signed integer numbers. + typedef vec<1, int, aligned_lowp> aligned_lowp_ivec1; + + /// 1 component vector aligned in memory of unsigned integer numbers. + typedef vec<1, uint, aligned_highp> aligned_highp_uvec1; + + /// 1 component vector aligned in memory of unsigned integer numbers. + typedef vec<1, uint, aligned_mediump> aligned_mediump_uvec1; + + /// 1 component vector aligned in memory of unsigned integer numbers. + typedef vec<1, uint, aligned_lowp> aligned_lowp_uvec1; + + /// 1 component vector aligned in memory of bool values. + typedef vec<1, bool, aligned_highp> aligned_highp_bvec1; + + /// 1 component vector aligned in memory of bool values. + typedef vec<1, bool, aligned_mediump> aligned_mediump_bvec1; + + /// 1 component vector aligned in memory of bool values. + typedef vec<1, bool, aligned_lowp> aligned_lowp_bvec1; + + /// 1 component vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<1, float, packed_highp> packed_highp_vec1; + + /// 1 component vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<1, float, packed_mediump> packed_mediump_vec1; + + /// 1 component vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<1, float, packed_lowp> packed_lowp_vec1; + + /// 1 component vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<1, double, packed_highp> packed_highp_dvec1; + + /// 1 component vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<1, double, packed_mediump> packed_mediump_dvec1; + + /// 1 component vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<1, double, packed_lowp> packed_lowp_dvec1; + + /// 1 component vector tightly packed in memory of signed integer numbers. + typedef vec<1, int, packed_highp> packed_highp_ivec1; + + /// 1 component vector tightly packed in memory of signed integer numbers. 
+ typedef vec<1, int, packed_mediump> packed_mediump_ivec1; + + /// 1 component vector tightly packed in memory of signed integer numbers. + typedef vec<1, int, packed_lowp> packed_lowp_ivec1; + + /// 1 component vector tightly packed in memory of unsigned integer numbers. + typedef vec<1, uint, packed_highp> packed_highp_uvec1; + + /// 1 component vector tightly packed in memory of unsigned integer numbers. + typedef vec<1, uint, packed_mediump> packed_mediump_uvec1; + + /// 1 component vector tightly packed in memory of unsigned integer numbers. + typedef vec<1, uint, packed_lowp> packed_lowp_uvec1; + + /// 1 component vector tightly packed in memory of bool values. + typedef vec<1, bool, packed_highp> packed_highp_bvec1; + + /// 1 component vector tightly packed in memory of bool values. + typedef vec<1, bool, packed_mediump> packed_mediump_bvec1; + + /// 1 component vector tightly packed in memory of bool values. + typedef vec<1, bool, packed_lowp> packed_lowp_bvec1; + + // -- *vec2 -- + + /// 2 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<2, float, aligned_highp> aligned_highp_vec2; + + /// 2 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<2, float, aligned_mediump> aligned_mediump_vec2; + + /// 2 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<2, float, aligned_lowp> aligned_lowp_vec2; + + /// 2 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<2, double, aligned_highp> aligned_highp_dvec2; + + /// 2 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<2, double, aligned_mediump> aligned_mediump_dvec2; + + /// 2 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<2, double, aligned_lowp> aligned_lowp_dvec2; + + /// 2 components vector aligned in memory of signed integer numbers. + typedef vec<2, int, aligned_highp> aligned_highp_ivec2; + + /// 2 components vector aligned in memory of signed integer numbers. + typedef vec<2, int, aligned_mediump> aligned_mediump_ivec2; + + /// 2 components vector aligned in memory of signed integer numbers. + typedef vec<2, int, aligned_lowp> aligned_lowp_ivec2; + + /// 2 components vector aligned in memory of unsigned integer numbers. + typedef vec<2, uint, aligned_highp> aligned_highp_uvec2; + + /// 2 components vector aligned in memory of unsigned integer numbers. + typedef vec<2, uint, aligned_mediump> aligned_mediump_uvec2; + + /// 2 components vector aligned in memory of unsigned integer numbers. + typedef vec<2, uint, aligned_lowp> aligned_lowp_uvec2; + + /// 2 components vector aligned in memory of bool values. + typedef vec<2, bool, aligned_highp> aligned_highp_bvec2; + + /// 2 components vector aligned in memory of bool values. + typedef vec<2, bool, aligned_mediump> aligned_mediump_bvec2; + + /// 2 components vector aligned in memory of bool values. + typedef vec<2, bool, aligned_lowp> aligned_lowp_bvec2; + + /// 2 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
+ typedef vec<2, float, packed_highp> packed_highp_vec2; + + /// 2 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<2, float, packed_mediump> packed_mediump_vec2; + + /// 2 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<2, float, packed_lowp> packed_lowp_vec2; + + /// 2 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<2, double, packed_highp> packed_highp_dvec2; + + /// 2 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<2, double, packed_mediump> packed_mediump_dvec2; + + /// 2 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<2, double, packed_lowp> packed_lowp_dvec2; + + /// 2 components vector tightly packed in memory of signed integer numbers. + typedef vec<2, int, packed_highp> packed_highp_ivec2; + + /// 2 components vector tightly packed in memory of signed integer numbers. + typedef vec<2, int, packed_mediump> packed_mediump_ivec2; + + /// 2 components vector tightly packed in memory of signed integer numbers. + typedef vec<2, int, packed_lowp> packed_lowp_ivec2; + + /// 2 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<2, uint, packed_highp> packed_highp_uvec2; + + /// 2 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<2, uint, packed_mediump> packed_mediump_uvec2; + + /// 2 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<2, uint, packed_lowp> packed_lowp_uvec2; + + /// 2 components vector tightly packed in memory of bool values. + typedef vec<2, bool, packed_highp> packed_highp_bvec2; + + /// 2 components vector tightly packed in memory of bool values. + typedef vec<2, bool, packed_mediump> packed_mediump_bvec2; + + /// 2 components vector tightly packed in memory of bool values. + typedef vec<2, bool, packed_lowp> packed_lowp_bvec2; + + // -- *vec3 -- + + /// 3 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<3, float, aligned_highp> aligned_highp_vec3; + + /// 3 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<3, float, aligned_mediump> aligned_mediump_vec3; + + /// 3 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<3, float, aligned_lowp> aligned_lowp_vec3; + + /// 3 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<3, double, aligned_highp> aligned_highp_dvec3; + + /// 3 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<3, double, aligned_mediump> aligned_mediump_dvec3; + + /// 3 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. 
+ typedef vec<3, double, aligned_lowp> aligned_lowp_dvec3; + + /// 3 components vector aligned in memory of signed integer numbers. + typedef vec<3, int, aligned_highp> aligned_highp_ivec3; + + /// 3 components vector aligned in memory of signed integer numbers. + typedef vec<3, int, aligned_mediump> aligned_mediump_ivec3; + + /// 3 components vector aligned in memory of signed integer numbers. + typedef vec<3, int, aligned_lowp> aligned_lowp_ivec3; + + /// 3 components vector aligned in memory of unsigned integer numbers. + typedef vec<3, uint, aligned_highp> aligned_highp_uvec3; + + /// 3 components vector aligned in memory of unsigned integer numbers. + typedef vec<3, uint, aligned_mediump> aligned_mediump_uvec3; + + /// 3 components vector aligned in memory of unsigned integer numbers. + typedef vec<3, uint, aligned_lowp> aligned_lowp_uvec3; + + /// 3 components vector aligned in memory of bool values. + typedef vec<3, bool, aligned_highp> aligned_highp_bvec3; + + /// 3 components vector aligned in memory of bool values. + typedef vec<3, bool, aligned_mediump> aligned_mediump_bvec3; + + /// 3 components vector aligned in memory of bool values. + typedef vec<3, bool, aligned_lowp> aligned_lowp_bvec3; + + /// 3 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<3, float, packed_highp> packed_highp_vec3; + + /// 3 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<3, float, packed_mediump> packed_mediump_vec3; + + /// 3 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<3, float, packed_lowp> packed_lowp_vec3; + + /// 3 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<3, double, packed_highp> packed_highp_dvec3; + + /// 3 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<3, double, packed_mediump> packed_mediump_dvec3; + + /// 3 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<3, double, packed_lowp> packed_lowp_dvec3; + + /// 3 components vector tightly packed in memory of signed integer numbers. + typedef vec<3, int, packed_highp> packed_highp_ivec3; + + /// 3 components vector tightly packed in memory of signed integer numbers. + typedef vec<3, int, packed_mediump> packed_mediump_ivec3; + + /// 3 components vector tightly packed in memory of signed integer numbers. + typedef vec<3, int, packed_lowp> packed_lowp_ivec3; + + /// 3 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<3, uint, packed_highp> packed_highp_uvec3; + + /// 3 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<3, uint, packed_mediump> packed_mediump_uvec3; + + /// 3 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<3, uint, packed_lowp> packed_lowp_uvec3; + + /// 3 components vector tightly packed in memory of bool values. + typedef vec<3, bool, packed_highp> packed_highp_bvec3; + + /// 3 components vector tightly packed in memory of bool values. 
+ typedef vec<3, bool, packed_mediump> packed_mediump_bvec3; + + /// 3 components vector tightly packed in memory of bool values. + typedef vec<3, bool, packed_lowp> packed_lowp_bvec3; + + // -- *vec4 -- + + /// 4 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<4, float, aligned_highp> aligned_highp_vec4; + + /// 4 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<4, float, aligned_mediump> aligned_mediump_vec4; + + /// 4 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<4, float, aligned_lowp> aligned_lowp_vec4; + + /// 4 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<4, double, aligned_highp> aligned_highp_dvec4; + + /// 4 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<4, double, aligned_mediump> aligned_mediump_dvec4; + + /// 4 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<4, double, aligned_lowp> aligned_lowp_dvec4; + + /// 4 components vector aligned in memory of signed integer numbers. + typedef vec<4, int, aligned_highp> aligned_highp_ivec4; + + /// 4 components vector aligned in memory of signed integer numbers. + typedef vec<4, int, aligned_mediump> aligned_mediump_ivec4; + + /// 4 components vector aligned in memory of signed integer numbers. + typedef vec<4, int, aligned_lowp> aligned_lowp_ivec4; + + /// 4 components vector aligned in memory of unsigned integer numbers. + typedef vec<4, uint, aligned_highp> aligned_highp_uvec4; + + /// 4 components vector aligned in memory of unsigned integer numbers. + typedef vec<4, uint, aligned_mediump> aligned_mediump_uvec4; + + /// 4 components vector aligned in memory of unsigned integer numbers. + typedef vec<4, uint, aligned_lowp> aligned_lowp_uvec4; + + /// 4 components vector aligned in memory of bool values. + typedef vec<4, bool, aligned_highp> aligned_highp_bvec4; + + /// 4 components vector aligned in memory of bool values. + typedef vec<4, bool, aligned_mediump> aligned_mediump_bvec4; + + /// 4 components vector aligned in memory of bool values. + typedef vec<4, bool, aligned_lowp> aligned_lowp_bvec4; + + /// 4 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<4, float, packed_highp> packed_highp_vec4; + + /// 4 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef vec<4, float, packed_mediump> packed_mediump_vec4; + + /// 4 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<4, float, packed_lowp> packed_lowp_vec4; + + /// 4 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef vec<4, double, packed_highp> packed_highp_dvec4; + + /// 4 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
+ typedef vec<4, double, packed_mediump> packed_mediump_dvec4; + + /// 4 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef vec<4, double, packed_lowp> packed_lowp_dvec4; + + /// 4 components vector tightly packed in memory of signed integer numbers. + typedef vec<4, int, packed_highp> packed_highp_ivec4; + + /// 4 components vector tightly packed in memory of signed integer numbers. + typedef vec<4, int, packed_mediump> packed_mediump_ivec4; + + /// 4 components vector tightly packed in memory of signed integer numbers. + typedef vec<4, int, packed_lowp> packed_lowp_ivec4; + + /// 4 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<4, uint, packed_highp> packed_highp_uvec4; + + /// 4 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<4, uint, packed_mediump> packed_mediump_uvec4; + + /// 4 components vector tightly packed in memory of unsigned integer numbers. + typedef vec<4, uint, packed_lowp> packed_lowp_uvec4; + + /// 4 components vector tightly packed in memory of bool values. + typedef vec<4, bool, packed_highp> packed_highp_bvec4; + + /// 4 components vector tightly packed in memory of bool values. + typedef vec<4, bool, packed_mediump> packed_mediump_bvec4; + + /// 4 components vector tightly packed in memory of bool values. + typedef vec<4, bool, packed_lowp> packed_lowp_bvec4; + + // -- *mat2 -- + + /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, float, aligned_highp> aligned_highp_mat2; + + /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, float, aligned_mediump> aligned_mediump_mat2; + + /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, float, aligned_lowp> aligned_lowp_mat2; + + /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, double, aligned_highp> aligned_highp_dmat2; + + /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, double, aligned_mediump> aligned_mediump_dmat2; + + /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, double, aligned_lowp> aligned_lowp_dmat2; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, float, packed_highp> packed_highp_mat2; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, float, packed_mediump> packed_mediump_mat2; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, float, packed_lowp> packed_lowp_mat2; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. 
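The aligned_* and packed_* aliases listed above differ only in their qualifier argument: the aligned variants may be padded and over-aligned so SIMD loads can be used, while the packed variants keep the tight scalar layout. A small sketch of how that shows up in practice; it assumes GLM_FORCE_ALIGNED_GENTYPES is defined before including GLM (as the #error at the top of this header requires), and the printed sizes and alignments depend on the compiler and the SIMD configuration GLM was built with.

    // Compare storage of aligned vs packed vector aliases (illustration only;
    // concrete numbers depend on GLM_CONFIG_ALIGNED_GENTYPES / SIMD settings).
    #define GLM_FORCE_ALIGNED_GENTYPES
    #include <cstdio>
    #include <glm/glm.hpp>
    #include <glm/gtc/type_aligned.hpp>

    int main()
    {
        std::printf("packed_vec3 : size=%zu align=%zu\n", sizeof(glm::packed_vec3),  alignof(glm::packed_vec3));
        std::printf("aligned_vec3: size=%zu align=%zu\n", sizeof(glm::aligned_vec3), alignof(glm::aligned_vec3));
        std::printf("packed_vec4 : size=%zu align=%zu\n", sizeof(glm::packed_vec4),  alignof(glm::packed_vec4));
        std::printf("aligned_vec4: size=%zu align=%zu\n", sizeof(glm::aligned_vec4), alignof(glm::aligned_vec4));
        return 0;
    }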
+ typedef mat<2, 2, double, packed_highp> packed_highp_dmat2; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, double, packed_mediump> packed_mediump_dmat2; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, double, packed_lowp> packed_lowp_dmat2; + + // -- *mat3 -- + + /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 3, float, aligned_highp> aligned_highp_mat3; + + /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 3, float, aligned_mediump> aligned_mediump_mat3; + + /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 3, float, aligned_lowp> aligned_lowp_mat3; + + /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 3, double, aligned_highp> aligned_highp_dmat3; + + /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 3, double, aligned_mediump> aligned_mediump_dmat3; + + /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 3, double, aligned_lowp> aligned_lowp_dmat3; + + /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 3, float, packed_highp> packed_highp_mat3; + + /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 3, float, packed_mediump> packed_mediump_mat3; + + /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 3, float, packed_lowp> packed_lowp_mat3; + + /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 3, double, packed_highp> packed_highp_dmat3; + + /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 3, double, packed_mediump> packed_mediump_dmat3; + + /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 3, double, packed_lowp> packed_lowp_dmat3; + + // -- *mat4 -- + + /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 4, float, aligned_highp> aligned_highp_mat4; + + /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 4, float, aligned_mediump> aligned_mediump_mat4; + + /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. 
+ typedef mat<4, 4, float, aligned_lowp> aligned_lowp_mat4; + + /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 4, double, aligned_highp> aligned_highp_dmat4; + + /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 4, double, aligned_mediump> aligned_mediump_dmat4; + + /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 4, double, aligned_lowp> aligned_lowp_dmat4; + + /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 4, float, packed_highp> packed_highp_mat4; + + /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 4, float, packed_mediump> packed_mediump_mat4; + + /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 4, float, packed_lowp> packed_lowp_mat4; + + /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 4, double, packed_highp> packed_highp_dmat4; + + /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 4, double, packed_mediump> packed_mediump_dmat4; + + /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 4, double, packed_lowp> packed_lowp_dmat4; + + // -- *mat2x2 -- + + /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, float, aligned_highp> aligned_highp_mat2x2; + + /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, float, aligned_mediump> aligned_mediump_mat2x2; + + /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, float, aligned_lowp> aligned_lowp_mat2x2; + + /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, double, aligned_highp> aligned_highp_dmat2x2; + + /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, double, aligned_mediump> aligned_mediump_dmat2x2; + + /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, double, aligned_lowp> aligned_lowp_dmat2x2; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, float, packed_highp> packed_highp_mat2x2; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
+ typedef mat<2, 2, float, packed_mediump> packed_mediump_mat2x2; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, float, packed_lowp> packed_lowp_mat2x2; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 2, double, packed_highp> packed_highp_dmat2x2; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 2, double, packed_mediump> packed_mediump_dmat2x2; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 2, double, packed_lowp> packed_lowp_dmat2x2; + + // -- *mat2x3 -- + + /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 3, float, aligned_highp> aligned_highp_mat2x3; + + /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 3, float, aligned_mediump> aligned_mediump_mat2x3; + + /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 3, float, aligned_lowp> aligned_lowp_mat2x3; + + /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 3, double, aligned_highp> aligned_highp_dmat2x3; + + /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 3, double, aligned_mediump> aligned_mediump_dmat2x3; + + /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 3, double, aligned_lowp> aligned_lowp_dmat2x3; + + /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 3, float, packed_highp> packed_highp_mat2x3; + + /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 3, float, packed_mediump> packed_mediump_mat2x3; + + /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 3, float, packed_lowp> packed_lowp_mat2x3; + + /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 3, double, packed_highp> packed_highp_dmat2x3; + + /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 3, double, packed_mediump> packed_mediump_dmat2x3; + + /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 3, double, packed_lowp> packed_lowp_dmat2x3; + + // -- *mat2x4 -- + + /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
+ typedef mat<2, 4, float, aligned_highp> aligned_highp_mat2x4; + + /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 4, float, aligned_mediump> aligned_mediump_mat2x4; + + /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 4, float, aligned_lowp> aligned_lowp_mat2x4; + + /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 4, double, aligned_highp> aligned_highp_dmat2x4; + + /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 4, double, aligned_mediump> aligned_mediump_dmat2x4; + + /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 4, double, aligned_lowp> aligned_lowp_dmat2x4; + + /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 4, float, packed_highp> packed_highp_mat2x4; + + /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 4, float, packed_mediump> packed_mediump_mat2x4; + + /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 4, float, packed_lowp> packed_lowp_mat2x4; + + /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<2, 4, double, packed_highp> packed_highp_dmat2x4; + + /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<2, 4, double, packed_mediump> packed_mediump_dmat2x4; + + /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<2, 4, double, packed_lowp> packed_lowp_dmat2x4; + + // -- *mat3x2 -- + + /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 2, float, aligned_highp> aligned_highp_mat3x2; + + /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 2, float, aligned_mediump> aligned_mediump_mat3x2; + + /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 2, float, aligned_lowp> aligned_lowp_mat3x2; + + /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 2, double, aligned_highp> aligned_highp_dmat3x2; + + /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 2, double, aligned_mediump> aligned_mediump_dmat3x2; + + /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. 
+ typedef mat<3, 2, double, aligned_lowp> aligned_lowp_dmat3x2; + + /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 2, float, packed_highp> packed_highp_mat3x2; + + /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 2, float, packed_mediump> packed_mediump_mat3x2; + + /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 2, float, packed_lowp> packed_lowp_mat3x2; + + /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 2, double, packed_highp> packed_highp_dmat3x2; + + /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 2, double, packed_mediump> packed_mediump_dmat3x2; + + /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 2, double, packed_lowp> packed_lowp_dmat3x2; + + // -- *mat3x3 -- + + /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 3, float, aligned_highp> aligned_highp_mat3x3; + + /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 3, float, aligned_mediump> aligned_mediump_mat3x3; + + /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 3, float, aligned_lowp> aligned_lowp_mat3x3; + + /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 3, double, aligned_highp> aligned_highp_dmat3x3; + + /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 3, double, aligned_mediump> aligned_mediump_dmat3x3; + + /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 3, double, aligned_lowp> aligned_lowp_dmat3x3; + + /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 3, float, packed_highp> packed_highp_mat3x3; + + /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 3, float, packed_mediump> packed_mediump_mat3x3; + + /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 3, float, packed_lowp> packed_lowp_mat3x3; + + /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 3, double, packed_highp> packed_highp_dmat3x3; + + /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
+ typedef mat<3, 3, double, packed_mediump> packed_mediump_dmat3x3; + + /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 3, double, packed_lowp> packed_lowp_dmat3x3; + + // -- *mat3x4 -- + + /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 4, float, aligned_highp> aligned_highp_mat3x4; + + /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 4, float, aligned_mediump> aligned_mediump_mat3x4; + + /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 4, float, aligned_lowp> aligned_lowp_mat3x4; + + /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 4, double, aligned_highp> aligned_highp_dmat3x4; + + /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 4, double, aligned_mediump> aligned_mediump_dmat3x4; + + /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 4, double, aligned_lowp> aligned_lowp_dmat3x4; + + /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 4, float, packed_highp> packed_highp_mat3x4; + + /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 4, float, packed_mediump> packed_mediump_mat3x4; + + /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 4, float, packed_lowp> packed_lowp_mat3x4; + + /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<3, 4, double, packed_highp> packed_highp_dmat3x4; + + /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<3, 4, double, packed_mediump> packed_mediump_dmat3x4; + + /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<3, 4, double, packed_lowp> packed_lowp_dmat3x4; + + // -- *mat4x2 -- + + /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 2, float, aligned_highp> aligned_highp_mat4x2; + + /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 2, float, aligned_mediump> aligned_mediump_mat4x2; + + /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 2, float, aligned_lowp> aligned_lowp_mat4x2; + + /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. 
+ typedef mat<4, 2, double, aligned_highp> aligned_highp_dmat4x2; + + /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 2, double, aligned_mediump> aligned_mediump_dmat4x2; + + /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 2, double, aligned_lowp> aligned_lowp_dmat4x2; + + /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 2, float, packed_highp> packed_highp_mat4x2; + + /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 2, float, packed_mediump> packed_mediump_mat4x2; + + /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 2, float, packed_lowp> packed_lowp_mat4x2; + + /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 2, double, packed_highp> packed_highp_dmat4x2; + + /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 2, double, packed_mediump> packed_mediump_dmat4x2; + + /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 2, double, packed_lowp> packed_lowp_dmat4x2; + + // -- *mat4x3 -- + + /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 3, float, aligned_highp> aligned_highp_mat4x3; + + /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 3, float, aligned_mediump> aligned_mediump_mat4x3; + + /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 3, float, aligned_lowp> aligned_lowp_mat4x3; + + /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 3, double, aligned_highp> aligned_highp_dmat4x3; + + /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 3, double, aligned_mediump> aligned_mediump_dmat4x3; + + /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 3, double, aligned_lowp> aligned_lowp_dmat4x3; + + /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 3, float, packed_highp> packed_highp_mat4x3; + + /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 3, float, packed_mediump> packed_mediump_mat4x3; + + /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. 
+ typedef mat<4, 3, float, packed_lowp> packed_lowp_mat4x3; + + /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 3, double, packed_highp> packed_highp_dmat4x3; + + /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 3, double, packed_mediump> packed_mediump_dmat4x3; + + /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 3, double, packed_lowp> packed_lowp_dmat4x3; + + // -- *mat4x4 -- + + /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 4, float, aligned_highp> aligned_highp_mat4x4; + + /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 4, float, aligned_mediump> aligned_mediump_mat4x4; + + /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 4, float, aligned_lowp> aligned_lowp_mat4x4; + + /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 4, double, aligned_highp> aligned_highp_dmat4x4; + + /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 4, double, aligned_mediump> aligned_mediump_dmat4x4; + + /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 4, double, aligned_lowp> aligned_lowp_dmat4x4; + + /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 4, float, packed_highp> packed_highp_mat4x4; + + /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 4, float, packed_mediump> packed_mediump_mat4x4; + + /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. + typedef mat<4, 4, float, packed_lowp> packed_lowp_mat4x4; + + /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. + typedef mat<4, 4, double, packed_highp> packed_highp_dmat4x4; + + /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. + typedef mat<4, 4, double, packed_mediump> packed_mediump_dmat4x4; + + /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. 
+ typedef mat<4, 4, double, packed_lowp> packed_lowp_dmat4x4; + + // -- default -- + +#if(defined(GLM_PRECISION_LOWP_FLOAT)) + typedef aligned_lowp_vec1 aligned_vec1; + typedef aligned_lowp_vec2 aligned_vec2; + typedef aligned_lowp_vec3 aligned_vec3; + typedef aligned_lowp_vec4 aligned_vec4; + typedef packed_lowp_vec1 packed_vec1; + typedef packed_lowp_vec2 packed_vec2; + typedef packed_lowp_vec3 packed_vec3; + typedef packed_lowp_vec4 packed_vec4; + + typedef aligned_lowp_mat2 aligned_mat2; + typedef aligned_lowp_mat3 aligned_mat3; + typedef aligned_lowp_mat4 aligned_mat4; + typedef packed_lowp_mat2 packed_mat2; + typedef packed_lowp_mat3 packed_mat3; + typedef packed_lowp_mat4 packed_mat4; + + typedef aligned_lowp_mat2x2 aligned_mat2x2; + typedef aligned_lowp_mat2x3 aligned_mat2x3; + typedef aligned_lowp_mat2x4 aligned_mat2x4; + typedef aligned_lowp_mat3x2 aligned_mat3x2; + typedef aligned_lowp_mat3x3 aligned_mat3x3; + typedef aligned_lowp_mat3x4 aligned_mat3x4; + typedef aligned_lowp_mat4x2 aligned_mat4x2; + typedef aligned_lowp_mat4x3 aligned_mat4x3; + typedef aligned_lowp_mat4x4 aligned_mat4x4; + typedef packed_lowp_mat2x2 packed_mat2x2; + typedef packed_lowp_mat2x3 packed_mat2x3; + typedef packed_lowp_mat2x4 packed_mat2x4; + typedef packed_lowp_mat3x2 packed_mat3x2; + typedef packed_lowp_mat3x3 packed_mat3x3; + typedef packed_lowp_mat3x4 packed_mat3x4; + typedef packed_lowp_mat4x2 packed_mat4x2; + typedef packed_lowp_mat4x3 packed_mat4x3; + typedef packed_lowp_mat4x4 packed_mat4x4; +#elif(defined(GLM_PRECISION_MEDIUMP_FLOAT)) + typedef aligned_mediump_vec1 aligned_vec1; + typedef aligned_mediump_vec2 aligned_vec2; + typedef aligned_mediump_vec3 aligned_vec3; + typedef aligned_mediump_vec4 aligned_vec4; + typedef packed_mediump_vec1 packed_vec1; + typedef packed_mediump_vec2 packed_vec2; + typedef packed_mediump_vec3 packed_vec3; + typedef packed_mediump_vec4 packed_vec4; + + typedef aligned_mediump_mat2 aligned_mat2; + typedef aligned_mediump_mat3 aligned_mat3; + typedef aligned_mediump_mat4 aligned_mat4; + typedef packed_mediump_mat2 packed_mat2; + typedef packed_mediump_mat3 packed_mat3; + typedef packed_mediump_mat4 packed_mat4; + + typedef aligned_mediump_mat2x2 aligned_mat2x2; + typedef aligned_mediump_mat2x3 aligned_mat2x3; + typedef aligned_mediump_mat2x4 aligned_mat2x4; + typedef aligned_mediump_mat3x2 aligned_mat3x2; + typedef aligned_mediump_mat3x3 aligned_mat3x3; + typedef aligned_mediump_mat3x4 aligned_mat3x4; + typedef aligned_mediump_mat4x2 aligned_mat4x2; + typedef aligned_mediump_mat4x3 aligned_mat4x3; + typedef aligned_mediump_mat4x4 aligned_mat4x4; + typedef packed_mediump_mat2x2 packed_mat2x2; + typedef packed_mediump_mat2x3 packed_mat2x3; + typedef packed_mediump_mat2x4 packed_mat2x4; + typedef packed_mediump_mat3x2 packed_mat3x2; + typedef packed_mediump_mat3x3 packed_mat3x3; + typedef packed_mediump_mat3x4 packed_mat3x4; + typedef packed_mediump_mat4x2 packed_mat4x2; + typedef packed_mediump_mat4x3 packed_mat4x3; + typedef packed_mediump_mat4x4 packed_mat4x4; +#else //defined(GLM_PRECISION_HIGHP_FLOAT) + /// 1 component vector aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_vec1 aligned_vec1; + + /// 2 components vector aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_vec2 aligned_vec2; + + /// 3 components vector aligned in memory of single-precision floating-point numbers. 
+ typedef aligned_highp_vec3 aligned_vec3; + + /// 4 components vector aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_vec4 aligned_vec4; + + /// 1 component vector tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_vec1 packed_vec1; + + /// 2 components vector tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_vec2 packed_vec2; + + /// 3 components vector tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_vec3 packed_vec3; + + /// 4 components vector tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_vec4 packed_vec4; + + /// 2 by 2 matrix tightly aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_mat2 aligned_mat2; + + /// 3 by 3 matrix tightly aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_mat3 aligned_mat3; + + /// 4 by 4 matrix tightly aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_mat4 aligned_mat4; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_mat2 packed_mat2; + + /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_mat3 packed_mat3; + + /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_mat4 packed_mat4; + + /// 2 by 2 matrix tightly aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_mat2x2 aligned_mat2x2; + + /// 2 by 3 matrix tightly aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_mat2x3 aligned_mat2x3; + + /// 2 by 4 matrix tightly aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_mat2x4 aligned_mat2x4; + + /// 3 by 2 matrix tightly aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_mat3x2 aligned_mat3x2; + + /// 3 by 3 matrix tightly aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_mat3x3 aligned_mat3x3; + + /// 3 by 4 matrix tightly aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_mat3x4 aligned_mat3x4; + + /// 4 by 2 matrix tightly aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_mat4x2 aligned_mat4x2; + + /// 4 by 3 matrix tightly aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_mat4x3 aligned_mat4x3; + + /// 4 by 4 matrix tightly aligned in memory of single-precision floating-point numbers. + typedef aligned_highp_mat4x4 aligned_mat4x4; + + /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_mat2x2 packed_mat2x2; + + /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_mat2x3 packed_mat2x3; + + /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_mat2x4 packed_mat2x4; + + /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_mat3x2 packed_mat3x2; + + /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_mat3x3 packed_mat3x3; + + /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers. 
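When none of the GLM_PRECISION_*_FLOAT macros is defined, the #else branch above is taken and every unqualified aligned_*/packed_* alias resolves to its highp counterpart. A minimal compile-time check of that mapping, assuming default precision and GLM_FORCE_ALIGNED_GENTYPES defined before including GLM.

    // Compile-time check of the default (highp) mapping shown above.
    #define GLM_FORCE_ALIGNED_GENTYPES
    #include <type_traits>
    #include <glm/gtc/type_aligned.hpp>

    static_assert(std::is_same<glm::aligned_vec4, glm::aligned_highp_vec4>::value,
                  "without a GLM_PRECISION_* define, aligned_vec4 is the highp variant");
    static_assert(std::is_same<glm::packed_mat4, glm::packed_highp_mat4>::value,
                  "and packed_mat4 maps to packed_highp_mat4");

    int main() { return 0; }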
+ typedef packed_highp_mat3x4 packed_mat3x4; + + /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_mat4x2 packed_mat4x2; + + /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_mat4x3 packed_mat4x3; + + /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers. + typedef packed_highp_mat4x4 packed_mat4x4; +#endif//GLM_PRECISION + +#if(defined(GLM_PRECISION_LOWP_DOUBLE)) + typedef aligned_lowp_dvec1 aligned_dvec1; + typedef aligned_lowp_dvec2 aligned_dvec2; + typedef aligned_lowp_dvec3 aligned_dvec3; + typedef aligned_lowp_dvec4 aligned_dvec4; + typedef packed_lowp_dvec1 packed_dvec1; + typedef packed_lowp_dvec2 packed_dvec2; + typedef packed_lowp_dvec3 packed_dvec3; + typedef packed_lowp_dvec4 packed_dvec4; + + typedef aligned_lowp_dmat2 aligned_dmat2; + typedef aligned_lowp_dmat3 aligned_dmat3; + typedef aligned_lowp_dmat4 aligned_dmat4; + typedef packed_lowp_dmat2 packed_dmat2; + typedef packed_lowp_dmat3 packed_dmat3; + typedef packed_lowp_dmat4 packed_dmat4; + + typedef aligned_lowp_dmat2x2 aligned_dmat2x2; + typedef aligned_lowp_dmat2x3 aligned_dmat2x3; + typedef aligned_lowp_dmat2x4 aligned_dmat2x4; + typedef aligned_lowp_dmat3x2 aligned_dmat3x2; + typedef aligned_lowp_dmat3x3 aligned_dmat3x3; + typedef aligned_lowp_dmat3x4 aligned_dmat3x4; + typedef aligned_lowp_dmat4x2 aligned_dmat4x2; + typedef aligned_lowp_dmat4x3 aligned_dmat4x3; + typedef aligned_lowp_dmat4x4 aligned_dmat4x4; + typedef packed_lowp_dmat2x2 packed_dmat2x2; + typedef packed_lowp_dmat2x3 packed_dmat2x3; + typedef packed_lowp_dmat2x4 packed_dmat2x4; + typedef packed_lowp_dmat3x2 packed_dmat3x2; + typedef packed_lowp_dmat3x3 packed_dmat3x3; + typedef packed_lowp_dmat3x4 packed_dmat3x4; + typedef packed_lowp_dmat4x2 packed_dmat4x2; + typedef packed_lowp_dmat4x3 packed_dmat4x3; + typedef packed_lowp_dmat4x4 packed_dmat4x4; +#elif(defined(GLM_PRECISION_MEDIUMP_DOUBLE)) + typedef aligned_mediump_dvec1 aligned_dvec1; + typedef aligned_mediump_dvec2 aligned_dvec2; + typedef aligned_mediump_dvec3 aligned_dvec3; + typedef aligned_mediump_dvec4 aligned_dvec4; + typedef packed_mediump_dvec1 packed_dvec1; + typedef packed_mediump_dvec2 packed_dvec2; + typedef packed_mediump_dvec3 packed_dvec3; + typedef packed_mediump_dvec4 packed_dvec4; + + typedef aligned_mediump_dmat2 aligned_dmat2; + typedef aligned_mediump_dmat3 aligned_dmat3; + typedef aligned_mediump_dmat4 aligned_dmat4; + typedef packed_mediump_dmat2 packed_dmat2; + typedef packed_mediump_dmat3 packed_dmat3; + typedef packed_mediump_dmat4 packed_dmat4; + + typedef aligned_mediump_dmat2x2 aligned_dmat2x2; + typedef aligned_mediump_dmat2x3 aligned_dmat2x3; + typedef aligned_mediump_dmat2x4 aligned_dmat2x4; + typedef aligned_mediump_dmat3x2 aligned_dmat3x2; + typedef aligned_mediump_dmat3x3 aligned_dmat3x3; + typedef aligned_mediump_dmat3x4 aligned_dmat3x4; + typedef aligned_mediump_dmat4x2 aligned_dmat4x2; + typedef aligned_mediump_dmat4x3 aligned_dmat4x3; + typedef aligned_mediump_dmat4x4 aligned_dmat4x4; + typedef packed_mediump_dmat2x2 packed_dmat2x2; + typedef packed_mediump_dmat2x3 packed_dmat2x3; + typedef packed_mediump_dmat2x4 packed_dmat2x4; + typedef packed_mediump_dmat3x2 packed_dmat3x2; + typedef packed_mediump_dmat3x3 packed_dmat3x3; + typedef packed_mediump_dmat3x4 packed_dmat3x4; + typedef packed_mediump_dmat4x2 packed_dmat4x2; + typedef packed_mediump_dmat4x3 packed_dmat4x3; + typedef packed_mediump_dmat4x4 
packed_dmat4x4; +#else //defined(GLM_PRECISION_HIGHP_DOUBLE) + /// 1 component vector aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dvec1 aligned_dvec1; + + /// 2 components vector aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dvec2 aligned_dvec2; + + /// 3 components vector aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dvec3 aligned_dvec3; + + /// 4 components vector aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dvec4 aligned_dvec4; + + /// 1 component vector tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dvec1 packed_dvec1; + + /// 2 components vector tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dvec2 packed_dvec2; + + /// 3 components vector tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dvec3 packed_dvec3; + + /// 4 components vector tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dvec4 packed_dvec4; + + /// 2 by 2 matrix tightly aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dmat2 aligned_dmat2; + + /// 3 by 3 matrix tightly aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dmat3 aligned_dmat3; + + /// 4 by 4 matrix tightly aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dmat4 aligned_dmat4; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat2 packed_dmat2; + + /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat3 packed_dmat3; + + /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat4 packed_dmat4; + + /// 2 by 2 matrix tightly aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dmat2x2 aligned_dmat2x2; + + /// 2 by 3 matrix tightly aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dmat2x3 aligned_dmat2x3; + + /// 2 by 4 matrix tightly aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dmat2x4 aligned_dmat2x4; + + /// 3 by 2 matrix tightly aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dmat3x2 aligned_dmat3x2; + + /// 3 by 3 matrix tightly aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dmat3x3 aligned_dmat3x3; + + /// 3 by 4 matrix tightly aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dmat3x4 aligned_dmat3x4; + + /// 4 by 2 matrix tightly aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dmat4x2 aligned_dmat4x2; + + /// 4 by 3 matrix tightly aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dmat4x3 aligned_dmat4x3; + + /// 4 by 4 matrix tightly aligned in memory of double-precision floating-point numbers. + typedef aligned_highp_dmat4x4 aligned_dmat4x4; + + /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat2x2 packed_dmat2x2; + + /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers. 
+ typedef packed_highp_dmat2x3 packed_dmat2x3; + + /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat2x4 packed_dmat2x4; + + /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat3x2 packed_dmat3x2; + + /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat3x3 packed_dmat3x3; + + /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat3x4 packed_dmat3x4; + + /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat4x2 packed_dmat4x2; + + /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat4x3 packed_dmat4x3; + + /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers. + typedef packed_highp_dmat4x4 packed_dmat4x4; +#endif//GLM_PRECISION + +#if(defined(GLM_PRECISION_LOWP_INT)) + typedef aligned_lowp_ivec1 aligned_ivec1; + typedef aligned_lowp_ivec2 aligned_ivec2; + typedef aligned_lowp_ivec3 aligned_ivec3; + typedef aligned_lowp_ivec4 aligned_ivec4; +#elif(defined(GLM_PRECISION_MEDIUMP_INT)) + typedef aligned_mediump_ivec1 aligned_ivec1; + typedef aligned_mediump_ivec2 aligned_ivec2; + typedef aligned_mediump_ivec3 aligned_ivec3; + typedef aligned_mediump_ivec4 aligned_ivec4; +#else //defined(GLM_PRECISION_HIGHP_INT) + /// 1 component vector aligned in memory of signed integer numbers. + typedef aligned_highp_ivec1 aligned_ivec1; + + /// 2 components vector aligned in memory of signed integer numbers. + typedef aligned_highp_ivec2 aligned_ivec2; + + /// 3 components vector aligned in memory of signed integer numbers. + typedef aligned_highp_ivec3 aligned_ivec3; + + /// 4 components vector aligned in memory of signed integer numbers. + typedef aligned_highp_ivec4 aligned_ivec4; + + /// 1 component vector tightly packed in memory of signed integer numbers. + typedef packed_highp_ivec1 packed_ivec1; + + /// 2 components vector tightly packed in memory of signed integer numbers. + typedef packed_highp_ivec2 packed_ivec2; + + /// 3 components vector tightly packed in memory of signed integer numbers. + typedef packed_highp_ivec3 packed_ivec3; + + /// 4 components vector tightly packed in memory of signed integer numbers. + typedef packed_highp_ivec4 packed_ivec4; +#endif//GLM_PRECISION + + // -- Unsigned integer definition -- + +#if(defined(GLM_PRECISION_LOWP_UINT)) + typedef aligned_lowp_uvec1 aligned_uvec1; + typedef aligned_lowp_uvec2 aligned_uvec2; + typedef aligned_lowp_uvec3 aligned_uvec3; + typedef aligned_lowp_uvec4 aligned_uvec4; +#elif(defined(GLM_PRECISION_MEDIUMP_UINT)) + typedef aligned_mediump_uvec1 aligned_uvec1; + typedef aligned_mediump_uvec2 aligned_uvec2; + typedef aligned_mediump_uvec3 aligned_uvec3; + typedef aligned_mediump_uvec4 aligned_uvec4; +#else //defined(GLM_PRECISION_HIGHP_UINT) + /// 1 component vector aligned in memory of unsigned integer numbers. + typedef aligned_highp_uvec1 aligned_uvec1; + + /// 2 components vector aligned in memory of unsigned integer numbers. + typedef aligned_highp_uvec2 aligned_uvec2; + + /// 3 components vector aligned in memory of unsigned integer numbers. + typedef aligned_highp_uvec3 aligned_uvec3; + + /// 4 components vector aligned in memory of unsigned integer numbers. 
+ typedef aligned_highp_uvec4 aligned_uvec4;
+
+ /// 1 component vector tightly packed in memory of unsigned integer numbers.
+ typedef packed_highp_uvec1 packed_uvec1;
+
+ /// 2 components vector tightly packed in memory of unsigned integer numbers.
+ typedef packed_highp_uvec2 packed_uvec2;
+
+ /// 3 components vector tightly packed in memory of unsigned integer numbers.
+ typedef packed_highp_uvec3 packed_uvec3;
+
+ /// 4 components vector tightly packed in memory of unsigned integer numbers.
+ typedef packed_highp_uvec4 packed_uvec4;
+#endif//GLM_PRECISION
+
+#if(defined(GLM_PRECISION_LOWP_BOOL))
+ typedef aligned_lowp_bvec1 aligned_bvec1;
+ typedef aligned_lowp_bvec2 aligned_bvec2;
+ typedef aligned_lowp_bvec3 aligned_bvec3;
+ typedef aligned_lowp_bvec4 aligned_bvec4;
+#elif(defined(GLM_PRECISION_MEDIUMP_BOOL))
+ typedef aligned_mediump_bvec1 aligned_bvec1;
+ typedef aligned_mediump_bvec2 aligned_bvec2;
+ typedef aligned_mediump_bvec3 aligned_bvec3;
+ typedef aligned_mediump_bvec4 aligned_bvec4;
+#else //defined(GLM_PRECISION_HIGHP_BOOL)
+ /// 1 component vector aligned in memory of bool values.
+ typedef aligned_highp_bvec1 aligned_bvec1;
+
+ /// 2 components vector aligned in memory of bool values.
+ typedef aligned_highp_bvec2 aligned_bvec2;
+
+ /// 3 components vector aligned in memory of bool values.
+ typedef aligned_highp_bvec3 aligned_bvec3;
+
+ /// 4 components vector aligned in memory of bool values.
+ typedef aligned_highp_bvec4 aligned_bvec4;
+
+ /// 1 component vector tightly packed in memory of bool values.
+ typedef packed_highp_bvec1 packed_bvec1;
+
+ /// 2 components vector tightly packed in memory of bool values.
+ typedef packed_highp_bvec2 packed_bvec2;
+
+ /// 3 components vector tightly packed in memory of bool values.
+ typedef packed_highp_bvec3 packed_bvec3;
+
+ /// 4 components vector tightly packed in memory of bool values.
+ typedef packed_highp_bvec4 packed_bvec4;
+#endif//GLM_PRECISION
+
+ /// @}
+}//namespace glm
diff --git a/includes/glm/gtc/type_precision.hpp b/includes/glm/gtc/type_precision.hpp
new file mode 100644
index 0000000..775e2f4
--- /dev/null
+++ b/includes/glm/gtc/type_precision.hpp
@@ -0,0 +1,2094 @@
+/// @ref gtc_type_precision
+/// @file glm/gtc/type_precision.hpp
+///
+/// @see core (dependence)
+/// @see gtc_quaternion (dependence)
+///
+/// @defgroup gtc_type_precision GLM_GTC_type_precision
+/// @ingroup gtc
+///
+/// Include to use the features of this extension.
+///
+/// Defines specific C++-based qualifier types.
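The GLM_GTC_type_aligned typedefs above give every vector and matrix two memory layouts: an aligned_* flavour that is over-aligned so SIMD code can load it directly, and a packed_* flavour with no padding for tightly packed buffers. A minimal sketch of the difference, assuming a compiler on which GLM enables aligned gentypes; the printed sizes and alignments are platform-dependent:

#include <cstdio>
#include <glm/glm.hpp>
#include <glm/gtc/type_aligned.hpp>

int main()
{
    // aligned_vec4 carries the extra alignment GLM wants for SIMD loads;
    // packed_vec4 is guaranteed to be four contiguous floats with no padding.
    std::printf("aligned_vec4: size=%zu align=%zu\n",
                sizeof(glm::aligned_vec4), alignof(glm::aligned_vec4));
    std::printf("packed_vec4:  size=%zu align=%zu\n",
                sizeof(glm::packed_vec4), alignof(glm::packed_vec4));

    // Both are ordinary glm vectors, so they convert to and mix with vec4.
    glm::aligned_vec4 a(1.0f, 2.0f, 3.0f, 4.0f);
    glm::packed_vec4  p(a);
    glm::vec4         v = glm::vec4(a) + glm::vec4(p);
    std::printf("v = (%f, %f, %f, %f)\n", v.x, v.y, v.z, v.w);
    return 0;
}

In short, packed_* types suit data that must match a bytewise layout (e.g. an interleaved vertex buffer), while aligned_* types trade a little padding for SIMD-friendly loads.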
+ +#pragma once + +// Dependency: +#include "../gtc/quaternion.hpp" +#include "../gtc/vec1.hpp" +#include "../ext/vector_int1_sized.hpp" +#include "../ext/vector_int2_sized.hpp" +#include "../ext/vector_int3_sized.hpp" +#include "../ext/vector_int4_sized.hpp" +#include "../ext/scalar_int_sized.hpp" +#include "../ext/vector_uint1_sized.hpp" +#include "../ext/vector_uint2_sized.hpp" +#include "../ext/vector_uint3_sized.hpp" +#include "../ext/vector_uint4_sized.hpp" +#include "../ext/scalar_uint_sized.hpp" +#include "../detail/type_vec2.hpp" +#include "../detail/type_vec3.hpp" +#include "../detail/type_vec4.hpp" +#include "../detail/type_mat2x2.hpp" +#include "../detail/type_mat2x3.hpp" +#include "../detail/type_mat2x4.hpp" +#include "../detail/type_mat3x2.hpp" +#include "../detail/type_mat3x3.hpp" +#include "../detail/type_mat3x4.hpp" +#include "../detail/type_mat4x2.hpp" +#include "../detail/type_mat4x3.hpp" +#include "../detail/type_mat4x4.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_type_precision extension included") +#endif + +namespace glm +{ + /////////////////////////// + // Signed int vector types + + /// @addtogroup gtc_type_precision + /// @{ + + /// Low qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 lowp_int8; + + /// Low qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 lowp_int16; + + /// Low qualifier 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 lowp_int32; + + /// Low qualifier 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 lowp_int64; + + /// Low qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 lowp_int8_t; + + /// Low qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 lowp_int16_t; + + /// Low qualifier 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 lowp_int32_t; + + /// Low qualifier 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 lowp_int64_t; + + /// Low qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 lowp_i8; + + /// Low qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 lowp_i16; + + /// Low qualifier 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 lowp_i32; + + /// Low qualifier 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 lowp_i64; + + /// Medium qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 mediump_int8; + + /// Medium qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 mediump_int16; + + /// Medium qualifier 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 mediump_int32; + + /// Medium qualifier 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 mediump_int64; + + /// Medium qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 mediump_int8_t; + + /// Medium qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 mediump_int16_t; + + /// Medium qualifier 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 mediump_int32_t; + + /// Medium qualifier 64 bit signed integer type. 
+ /// @see gtc_type_precision + typedef detail::int64 mediump_int64_t; + + /// Medium qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 mediump_i8; + + /// Medium qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 mediump_i16; + + /// Medium qualifier 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 mediump_i32; + + /// Medium qualifier 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 mediump_i64; + + /// High qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 highp_int8; + + /// High qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 highp_int16; + + /// High qualifier 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 highp_int32; + + /// High qualifier 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 highp_int64; + + /// High qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 highp_int8_t; + + /// High qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 highp_int16_t; + + /// 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 highp_int32_t; + + /// High qualifier 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 highp_int64_t; + + /// High qualifier 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 highp_i8; + + /// High qualifier 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 highp_i16; + + /// High qualifier 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 highp_i32; + + /// High qualifier 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 highp_i64; + + +#if GLM_HAS_EXTENDED_INTEGER_TYPE + using std::int8_t; + using std::int16_t; + using std::int32_t; + using std::int64_t; +#else + /// 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 int8_t; + + /// 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 int16_t; + + /// 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 int32_t; + + /// 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 int64_t; +#endif + + /// 8 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int8 i8; + + /// 16 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int16 i16; + + /// 32 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int32 i32; + + /// 64 bit signed integer type. + /// @see gtc_type_precision + typedef detail::int64 i64; + + ///////////////////////////// + // Unsigned int vector types + + /// Low qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 lowp_uint8; + + /// Low qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 lowp_uint16; + + /// Low qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 lowp_uint32; + + /// Low qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 lowp_uint64; + + /// Low qualifier 8 bit unsigned integer type. 
+ /// @see gtc_type_precision + typedef detail::uint8 lowp_uint8_t; + + /// Low qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 lowp_uint16_t; + + /// Low qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 lowp_uint32_t; + + /// Low qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 lowp_uint64_t; + + /// Low qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 lowp_u8; + + /// Low qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 lowp_u16; + + /// Low qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 lowp_u32; + + /// Low qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 lowp_u64; + + /// Medium qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 mediump_uint8; + + /// Medium qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 mediump_uint16; + + /// Medium qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 mediump_uint32; + + /// Medium qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 mediump_uint64; + + /// Medium qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 mediump_uint8_t; + + /// Medium qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 mediump_uint16_t; + + /// Medium qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 mediump_uint32_t; + + /// Medium qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 mediump_uint64_t; + + /// Medium qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 mediump_u8; + + /// Medium qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 mediump_u16; + + /// Medium qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 mediump_u32; + + /// Medium qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 mediump_u64; + + /// High qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 highp_uint8; + + /// High qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 highp_uint16; + + /// High qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 highp_uint32; + + /// High qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 highp_uint64; + + /// High qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 highp_uint8_t; + + /// High qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 highp_uint16_t; + + /// High qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 highp_uint32_t; + + /// High qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 highp_uint64_t; + + /// High qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 highp_u8; + + /// High qualifier 16 bit unsigned integer type. 
+ /// @see gtc_type_precision + typedef detail::uint16 highp_u16; + + /// High qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 highp_u32; + + /// High qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 highp_u64; + +#if GLM_HAS_EXTENDED_INTEGER_TYPE + using std::uint8_t; + using std::uint16_t; + using std::uint32_t; + using std::uint64_t; +#else + /// Default qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 uint8_t; + + /// Default qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 uint16_t; + + /// Default qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 uint32_t; + + /// Default qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 uint64_t; +#endif + + /// Default qualifier 8 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint8 u8; + + /// Default qualifier 16 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint16 u16; + + /// Default qualifier 32 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint32 u32; + + /// Default qualifier 64 bit unsigned integer type. + /// @see gtc_type_precision + typedef detail::uint64 u64; + + + + + + ////////////////////// + // Float vector types + + /// Single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float float32; + + /// Double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef double float64; + + /// Low 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 lowp_float32; + + /// Low 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 lowp_float64; + + /// Low 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 lowp_float32_t; + + /// Low 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 lowp_float64_t; + + /// Low 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 lowp_f32; + + /// Low 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 lowp_f64; + + /// Low 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 lowp_float32; + + /// Low 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 lowp_float64; + + /// Low 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 lowp_float32_t; + + /// Low 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 lowp_float64_t; + + /// Low 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 lowp_f32; + + /// Low 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 lowp_f64; + + + /// Low 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 lowp_float32; + + /// Low 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 lowp_float64; + + /// Low 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 lowp_float32_t; + + /// Low 64 bit double-qualifier floating-point scalar. 
+ /// @see gtc_type_precision + typedef float64 lowp_float64_t; + + /// Low 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 lowp_f32; + + /// Low 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 lowp_f64; + + + /// Medium 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 mediump_float32; + + /// Medium 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 mediump_float64; + + /// Medium 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 mediump_float32_t; + + /// Medium 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 mediump_float64_t; + + /// Medium 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 mediump_f32; + + /// Medium 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 mediump_f64; + + + /// High 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 highp_float32; + + /// High 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 highp_float64; + + /// High 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 highp_float32_t; + + /// High 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 highp_float64_t; + + /// High 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 highp_f32; + + /// High 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 highp_f64; + + +#if(defined(GLM_PRECISION_LOWP_FLOAT)) + /// Default 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef lowp_float32_t float32_t; + + /// Default 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef lowp_float64_t float64_t; + + /// Default 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef lowp_f32 f32; + + /// Default 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef lowp_f64 f64; + +#elif(defined(GLM_PRECISION_MEDIUMP_FLOAT)) + /// Default 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef mediump_float32 float32_t; + + /// Default 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef mediump_float64 float64_t; + + /// Default 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef mediump_float32 f32; + + /// Default 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef mediump_float64 f64; + +#else//(defined(GLM_PRECISION_HIGHP_FLOAT)) + + /// Default 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef highp_float32_t float32_t; + + /// Default 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef highp_float64_t float64_t; + + /// Default 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef highp_float32_t f32; + + /// Default 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef highp_float64_t f64; +#endif + + + /// Low single-qualifier floating-point vector of 1 component. 
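The scalar typedefs above boil down to fixed-width integers plus float/double; the lowp/mediump/highp spellings and the GLM_PRECISION_* macros only select which alias the unqualified names (i8, u16, f32, float32_t, ...) refer to. A minimal sketch of typical use, assuming <glm/gtc/type_precision.hpp> is on the include path:

#include <type_traits>
#include <glm/gtc/type_precision.hpp>

// The sized aliases are plain fixed-width scalars, so they interoperate
// with <cstdint> types and with GLSL-style binary layouts.
static_assert(sizeof(glm::i8)  == 1, "i8 is 8 bits");
static_assert(sizeof(glm::u16) == 2, "u16 is 16 bits");
static_assert(sizeof(glm::i64) == 8, "i64 is 64 bits");
static_assert(std::is_same<glm::f32, float>::value,  "f32 is float");
static_assert(std::is_same<glm::f64, double>::value, "f64 is double");

// Typical use: describing a binary vertex layout with explicit sizes.
struct PackedVertex
{
    glm::f32 position[3]; // 12 bytes
    glm::u8  color[4];    //  4 bytes, normalized RGBA
    glm::u16 uv[2];       //  4 bytes of texture coordinates
};

Because f32 and f64 resolve to plain float and double in every precision branch, the assertions above hold regardless of which GLM_PRECISION_* macro is defined.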
+ /// @see gtc_type_precision + typedef vec<1, float, lowp> lowp_fvec1; + + /// Low single-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, float, lowp> lowp_fvec2; + + /// Low single-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, float, lowp> lowp_fvec3; + + /// Low single-qualifier floating-point vector of 4 components. + /// @see gtc_type_precision + typedef vec<4, float, lowp> lowp_fvec4; + + + /// Medium single-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, float, mediump> mediump_fvec1; + + /// Medium Single-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, float, mediump> mediump_fvec2; + + /// Medium Single-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, float, mediump> mediump_fvec3; + + /// Medium Single-qualifier floating-point vector of 4 components. + /// @see gtc_type_precision + typedef vec<4, float, mediump> mediump_fvec4; + + + /// High single-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, float, highp> highp_fvec1; + + /// High Single-qualifier floating-point vector of 2 components. + /// @see core_precision + typedef vec<2, float, highp> highp_fvec2; + + /// High Single-qualifier floating-point vector of 3 components. + /// @see core_precision + typedef vec<3, float, highp> highp_fvec3; + + /// High Single-qualifier floating-point vector of 4 components. + /// @see core_precision + typedef vec<4, float, highp> highp_fvec4; + + + /// Low single-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, f32, lowp> lowp_f32vec1; + + /// Low single-qualifier floating-point vector of 2 components. + /// @see core_precision + typedef vec<2, f32, lowp> lowp_f32vec2; + + /// Low single-qualifier floating-point vector of 3 components. + /// @see core_precision + typedef vec<3, f32, lowp> lowp_f32vec3; + + /// Low single-qualifier floating-point vector of 4 components. + /// @see core_precision + typedef vec<4, f32, lowp> lowp_f32vec4; + + /// Medium single-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, f32, mediump> mediump_f32vec1; + + /// Medium single-qualifier floating-point vector of 2 components. + /// @see core_precision + typedef vec<2, f32, mediump> mediump_f32vec2; + + /// Medium single-qualifier floating-point vector of 3 components. + /// @see core_precision + typedef vec<3, f32, mediump> mediump_f32vec3; + + /// Medium single-qualifier floating-point vector of 4 components. + /// @see core_precision + typedef vec<4, f32, mediump> mediump_f32vec4; + + /// High single-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, f32, highp> highp_f32vec1; + + /// High single-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, f32, highp> highp_f32vec2; + + /// High single-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, f32, highp> highp_f32vec3; + + /// High single-qualifier floating-point vector of 4 components. + /// @see gtc_type_precision + typedef vec<4, f32, highp> highp_f32vec4; + + + /// Low double-qualifier floating-point vector of 1 component. 
+ /// @see gtc_type_precision + typedef vec<1, f64, lowp> lowp_f64vec1; + + /// Low double-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, f64, lowp> lowp_f64vec2; + + /// Low double-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, f64, lowp> lowp_f64vec3; + + /// Low double-qualifier floating-point vector of 4 components. + /// @see gtc_type_precision + typedef vec<4, f64, lowp> lowp_f64vec4; + + /// Medium double-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, f64, mediump> mediump_f64vec1; + + /// Medium double-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, f64, mediump> mediump_f64vec2; + + /// Medium double-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, f64, mediump> mediump_f64vec3; + + /// Medium double-qualifier floating-point vector of 4 components. + /// @see gtc_type_precision + typedef vec<4, f64, mediump> mediump_f64vec4; + + /// High double-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, f64, highp> highp_f64vec1; + + /// High double-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, f64, highp> highp_f64vec2; + + /// High double-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, f64, highp> highp_f64vec3; + + /// High double-qualifier floating-point vector of 4 components. + /// @see gtc_type_precision + typedef vec<4, f64, highp> highp_f64vec4; + + + + ////////////////////// + // Float matrix types + + /// Low single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef lowp_f32 lowp_fmat1x1; + + /// Low single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, lowp> lowp_fmat2x2; + + /// Low single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, lowp> lowp_fmat2x3; + + /// Low single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, lowp> lowp_fmat2x4; + + /// Low single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, lowp> lowp_fmat3x2; + + /// Low single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, lowp> lowp_fmat3x3; + + /// Low single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, lowp> lowp_fmat3x4; + + /// Low single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, lowp> lowp_fmat4x2; + + /// Low single-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f32, lowp> lowp_fmat4x3; + + /// Low single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, lowp> lowp_fmat4x4; + + /// Low single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef lowp_fmat1x1 lowp_fmat1; + + /// Low single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef lowp_fmat2x2 lowp_fmat2; + + /// Low single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef lowp_fmat3x3 lowp_fmat3; + + /// Low single-qualifier floating-point 4x4 matrix. 
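All of the fvec*/f32vec*/f64vec* typedefs above are instantiations of the same vec<L, T, Q> template; the qualifier is a storage/precision hint and does not change the available operations, so generic code can accept any of them. A minimal sketch (squared_length is a hypothetical helper introduced only for illustration):

#include <glm/glm.hpp>
#include <glm/gtc/type_precision.hpp>

// Works for every qualifier and component type of the typedefs above.
template<glm::length_t L, typename T, glm::qualifier Q>
T squared_length(glm::vec<L, T, Q> const& v)
{
    return glm::dot(v, v);
}

int main()
{
    glm::lowp_fvec3    a(1.0f, 2.0f, 2.0f);
    glm::highp_f64vec3 b(1.0, 2.0, 2.0);

    float  la = squared_length(a); // 9.0f
    double lb = squared_length(b); // 9.0
    return (la == 9.0f && lb == 9.0) ? 0 : 1;
}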
+ /// @see gtc_type_precision + typedef lowp_fmat4x4 lowp_fmat4; + + + /// Medium single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef mediump_f32 mediump_fmat1x1; + + /// Medium single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, mediump> mediump_fmat2x2; + + /// Medium single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, mediump> mediump_fmat2x3; + + /// Medium single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, mediump> mediump_fmat2x4; + + /// Medium single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, mediump> mediump_fmat3x2; + + /// Medium single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, mediump> mediump_fmat3x3; + + /// Medium single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, mediump> mediump_fmat3x4; + + /// Medium single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, mediump> mediump_fmat4x2; + + /// Medium single-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f32, mediump> mediump_fmat4x3; + + /// Medium single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, mediump> mediump_fmat4x4; + + /// Medium single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef mediump_fmat1x1 mediump_fmat1; + + /// Medium single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mediump_fmat2x2 mediump_fmat2; + + /// Medium single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mediump_fmat3x3 mediump_fmat3; + + /// Medium single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mediump_fmat4x4 mediump_fmat4; + + + /// High single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef highp_f32 highp_fmat1x1; + + /// High single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, highp> highp_fmat2x2; + + /// High single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, highp> highp_fmat2x3; + + /// High single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, highp> highp_fmat2x4; + + /// High single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, highp> highp_fmat3x2; + + /// High single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, highp> highp_fmat3x3; + + /// High single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, highp> highp_fmat3x4; + + /// High single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, highp> highp_fmat4x2; + + /// High single-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f32, highp> highp_fmat4x3; + + /// High single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, highp> highp_fmat4x4; + + /// High single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef highp_fmat1x1 highp_fmat1; + + /// High single-qualifier floating-point 2x2 matrix. 
+ /// @see gtc_type_precision + typedef highp_fmat2x2 highp_fmat2; + + /// High single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef highp_fmat3x3 highp_fmat3; + + /// High single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef highp_fmat4x4 highp_fmat4; + + + /// Low single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f32 lowp_f32mat1x1; + + /// Low single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, lowp> lowp_f32mat2x2; + + /// Low single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, lowp> lowp_f32mat2x3; + + /// Low single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, lowp> lowp_f32mat2x4; + + /// Low single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, lowp> lowp_f32mat3x2; + + /// Low single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, lowp> lowp_f32mat3x3; + + /// Low single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, lowp> lowp_f32mat3x4; + + /// Low single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, lowp> lowp_f32mat4x2; + + /// Low single-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f32, lowp> lowp_f32mat4x3; + + /// Low single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, lowp> lowp_f32mat4x4; + + /// Low single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef detail::tmat1x1 lowp_f32mat1; + + /// Low single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef lowp_f32mat2x2 lowp_f32mat2; + + /// Low single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef lowp_f32mat3x3 lowp_f32mat3; + + /// Low single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef lowp_f32mat4x4 lowp_f32mat4; + + + /// High single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f32 mediump_f32mat1x1; + + /// Low single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, mediump> mediump_f32mat2x2; + + /// Medium single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, mediump> mediump_f32mat2x3; + + /// Medium single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, mediump> mediump_f32mat2x4; + + /// Medium single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, mediump> mediump_f32mat3x2; + + /// Medium single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, mediump> mediump_f32mat3x3; + + /// Medium single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, mediump> mediump_f32mat3x4; + + /// Medium single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, mediump> mediump_f32mat4x2; + + /// Medium single-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f32, mediump> mediump_f32mat4x3; + + /// Medium single-qualifier floating-point 4x4 matrix. 
+ /// @see gtc_type_precision + typedef mat<4, 4, f32, mediump> mediump_f32mat4x4; + + /// Medium single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef detail::tmat1x1 f32mat1; + + /// Medium single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mediump_f32mat2x2 mediump_f32mat2; + + /// Medium single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mediump_f32mat3x3 mediump_f32mat3; + + /// Medium single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mediump_f32mat4x4 mediump_f32mat4; + + + /// High single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f32 highp_f32mat1x1; + + /// High single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, highp> highp_f32mat2x2; + + /// High single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, highp> highp_f32mat2x3; + + /// High single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, highp> highp_f32mat2x4; + + /// High single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, highp> highp_f32mat3x2; + + /// High single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, highp> highp_f32mat3x3; + + /// High single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, highp> highp_f32mat3x4; + + /// High single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, highp> highp_f32mat4x2; + + /// High single-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f32, highp> highp_f32mat4x3; + + /// High single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, highp> highp_f32mat4x4; + + /// High single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef detail::tmat1x1 f32mat1; + + /// High single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef highp_f32mat2x2 highp_f32mat2; + + /// High single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef highp_f32mat3x3 highp_f32mat3; + + /// High single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef highp_f32mat4x4 highp_f32mat4; + + + /// Low double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f64 lowp_f64mat1x1; + + /// Low double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f64, lowp> lowp_f64mat2x2; + + /// Low double-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f64, lowp> lowp_f64mat2x3; + + /// Low double-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f64, lowp> lowp_f64mat2x4; + + /// Low double-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f64, lowp> lowp_f64mat3x2; + + /// Low double-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f64, lowp> lowp_f64mat3x3; + + /// Low double-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f64, lowp> lowp_f64mat3x4; + + /// Low double-qualifier floating-point 4x2 matrix. 
+ /// @see gtc_type_precision + typedef mat<4, 2, f64, lowp> lowp_f64mat4x2; + + /// Low double-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f64, lowp> lowp_f64mat4x3; + + /// Low double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f64, lowp> lowp_f64mat4x4; + + /// Low double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef lowp_f64mat1x1 lowp_f64mat1; + + /// Low double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef lowp_f64mat2x2 lowp_f64mat2; + + /// Low double-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef lowp_f64mat3x3 lowp_f64mat3; + + /// Low double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef lowp_f64mat4x4 lowp_f64mat4; + + + /// Medium double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f64 Highp_f64mat1x1; + + /// Medium double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f64, mediump> mediump_f64mat2x2; + + /// Medium double-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f64, mediump> mediump_f64mat2x3; + + /// Medium double-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f64, mediump> mediump_f64mat2x4; + + /// Medium double-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f64, mediump> mediump_f64mat3x2; + + /// Medium double-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f64, mediump> mediump_f64mat3x3; + + /// Medium double-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f64, mediump> mediump_f64mat3x4; + + /// Medium double-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f64, mediump> mediump_f64mat4x2; + + /// Medium double-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f64, mediump> mediump_f64mat4x3; + + /// Medium double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f64, mediump> mediump_f64mat4x4; + + /// Medium double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef mediump_f64mat1x1 mediump_f64mat1; + + /// Medium double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mediump_f64mat2x2 mediump_f64mat2; + + /// Medium double-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mediump_f64mat3x3 mediump_f64mat3; + + /// Medium double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mediump_f64mat4x4 mediump_f64mat4; + + /// High double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f64 highp_f64mat1x1; + + /// High double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f64, highp> highp_f64mat2x2; + + /// High double-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f64, highp> highp_f64mat2x3; + + /// High double-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f64, highp> highp_f64mat2x4; + + /// High double-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f64, highp> highp_f64mat3x2; + + /// High double-qualifier floating-point 3x3 matrix. 
+ /// @see gtc_type_precision + typedef mat<3, 3, f64, highp> highp_f64mat3x3; + + /// High double-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f64, highp> highp_f64mat3x4; + + /// High double-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f64, highp> highp_f64mat4x2; + + /// High double-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f64, highp> highp_f64mat4x3; + + /// High double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f64, highp> highp_f64mat4x4; + + /// High double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef highp_f64mat1x1 highp_f64mat1; + + /// High double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef highp_f64mat2x2 highp_f64mat2; + + /// High double-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef highp_f64mat3x3 highp_f64mat3; + + /// High double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef highp_f64mat4x4 highp_f64mat4; + + + ///////////////////////////// + // Signed int vector types + + /// Low qualifier signed integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, int, lowp> lowp_ivec1; + + /// Low qualifier signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, int, lowp> lowp_ivec2; + + /// Low qualifier signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, int, lowp> lowp_ivec3; + + /// Low qualifier signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, int, lowp> lowp_ivec4; + + + /// Medium qualifier signed integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, int, mediump> mediump_ivec1; + + /// Medium qualifier signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, int, mediump> mediump_ivec2; + + /// Medium qualifier signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, int, mediump> mediump_ivec3; + + /// Medium qualifier signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, int, mediump> mediump_ivec4; + + + /// High qualifier signed integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, int, highp> highp_ivec1; + + /// High qualifier signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, int, highp> highp_ivec2; + + /// High qualifier signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, int, highp> highp_ivec3; + + /// High qualifier signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, int, highp> highp_ivec4; + + + /// Low qualifier 8 bit signed integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, i8, lowp> lowp_i8vec1; + + /// Low qualifier 8 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i8, lowp> lowp_i8vec2; + + /// Low qualifier 8 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i8, lowp> lowp_i8vec3; + + /// Low qualifier 8 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i8, lowp> lowp_i8vec4; + + + /// Medium qualifier 8 bit signed integer scalar type. 
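The fmat/f32mat/f64mat typedefs that close out the float section follow the same pattern for matrices: highp_f64mat4 is mat<4, 4, double, highp> and behaves like glm::mat4, just with double components. A minimal sketch that mixes the double- and single-precision variants (conversions are written as direct constructions here):

#include <glm/glm.hpp>
#include <glm/gtc/type_precision.hpp>

int main()
{
    glm::highp_f64mat4 m(1.0);                      // identity
    m[3] = glm::highp_f64vec4(10.0, 0.0, 0.0, 1.0); // translation in the last column
    glm::highp_f64vec4 v(1.0, 2.0, 3.0, 1.0);
    glm::highp_f64vec4 r = m * v;                   // (11, 2, 3, 1)

    // Moving between precisions is just a construction of the other typedef.
    glm::highp_f32mat4 ms(m);
    glm::highp_fvec4   rs = ms * glm::highp_fvec4(v);
    return (r.x == 11.0 && rs.x == 11.0f) ? 0 : 1;
}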
+ /// @see gtc_type_precision + typedef vec<1, i8, mediump> mediump_i8vec1; + + /// Medium qualifier 8 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i8, mediump> mediump_i8vec2; + + /// Medium qualifier 8 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i8, mediump> mediump_i8vec3; + + /// Medium qualifier 8 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i8, mediump> mediump_i8vec4; + + + /// High qualifier 8 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i8, highp> highp_i8vec1; + + /// High qualifier 8 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i8, highp> highp_i8vec2; + + /// High qualifier 8 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i8, highp> highp_i8vec3; + + /// High qualifier 8 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i8, highp> highp_i8vec4; + + + /// Low qualifier 16 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i16, lowp> lowp_i16vec1; + + /// Low qualifier 16 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i16, lowp> lowp_i16vec2; + + /// Low qualifier 16 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i16, lowp> lowp_i16vec3; + + /// Low qualifier 16 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i16, lowp> lowp_i16vec4; + + + /// Medium qualifier 16 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i16, mediump> mediump_i16vec1; + + /// Medium qualifier 16 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i16, mediump> mediump_i16vec2; + + /// Medium qualifier 16 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i16, mediump> mediump_i16vec3; + + /// Medium qualifier 16 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i16, mediump> mediump_i16vec4; + + + /// High qualifier 16 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i16, highp> highp_i16vec1; + + /// High qualifier 16 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i16, highp> highp_i16vec2; + + /// High qualifier 16 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i16, highp> highp_i16vec3; + + /// High qualifier 16 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i16, highp> highp_i16vec4; + + + /// Low qualifier 32 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i32, lowp> lowp_i32vec1; + + /// Low qualifier 32 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i32, lowp> lowp_i32vec2; + + /// Low qualifier 32 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i32, lowp> lowp_i32vec3; + + /// Low qualifier 32 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i32, lowp> lowp_i32vec4; + + + /// Medium qualifier 32 bit signed integer scalar type. 
+ /// @see gtc_type_precision + typedef vec<1, i32, mediump> mediump_i32vec1; + + /// Medium qualifier 32 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i32, mediump> mediump_i32vec2; + + /// Medium qualifier 32 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i32, mediump> mediump_i32vec3; + + /// Medium qualifier 32 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i32, mediump> mediump_i32vec4; + + + /// High qualifier 32 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i32, highp> highp_i32vec1; + + /// High qualifier 32 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i32, highp> highp_i32vec2; + + /// High qualifier 32 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i32, highp> highp_i32vec3; + + /// High qualifier 32 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i32, highp> highp_i32vec4; + + + /// Low qualifier 64 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i64, lowp> lowp_i64vec1; + + /// Low qualifier 64 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i64, lowp> lowp_i64vec2; + + /// Low qualifier 64 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i64, lowp> lowp_i64vec3; + + /// Low qualifier 64 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i64, lowp> lowp_i64vec4; + + + /// Medium qualifier 64 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i64, mediump> mediump_i64vec1; + + /// Medium qualifier 64 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i64, mediump> mediump_i64vec2; + + /// Medium qualifier 64 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i64, mediump> mediump_i64vec3; + + /// Medium qualifier 64 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i64, mediump> mediump_i64vec4; + + + /// High qualifier 64 bit signed integer scalar type. + /// @see gtc_type_precision + typedef vec<1, i64, highp> highp_i64vec1; + + /// High qualifier 64 bit signed integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, i64, highp> highp_i64vec2; + + /// High qualifier 64 bit signed integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, i64, highp> highp_i64vec3; + + /// High qualifier 64 bit signed integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, i64, highp> highp_i64vec4; + + + ///////////////////////////// + // Unsigned int vector types + + /// Low qualifier unsigned integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, uint, lowp> lowp_uvec1; + + /// Low qualifier unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, uint, lowp> lowp_uvec2; + + /// Low qualifier unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, uint, lowp> lowp_uvec3; + + /// Low qualifier unsigned integer vector of 4 components type. 
+ /// @see gtc_type_precision + typedef vec<4, uint, lowp> lowp_uvec4; + + + /// Medium qualifier unsigned integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, uint, mediump> mediump_uvec1; + + /// Medium qualifier unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, uint, mediump> mediump_uvec2; + + /// Medium qualifier unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, uint, mediump> mediump_uvec3; + + /// Medium qualifier unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, uint, mediump> mediump_uvec4; + + + /// High qualifier unsigned integer vector of 1 component type. + /// @see gtc_type_precision + typedef vec<1, uint, highp> highp_uvec1; + + /// High qualifier unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, uint, highp> highp_uvec2; + + /// High qualifier unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, uint, highp> highp_uvec3; + + /// High qualifier unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, uint, highp> highp_uvec4; + + + /// Low qualifier 8 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u8, lowp> lowp_u8vec1; + + /// Low qualifier 8 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u8, lowp> lowp_u8vec2; + + /// Low qualifier 8 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u8, lowp> lowp_u8vec3; + + /// Low qualifier 8 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u8, lowp> lowp_u8vec4; + + + /// Medium qualifier 8 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u8, mediump> mediump_u8vec1; + + /// Medium qualifier 8 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u8, mediump> mediump_u8vec2; + + /// Medium qualifier 8 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u8, mediump> mediump_u8vec3; + + /// Medium qualifier 8 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u8, mediump> mediump_u8vec4; + + + /// High qualifier 8 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u8, highp> highp_u8vec1; + + /// High qualifier 8 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u8, highp> highp_u8vec2; + + /// High qualifier 8 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u8, highp> highp_u8vec3; + + /// High qualifier 8 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u8, highp> highp_u8vec4; + + + /// Low qualifier 16 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u16, lowp> lowp_u16vec1; + + /// Low qualifier 16 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u16, lowp> lowp_u16vec2; + + /// Low qualifier 16 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u16, lowp> lowp_u16vec3; + + /// Low qualifier 16 bit unsigned integer vector of 4 components type. 
+ /// @see gtc_type_precision + typedef vec<4, u16, lowp> lowp_u16vec4; + + + /// Medium qualifier 16 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u16, mediump> mediump_u16vec1; + + /// Medium qualifier 16 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u16, mediump> mediump_u16vec2; + + /// Medium qualifier 16 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u16, mediump> mediump_u16vec3; + + /// Medium qualifier 16 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u16, mediump> mediump_u16vec4; + + + /// High qualifier 16 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u16, highp> highp_u16vec1; + + /// High qualifier 16 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u16, highp> highp_u16vec2; + + /// High qualifier 16 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u16, highp> highp_u16vec3; + + /// High qualifier 16 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u16, highp> highp_u16vec4; + + + /// Low qualifier 32 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u32, lowp> lowp_u32vec1; + + /// Low qualifier 32 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u32, lowp> lowp_u32vec2; + + /// Low qualifier 32 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u32, lowp> lowp_u32vec3; + + /// Low qualifier 32 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u32, lowp> lowp_u32vec4; + + + /// Medium qualifier 32 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u32, mediump> mediump_u32vec1; + + /// Medium qualifier 32 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u32, mediump> mediump_u32vec2; + + /// Medium qualifier 32 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u32, mediump> mediump_u32vec3; + + /// Medium qualifier 32 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u32, mediump> mediump_u32vec4; + + + /// High qualifier 32 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u32, highp> highp_u32vec1; + + /// High qualifier 32 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u32, highp> highp_u32vec2; + + /// High qualifier 32 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u32, highp> highp_u32vec3; + + /// High qualifier 32 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u32, highp> highp_u32vec4; + + + /// Low qualifier 64 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u64, lowp> lowp_u64vec1; + + /// Low qualifier 64 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u64, lowp> lowp_u64vec2; + + /// Low qualifier 64 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u64, lowp> lowp_u64vec3; + + /// Low qualifier 64 bit unsigned integer vector of 4 components type. 
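+	/// A minimal usage sketch for the sized unsigned vector typedefs in this block,
+	/// assuming <glm/gtc/type_precision.hpp> is included; the component width is fixed
+	/// regardless of platform, which helps with tightly packed data such as 8 bit colors.
+	/// @code
+	/// glm::u8vec4  packedColor(255u, 128u, 0u, 255u); // 4 bytes total
+	/// glm::u64vec2 bigIndex(0ull, 1ull << 40);        // 16 bytes total
+	/// @endcode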
+ /// @see gtc_type_precision + typedef vec<4, u64, lowp> lowp_u64vec4; + + + /// Medium qualifier 64 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u64, mediump> mediump_u64vec1; + + /// Medium qualifier 64 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u64, mediump> mediump_u64vec2; + + /// Medium qualifier 64 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u64, mediump> mediump_u64vec3; + + /// Medium qualifier 64 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u64, mediump> mediump_u64vec4; + + + /// High qualifier 64 bit unsigned integer scalar type. + /// @see gtc_type_precision + typedef vec<1, u64, highp> highp_u64vec1; + + /// High qualifier 64 bit unsigned integer vector of 2 components type. + /// @see gtc_type_precision + typedef vec<2, u64, highp> highp_u64vec2; + + /// High qualifier 64 bit unsigned integer vector of 3 components type. + /// @see gtc_type_precision + typedef vec<3, u64, highp> highp_u64vec3; + + /// High qualifier 64 bit unsigned integer vector of 4 components type. + /// @see gtc_type_precision + typedef vec<4, u64, highp> highp_u64vec4; + + + ////////////////////// + // Float vector types + + /// 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 float32_t; + + /// 32 bit single-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float32 f32; + +# ifndef GLM_FORCE_SINGLE_ONLY + + /// 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 float64_t; + + /// 64 bit double-qualifier floating-point scalar. + /// @see gtc_type_precision + typedef float64 f64; +# endif//GLM_FORCE_SINGLE_ONLY + + /// Single-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, float, defaultp> fvec1; + + /// Single-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, float, defaultp> fvec2; + + /// Single-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, float, defaultp> fvec3; + + /// Single-qualifier floating-point vector of 4 components. + /// @see gtc_type_precision + typedef vec<4, float, defaultp> fvec4; + + + /// Single-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, f32, defaultp> f32vec1; + + /// Single-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, f32, defaultp> f32vec2; + + /// Single-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, f32, defaultp> f32vec3; + + /// Single-qualifier floating-point vector of 4 components. + /// @see gtc_type_precision + typedef vec<4, f32, defaultp> f32vec4; + +# ifndef GLM_FORCE_SINGLE_ONLY + /// Double-qualifier floating-point vector of 1 component. + /// @see gtc_type_precision + typedef vec<1, f64, defaultp> f64vec1; + + /// Double-qualifier floating-point vector of 2 components. + /// @see gtc_type_precision + typedef vec<2, f64, defaultp> f64vec2; + + /// Double-qualifier floating-point vector of 3 components. + /// @see gtc_type_precision + typedef vec<3, f64, defaultp> f64vec3; + + /// Double-qualifier floating-point vector of 4 components. 
+ /// @see gtc_type_precision + typedef vec<4, f64, defaultp> f64vec4; +# endif//GLM_FORCE_SINGLE_ONLY + + + ////////////////////// + // Float matrix types + + /// Single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef detail::tmat1x1 fmat1; + + /// Single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, defaultp> fmat2; + + /// Single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, defaultp> fmat3; + + /// Single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, defaultp> fmat4; + + + /// Single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f32 fmat1x1; + + /// Single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, defaultp> fmat2x2; + + /// Single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, defaultp> fmat2x3; + + /// Single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, defaultp> fmat2x4; + + /// Single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, defaultp> fmat3x2; + + /// Single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, defaultp> fmat3x3; + + /// Single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, defaultp> fmat3x4; + + /// Single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, defaultp> fmat4x2; + + /// Single-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f32, defaultp> fmat4x3; + + /// Single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, defaultp> fmat4x4; + + + /// Single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef detail::tmat1x1 f32mat1; + + /// Single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, defaultp> f32mat2; + + /// Single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, defaultp> f32mat3; + + /// Single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, defaultp> f32mat4; + + + /// Single-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f32 f32mat1x1; + + /// Single-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f32, defaultp> f32mat2x2; + + /// Single-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f32, defaultp> f32mat2x3; + + /// Single-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f32, defaultp> f32mat2x4; + + /// Single-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f32, defaultp> f32mat3x2; + + /// Single-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f32, defaultp> f32mat3x3; + + /// Single-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f32, defaultp> f32mat3x4; + + /// Single-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f32, defaultp> f32mat4x2; + + /// Single-qualifier floating-point 4x3 matrix. 
+ /// @see gtc_type_precision + typedef mat<4, 3, f32, defaultp> f32mat4x3; + + /// Single-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f32, defaultp> f32mat4x4; + + +# ifndef GLM_FORCE_SINGLE_ONLY + + /// Double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef detail::tmat1x1 f64mat1; + + /// Double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f64, defaultp> f64mat2; + + /// Double-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f64, defaultp> f64mat3; + + /// Double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f64, defaultp> f64mat4; + + + /// Double-qualifier floating-point 1x1 matrix. + /// @see gtc_type_precision + //typedef f64 f64mat1x1; + + /// Double-qualifier floating-point 2x2 matrix. + /// @see gtc_type_precision + typedef mat<2, 2, f64, defaultp> f64mat2x2; + + /// Double-qualifier floating-point 2x3 matrix. + /// @see gtc_type_precision + typedef mat<2, 3, f64, defaultp> f64mat2x3; + + /// Double-qualifier floating-point 2x4 matrix. + /// @see gtc_type_precision + typedef mat<2, 4, f64, defaultp> f64mat2x4; + + /// Double-qualifier floating-point 3x2 matrix. + /// @see gtc_type_precision + typedef mat<3, 2, f64, defaultp> f64mat3x2; + + /// Double-qualifier floating-point 3x3 matrix. + /// @see gtc_type_precision + typedef mat<3, 3, f64, defaultp> f64mat3x3; + + /// Double-qualifier floating-point 3x4 matrix. + /// @see gtc_type_precision + typedef mat<3, 4, f64, defaultp> f64mat3x4; + + /// Double-qualifier floating-point 4x2 matrix. + /// @see gtc_type_precision + typedef mat<4, 2, f64, defaultp> f64mat4x2; + + /// Double-qualifier floating-point 4x3 matrix. + /// @see gtc_type_precision + typedef mat<4, 3, f64, defaultp> f64mat4x3; + + /// Double-qualifier floating-point 4x4 matrix. + /// @see gtc_type_precision + typedef mat<4, 4, f64, defaultp> f64mat4x4; + +# endif//GLM_FORCE_SINGLE_ONLY + + ////////////////////////// + // Quaternion types + + /// Single-qualifier floating-point quaternion. + /// @see gtc_type_precision + typedef qua f32quat; + + /// Low single-qualifier floating-point quaternion. + /// @see gtc_type_precision + typedef qua lowp_f32quat; + + /// Low double-qualifier floating-point quaternion. + /// @see gtc_type_precision + typedef qua lowp_f64quat; + + /// Medium single-qualifier floating-point quaternion. + /// @see gtc_type_precision + typedef qua mediump_f32quat; + +# ifndef GLM_FORCE_SINGLE_ONLY + + /// Medium double-qualifier floating-point quaternion. + /// @see gtc_type_precision + typedef qua mediump_f64quat; + + /// High single-qualifier floating-point quaternion. + /// @see gtc_type_precision + typedef qua highp_f32quat; + + /// High double-qualifier floating-point quaternion. + /// @see gtc_type_precision + typedef qua highp_f64quat; + + /// Double-qualifier floating-point quaternion. 
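+	/// A minimal sketch of the explicitly sized floating-point aliases above, assuming
+	/// <glm/gtc/type_precision.hpp> is included and, for the f64 types, that
+	/// GLM_FORCE_SINGLE_ONLY is not defined:
+	/// @code
+	/// glm::f32vec3 position(1.0f, 2.0f, 3.0f);      // always 3 x 32 bit floats
+	/// glm::f64mat4 transform(1.0);                  // always 16 x 64 bit floats
+	/// glm::f64quat orientation(1.0, 0.0, 0.0, 0.0); // w, x, y, z constructor order
+	/// @endcode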
+ /// @see gtc_type_precision + typedef qua f64quat; + +# endif//GLM_FORCE_SINGLE_ONLY + + /// @} +}//namespace glm + +#include "type_precision.inl" diff --git a/includes/glm/gtc/type_precision.inl b/includes/glm/gtc/type_precision.inl new file mode 100644 index 0000000..ae80912 --- /dev/null +++ b/includes/glm/gtc/type_precision.inl @@ -0,0 +1,6 @@ +/// @ref gtc_precision + +namespace glm +{ + +} diff --git a/includes/glm/gtc/type_ptr.hpp b/includes/glm/gtc/type_ptr.hpp new file mode 100644 index 0000000..d7e625a --- /dev/null +++ b/includes/glm/gtc/type_ptr.hpp @@ -0,0 +1,230 @@ +/// @ref gtc_type_ptr +/// @file glm/gtc/type_ptr.hpp +/// +/// @see core (dependence) +/// @see gtc_quaternion (dependence) +/// +/// @defgroup gtc_type_ptr GLM_GTC_type_ptr +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Handles the interaction between pointers and vector, matrix types. +/// +/// This extension defines an overloaded function, glm::value_ptr. It returns +/// a pointer to the memory layout of the object. Matrix types store their values +/// in column-major order. +/// +/// This is useful for uploading data to matrices or copying data to buffer objects. +/// +/// Example: +/// @code +/// #include +/// #include +/// +/// glm::vec3 aVector(3); +/// glm::mat4 someMatrix(1.0); +/// +/// glUniform3fv(uniformLoc, 1, glm::value_ptr(aVector)); +/// glUniformMatrix4fv(uniformMatrixLoc, 1, GL_FALSE, glm::value_ptr(someMatrix)); +/// @endcode +/// +/// need to be included to use the features of this extension. + +#pragma once + +// Dependency: +#include "../gtc/quaternion.hpp" +#include "../gtc/vec1.hpp" +#include "../vec2.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" +#include "../mat2x2.hpp" +#include "../mat2x3.hpp" +#include "../mat2x4.hpp" +#include "../mat3x2.hpp" +#include "../mat3x3.hpp" +#include "../mat3x4.hpp" +#include "../mat4x2.hpp" +#include "../mat4x3.hpp" +#include "../mat4x4.hpp" +#include + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_type_ptr extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_type_ptr + /// @{ + + /// Return the constant address to the data of the input parameter. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL typename genType::value_type const * value_ptr(genType const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<1, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<2, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<3, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<4, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<1, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<2, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<3, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<4, T, Q> const& v); + + /// Build a vector from a pointer. 
+ /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<1, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<2, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<3, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<4, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<1, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<2, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<3, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<4, T, Q> const& v); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<2, T, defaultp> make_vec2(T const * const ptr); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<3, T, defaultp> make_vec3(T const * const ptr); + + /// Build a vector from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL vec<4, T, defaultp> make_vec4(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<2, 2, T, defaultp> make_mat2x2(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<2, 3, T, defaultp> make_mat2x3(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<2, 4, T, defaultp> make_mat2x4(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<3, 2, T, defaultp> make_mat3x2(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<3, 3, T, defaultp> make_mat3x3(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<3, 4, T, defaultp> make_mat3x4(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<4, 2, T, defaultp> make_mat4x2(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<4, 3, T, defaultp> make_mat4x3(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> make_mat4x4(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<2, 2, T, defaultp> make_mat2(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<3, 3, T, defaultp> make_mat3(T const * const ptr); + + /// Build a matrix from a pointer. + /// @see gtc_type_ptr + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> make_mat4(T const * const ptr); + + /// Build a quaternion from a pointer. 
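+	/// A minimal sketch of the make_* helpers declared above, assuming the raw arrays are
+	/// tightly packed and, for matrices, stored column-major as GLM expects:
+	/// @code
+	/// float positionData[3] = {1.0f, 2.0f, 3.0f};
+	/// float matrixData[16] = {1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1}; // identity, column by column
+	/// float quatData[4] = {0.0f, 0.0f, 0.0f, 1.0f};                // x, y, z, w in memory (default layout)
+	///
+	/// glm::vec3 position = glm::make_vec3(positionData);
+	/// glm::mat4 transform = glm::make_mat4(matrixData);
+	/// glm::quat rotation = glm::make_quat(quatData);
+	/// @endcode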
+ /// @see gtc_type_ptr + template + GLM_FUNC_DECL qua make_quat(T const * const ptr); + + /// @} +}//namespace glm + +#include "type_ptr.inl" diff --git a/includes/glm/gtc/type_ptr.inl b/includes/glm/gtc/type_ptr.inl new file mode 100644 index 0000000..4e9ed72 --- /dev/null +++ b/includes/glm/gtc/type_ptr.inl @@ -0,0 +1,398 @@ +/// @ref gtc_type_ptr + +#include + +namespace glm +{ + /// @addtogroup gtc_type_ptr + /// @{ + + template + GLM_FUNC_QUALIFIER T const* value_ptr(vec<1, T, Q> const& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(vec<1, T, Q>& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(vec<2, T, Q> const& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(vec<2, T, Q>& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T const * value_ptr(vec<3, T, Q> const& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(vec<3, T, Q>& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(vec<4, T, Q> const& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(vec<4, T, Q>& v) + { + return &(v.x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 2, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 2, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 3, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 3, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 4, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<4, 4, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 3, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 3, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 2, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 2, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 4, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 4, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 2, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<4, 2, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 4, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 4, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 3, T, Q> const& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T * value_ptr(mat<4, 3, T, Q>& m) + { + return &(m[0].x); + } + + template + GLM_FUNC_QUALIFIER T const * value_ptr(qua const& q) + { + return &(q[0]); + } + + template + GLM_FUNC_QUALIFIER T* value_ptr(qua& q) + { + return &(q[0]); + } + + template + GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<1, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<2, T, Q> const& v) + { + return vec<1, T, Q>(v); + } + + template + GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<3, T, Q> const& v) + { + return vec<1, T, Q>(v); + } + + template + GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<4, T, 
Q> const& v) + { + return vec<1, T, Q>(v); + } + + template + GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<1, T, Q> const& v) + { + return vec<2, T, Q>(v.x, static_cast(0)); + } + + template + GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<2, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<3, T, Q> const& v) + { + return vec<2, T, Q>(v); + } + + template + GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<4, T, Q> const& v) + { + return vec<2, T, Q>(v); + } + + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<1, T, Q> const& v) + { + return vec<3, T, Q>(v.x, static_cast(0), static_cast(0)); + } + + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<2, T, Q> const& v) + { + return vec<3, T, Q>(v.x, v.y, static_cast(0)); + } + + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<3, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<4, T, Q> const& v) + { + return vec<3, T, Q>(v); + } + + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<1, T, Q> const& v) + { + return vec<4, T, Q>(v.x, static_cast(0), static_cast(0), static_cast(1)); + } + + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<2, T, Q> const& v) + { + return vec<4, T, Q>(v.x, v.y, static_cast(0), static_cast(1)); + } + + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<3, T, Q> const& v) + { + return vec<4, T, Q>(v.x, v.y, v.z, static_cast(1)); + } + + template + GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<4, T, Q> const& v) + { + return v; + } + + template + GLM_FUNC_QUALIFIER vec<2, T, defaultp> make_vec2(T const *const ptr) + { + vec<2, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(vec<2, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, defaultp> make_vec3(T const *const ptr) + { + vec<3, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(vec<3, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, defaultp> make_vec4(T const *const ptr) + { + vec<4, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(vec<4, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> make_mat2x2(T const *const ptr) + { + mat<2, 2, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<2, 2, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 3, T, defaultp> make_mat2x3(T const *const ptr) + { + mat<2, 3, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<2, 3, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 4, T, defaultp> make_mat2x4(T const *const ptr) + { + mat<2, 4, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<2, 4, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 2, T, defaultp> make_mat3x2(T const *const ptr) + { + mat<3, 2, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<3, 2, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> make_mat3x3(T const *const ptr) + { + mat<3, 3, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<3, 3, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 4, T, defaultp> make_mat3x4(T const *const ptr) + { + mat<3, 4, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<3, 4, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 2, T, defaultp> make_mat4x2(T const *const ptr) + { + mat<4, 2, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, 
sizeof(mat<4, 2, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 3, T, defaultp> make_mat4x3(T const *const ptr) + { + mat<4, 3, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<4, 3, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> make_mat4x4(T const *const ptr) + { + mat<4, 4, T, defaultp> Result; + memcpy(value_ptr(Result), ptr, sizeof(mat<4, 4, T, defaultp>)); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> make_mat2(T const *const ptr) + { + return make_mat2x2(ptr); + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> make_mat3(T const *const ptr) + { + return make_mat3x3(ptr); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> make_mat4(T const *const ptr) + { + return make_mat4x4(ptr); + } + + template + GLM_FUNC_QUALIFIER qua make_quat(T const *const ptr) + { + qua Result; + memcpy(value_ptr(Result), ptr, sizeof(qua)); + return Result; + } + + /// @} +}//namespace glm + diff --git a/includes/glm/gtc/ulp.hpp b/includes/glm/gtc/ulp.hpp new file mode 100644 index 0000000..7b918f0 --- /dev/null +++ b/includes/glm/gtc/ulp.hpp @@ -0,0 +1,155 @@ +/// @ref gtc_ulp +/// @file glm/gtc/ulp.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtc_ulp GLM_GTC_ulp +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Allow the measurement of the accuracy of a function against a reference +/// implementation. This extension works on floating-point data and provide results +/// in ULP. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/_vectorize.hpp" +#include "../ext/scalar_int_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_ulp extension included") +#endif + +namespace glm +{ + /// @addtogroup gtc_ulp + /// @{ + + /// Return the next ULP value(s) after the input value(s). + /// + /// @tparam genType A floating-point scalar type. + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL genType next_float(genType x); + + /// Return the previous ULP value(s) before the input value(s). + /// + /// @tparam genType A floating-point scalar type. + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL genType prev_float(genType x); + + /// Return the value(s) ULP distance after the input value(s). + /// + /// @tparam genType A floating-point scalar type. + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL genType next_float(genType x, int ULPs); + + /// Return the value(s) ULP distance before the input value(s). + /// + /// @tparam genType A floating-point scalar type. + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL genType prev_float(genType x, int ULPs); + + /// Return the distance in the number of ULP between 2 single-precision floating-point scalars. + /// + /// @see gtc_ulp + GLM_FUNC_DECL int float_distance(float x, float y); + + /// Return the distance in the number of ULP between 2 double-precision floating-point scalars. + /// + /// @see gtc_ulp + GLM_FUNC_DECL int64 float_distance(double x, double y); + + /// Return the next ULP value(s) after the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec next_float(vec const& x); + + /// Return the value(s) ULP distance after the input value(s). 
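+	/// A minimal sketch combining the scalar helpers above, assuming IEEE 754 floats:
+	/// @code
+	/// float x = 1.0f;
+	/// float up = glm::next_float(x);         // smallest float strictly greater than x
+	/// float down = glm::prev_float(x, 2);    // two representable values below x
+	/// int ulps = glm::float_distance(x, up); // 1, the two values are adjacent
+	/// @endcode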
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec next_float(vec const& x, int ULPs); + + /// Return the value(s) ULP distance after the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec next_float(vec const& x, vec const& ULPs); + + /// Return the previous ULP value(s) before the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec prev_float(vec const& x); + + /// Return the value(s) ULP distance before the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec prev_float(vec const& x, int ULPs); + + /// Return the value(s) ULP distance before the input value(s). + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec prev_float(vec const& x, vec const& ULPs); + + /// Return the distance in the number of ULP between 2 single-precision floating-point scalars. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec float_distance(vec const& x, vec const& y); + + /// Return the distance in the number of ULP between 2 double-precision floating-point scalars. 
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam Q Value from qualifier enum + /// + /// @see gtc_ulp + template + GLM_FUNC_DECL vec float_distance(vec const& x, vec const& y); + + /// @} +}//namespace glm + +#include "ulp.inl" diff --git a/includes/glm/gtc/ulp.inl b/includes/glm/gtc/ulp.inl new file mode 100644 index 0000000..836c84b --- /dev/null +++ b/includes/glm/gtc/ulp.inl @@ -0,0 +1,173 @@ +/// @ref gtc_ulp + +#include "../ext/scalar_ulp.hpp" + +namespace glm +{ + template<> + GLM_FUNC_QUALIFIER float next_float(float x) + { +# if GLM_HAS_CXX11_STL + return std::nextafter(x, std::numeric_limits::max()); +# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) + return detail::nextafterf(x, FLT_MAX); +# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) + return __builtin_nextafterf(x, FLT_MAX); +# else + return nextafterf(x, FLT_MAX); +# endif + } + + template<> + GLM_FUNC_QUALIFIER double next_float(double x) + { +# if GLM_HAS_CXX11_STL + return std::nextafter(x, std::numeric_limits::max()); +# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) + return detail::nextafter(x, std::numeric_limits::max()); +# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) + return __builtin_nextafter(x, DBL_MAX); +# else + return nextafter(x, DBL_MAX); +# endif + } + + template + GLM_FUNC_QUALIFIER T next_float(T x, int ULPs) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'next_float' only accept floating-point input"); + assert(ULPs >= 0); + + T temp = x; + for (int i = 0; i < ULPs; ++i) + temp = next_float(temp); + return temp; + } + + GLM_FUNC_QUALIFIER float prev_float(float x) + { +# if GLM_HAS_CXX11_STL + return std::nextafter(x, std::numeric_limits::min()); +# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) + return detail::nextafterf(x, FLT_MIN); +# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) + return __builtin_nextafterf(x, FLT_MIN); +# else + return nextafterf(x, FLT_MIN); +# endif + } + + GLM_FUNC_QUALIFIER double prev_float(double x) + { +# if GLM_HAS_CXX11_STL + return std::nextafter(x, std::numeric_limits::min()); +# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) + return _nextafter(x, DBL_MIN); +# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) + return __builtin_nextafter(x, DBL_MIN); +# else + return nextafter(x, DBL_MIN); +# endif + } + + template + GLM_FUNC_QUALIFIER T prev_float(T x, int ULPs) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'prev_float' only accept floating-point input"); + assert(ULPs >= 0); + + T temp = x; + for (int i = 0; i < ULPs; ++i) + temp = prev_float(temp); + return temp; + } + + GLM_FUNC_QUALIFIER int float_distance(float x, float y) + { + detail::float_t const a(x); + detail::float_t const b(y); + + return abs(a.i - b.i); + } + + GLM_FUNC_QUALIFIER int64 float_distance(double x, double y) + { + detail::float_t const a(x); + detail::float_t const b(y); + + return abs(a.i - b.i); + } + + template + GLM_FUNC_QUALIFIER vec next_float(vec const& x) + { + vec Result; + for (length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = next_float(x[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec next_float(vec const& x, int ULPs) + { + vec 
Result; + for (length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = next_float(x[i], ULPs); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec next_float(vec const& x, vec const& ULPs) + { + vec Result; + for (length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = next_float(x[i], ULPs[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec prev_float(vec const& x) + { + vec Result; + for (length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = prev_float(x[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec prev_float(vec const& x, int ULPs) + { + vec Result; + for (length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = prev_float(x[i], ULPs); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec prev_float(vec const& x, vec const& ULPs) + { + vec Result; + for (length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = prev_float(x[i], ULPs[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec float_distance(vec const& x, vec const& y) + { + vec Result; + for (length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = float_distance(x[i], y[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER vec float_distance(vec const& x, vec const& y) + { + vec Result; + for (length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = float_distance(x[i], y[i]); + return Result; + } +}//namespace glm + diff --git a/includes/glm/gtc/vec1.hpp b/includes/glm/gtc/vec1.hpp new file mode 100644 index 0000000..63697a2 --- /dev/null +++ b/includes/glm/gtc/vec1.hpp @@ -0,0 +1,30 @@ +/// @ref gtc_vec1 +/// @file glm/gtc/vec1.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtc_vec1 GLM_GTC_vec1 +/// @ingroup gtc +/// +/// Include to use the features of this extension. +/// +/// Add vec1, ivec1, uvec1 and bvec1 types. + +#pragma once + +// Dependency: +#include "../ext/vector_bool1.hpp" +#include "../ext/vector_bool1_precision.hpp" +#include "../ext/vector_float1.hpp" +#include "../ext/vector_float1_precision.hpp" +#include "../ext/vector_double1.hpp" +#include "../ext/vector_double1_precision.hpp" +#include "../ext/vector_int1.hpp" +#include "../ext/vector_int1_sized.hpp" +#include "../ext/vector_uint1.hpp" +#include "../ext/vector_uint1_sized.hpp" + +#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_vec1 extension included") +#endif + diff --git a/includes/glm/gtx/associated_min_max.hpp b/includes/glm/gtx/associated_min_max.hpp new file mode 100644 index 0000000..435230d --- /dev/null +++ b/includes/glm/gtx/associated_min_max.hpp @@ -0,0 +1,205 @@ +/// @ref gtx_associated_min_max +/// @file glm/gtx/associated_min_max.hpp +/// +/// @see core (dependence) +/// @see gtx_extented_min_max (dependence) +/// +/// @defgroup gtx_associated_min_max GLM_GTX_associated_min_max +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// @brief Min and max functions that return associated values not the compared ones. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_associated_min_max is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." 
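+// A minimal usage sketch for the functions declared below, assuming GLM_ENABLE_EXPERIMENTAL
+// is defined before this header is included: each call compares the first value of every
+// pair and returns the second value of the winning pair.
+//
+//   float bestCost = 2.0f, bestId = 7.0f;
+//   float otherCost = 5.0f, otherId = 3.0f;
+//   float winnerId = glm::associatedMin(bestCost, bestId, otherCost, otherId); // 7.0f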
+#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_associated_min_max extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_associated_min_max + /// @{ + + /// Minimum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL U associatedMin(T x, U a, T y, U b); + + /// Minimum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + vec const& x, vec const& a, + vec const& y, vec const& b); + + /// Minimum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + T x, const vec& a, + T y, const vec& b); + + /// Minimum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + vec const& x, U a, + vec const& y, U b); + + /// Minimum comparison between 3 variables and returns 3 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL U associatedMin( + T x, U a, + T y, U b, + T z, U c); + + /// Minimum comparison between 3 variables and returns 3 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c); + + /// Minimum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL U associatedMin( + T x, U a, + T y, U b, + T z, U c, + T w, U d); + + /// Minimum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c, + vec const& w, vec const& d); + + /// Minimum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + T x, vec const& a, + T y, vec const& b, + T z, vec const& c, + T w, vec const& d); + + /// Minimum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMin( + vec const& x, U a, + vec const& y, U b, + vec const& z, U c, + vec const& w, U d); + + /// Maximum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL U associatedMax(T x, U a, T y, U b); + + /// Maximum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + vec const& x, vec const& a, + vec const& y, vec const& b); + + /// Maximum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + T x, vec const& a, + T y, vec const& b); + + /// Maximum comparison between 2 variables and returns 2 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + vec const& x, U a, + vec const& y, U b); + + /// Maximum comparison between 3 variables and returns 3 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL U associatedMax( + T x, U a, + T y, U b, + T 
z, U c); + + /// Maximum comparison between 3 variables and returns 3 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c); + + /// Maximum comparison between 3 variables and returns 3 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + T x, vec const& a, + T y, vec const& b, + T z, vec const& c); + + /// Maximum comparison between 3 variables and returns 3 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + vec const& x, U a, + vec const& y, U b, + vec const& z, U c); + + /// Maximum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL U associatedMax( + T x, U a, + T y, U b, + T z, U c, + T w, U d); + + /// Maximum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c, + vec const& w, vec const& d); + + /// Maximum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + T x, vec const& a, + T y, vec const& b, + T z, vec const& c, + T w, vec const& d); + + /// Maximum comparison between 4 variables and returns 4 associated variable values + /// @see gtx_associated_min_max + template + GLM_FUNC_DECL vec associatedMax( + vec const& x, U a, + vec const& y, U b, + vec const& z, U c, + vec const& w, U d); + + /// @} +} //namespace glm + +#include "associated_min_max.inl" diff --git a/includes/glm/gtx/associated_min_max.inl b/includes/glm/gtx/associated_min_max.inl new file mode 100644 index 0000000..f09f5bb --- /dev/null +++ b/includes/glm/gtx/associated_min_max.inl @@ -0,0 +1,354 @@ +/// @ref gtx_associated_min_max + +namespace glm{ + +// Min comparison between 2 variables +template +GLM_FUNC_QUALIFIER U associatedMin(T x, U a, T y, U b) +{ + return x < y ? a : b; +} + +template +GLM_FUNC_QUALIFIER vec associatedMin +( + vec const& x, vec const& a, + vec const& y, vec const& b +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] < y[i] ? a[i] : b[i]; + return Result; +} + +template +GLM_FUNC_QUALIFIER vec associatedMin +( + T x, const vec& a, + T y, const vec& b +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x < y ? a[i] : b[i]; + return Result; +} + +template +GLM_FUNC_QUALIFIER vec associatedMin +( + vec const& x, U a, + vec const& y, U b +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] < y[i] ? a : b; + return Result; +} + +// Min comparison between 3 variables +template +GLM_FUNC_QUALIFIER U associatedMin +( + T x, U a, + T y, U b, + T z, U c +) +{ + U Result = x < y ? (x < z ? a : c) : (y < z ? b : c); + return Result; +} + +template +GLM_FUNC_QUALIFIER vec associatedMin +( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] < y[i] ? (x[i] < z[i] ? a[i] : c[i]) : (y[i] < z[i] ? 
b[i] : c[i]); + return Result; +} + +// Min comparison between 4 variables +template +GLM_FUNC_QUALIFIER U associatedMin +( + T x, U a, + T y, U b, + T z, U c, + T w, U d +) +{ + T Test1 = min(x, y); + T Test2 = min(z, w); + U Result1 = x < y ? a : b; + U Result2 = z < w ? c : d; + U Result = Test1 < Test2 ? Result1 : Result2; + return Result; +} + +// Min comparison between 4 variables +template +GLM_FUNC_QUALIFIER vec associatedMin +( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c, + vec const& w, vec const& d +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + { + T Test1 = min(x[i], y[i]); + T Test2 = min(z[i], w[i]); + U Result1 = x[i] < y[i] ? a[i] : b[i]; + U Result2 = z[i] < w[i] ? c[i] : d[i]; + Result[i] = Test1 < Test2 ? Result1 : Result2; + } + return Result; +} + +// Min comparison between 4 variables +template +GLM_FUNC_QUALIFIER vec associatedMin +( + T x, vec const& a, + T y, vec const& b, + T z, vec const& c, + T w, vec const& d +) +{ + T Test1 = min(x, y); + T Test2 = min(z, w); + + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + { + U Result1 = x < y ? a[i] : b[i]; + U Result2 = z < w ? c[i] : d[i]; + Result[i] = Test1 < Test2 ? Result1 : Result2; + } + return Result; +} + +// Min comparison between 4 variables +template +GLM_FUNC_QUALIFIER vec associatedMin +( + vec const& x, U a, + vec const& y, U b, + vec const& z, U c, + vec const& w, U d +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + { + T Test1 = min(x[i], y[i]); + T Test2 = min(z[i], w[i]); + U Result1 = x[i] < y[i] ? a : b; + U Result2 = z[i] < w[i] ? c : d; + Result[i] = Test1 < Test2 ? Result1 : Result2; + } + return Result; +} + +// Max comparison between 2 variables +template +GLM_FUNC_QUALIFIER U associatedMax(T x, U a, T y, U b) +{ + return x > y ? a : b; +} + +// Max comparison between 2 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + vec const& x, vec const& a, + vec const& y, vec const& b +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] > y[i] ? a[i] : b[i]; + return Result; +} + +// Max comparison between 2 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + T x, vec const& a, + T y, vec const& b +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x > y ? a[i] : b[i]; + return Result; +} + +// Max comparison between 2 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + vec const& x, U a, + vec const& y, U b +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] > y[i] ? a : b; + return Result; +} + +// Max comparison between 3 variables +template +GLM_FUNC_QUALIFIER U associatedMax +( + T x, U a, + T y, U b, + T z, U c +) +{ + U Result = x > y ? (x > z ? a : c) : (y > z ? b : c); + return Result; +} + +// Max comparison between 3 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] > y[i] ? (x[i] > z[i] ? a[i] : c[i]) : (y[i] > z[i] ? b[i] : c[i]); + return Result; +} + +// Max comparison between 3 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + T x, vec const& a, + T y, vec const& b, + T z, vec const& c +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x > y ? (x > z ? 
a[i] : c[i]) : (y > z ? b[i] : c[i]); + return Result; +} + +// Max comparison between 3 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + vec const& x, U a, + vec const& y, U b, + vec const& z, U c +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + Result[i] = x[i] > y[i] ? (x[i] > z[i] ? a : c) : (y[i] > z[i] ? b : c); + return Result; +} + +// Max comparison between 4 variables +template +GLM_FUNC_QUALIFIER U associatedMax +( + T x, U a, + T y, U b, + T z, U c, + T w, U d +) +{ + T Test1 = max(x, y); + T Test2 = max(z, w); + U Result1 = x > y ? a : b; + U Result2 = z > w ? c : d; + U Result = Test1 > Test2 ? Result1 : Result2; + return Result; +} + +// Max comparison between 4 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + vec const& x, vec const& a, + vec const& y, vec const& b, + vec const& z, vec const& c, + vec const& w, vec const& d +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + { + T Test1 = max(x[i], y[i]); + T Test2 = max(z[i], w[i]); + U Result1 = x[i] > y[i] ? a[i] : b[i]; + U Result2 = z[i] > w[i] ? c[i] : d[i]; + Result[i] = Test1 > Test2 ? Result1 : Result2; + } + return Result; +} + +// Max comparison between 4 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + T x, vec const& a, + T y, vec const& b, + T z, vec const& c, + T w, vec const& d +) +{ + T Test1 = max(x, y); + T Test2 = max(z, w); + + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + { + U Result1 = x > y ? a[i] : b[i]; + U Result2 = z > w ? c[i] : d[i]; + Result[i] = Test1 > Test2 ? Result1 : Result2; + } + return Result; +} + +// Max comparison between 4 variables +template +GLM_FUNC_QUALIFIER vec associatedMax +( + vec const& x, U a, + vec const& y, U b, + vec const& z, U c, + vec const& w, U d +) +{ + vec Result; + for(length_t i = 0, n = Result.length(); i < n; ++i) + { + T Test1 = max(x[i], y[i]); + T Test2 = max(z[i], w[i]); + U Result1 = x[i] > y[i] ? a : b; + U Result2 = z[i] > w[i] ? c : d; + Result[i] = Test1 > Test2 ? Result1 : Result2; + } + return Result; +} +}//namespace glm diff --git a/includes/glm/gtx/bit.hpp b/includes/glm/gtx/bit.hpp new file mode 100644 index 0000000..2f6b3f6 --- /dev/null +++ b/includes/glm/gtx/bit.hpp @@ -0,0 +1,96 @@ +/// @ref gtx_bit +/// @file glm/gtx/bit.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_bit GLM_GTX_bit +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Allow to perform bit operations on integer values + +#pragma once + +// Dependencies +#include "../gtc/bitfield.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_bit is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_bit extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_bit + /// @{ + + /// @see gtx_bit + template + GLM_FUNC_DECL genIUType highestBitValue(genIUType Value); + + /// @see gtx_bit + template + GLM_FUNC_DECL genIUType lowestBitValue(genIUType Value); + + /// Find the highest bit set to 1 in a integer variable and return its value. + /// + /// @see gtx_bit + template + GLM_FUNC_DECL vec highestBitValue(vec const& value); + + /// Return the power of two number which value is just higher the input value. 
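+	/// A minimal sketch for the bit helpers in this extension, assuming an unsigned integer input:
+	/// @code
+	/// glm::uint v = 20u;                         // binary 10100
+	/// glm::uint high = glm::highestBitValue(v);  // 16 (binary 10000)
+	/// glm::uint low = glm::lowestBitValue(v);    // 4  (binary 00100)
+	/// glm::uint above = glm::powerOfTwoAbove(v); // 32
+	/// @endcode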
+ /// Deprecated, use ceilPowerOfTwo from GTC_round instead + /// + /// @see gtc_round + /// @see gtx_bit + template + GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoAbove(genIUType Value); + + /// Return the power of two number which value is just higher the input value. + /// Deprecated, use ceilPowerOfTwo from GTC_round instead + /// + /// @see gtc_round + /// @see gtx_bit + template + GLM_DEPRECATED GLM_FUNC_DECL vec powerOfTwoAbove(vec const& value); + + /// Return the power of two number which value is just lower the input value. + /// Deprecated, use floorPowerOfTwo from GTC_round instead + /// + /// @see gtc_round + /// @see gtx_bit + template + GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoBelow(genIUType Value); + + /// Return the power of two number which value is just lower the input value. + /// Deprecated, use floorPowerOfTwo from GTC_round instead + /// + /// @see gtc_round + /// @see gtx_bit + template + GLM_DEPRECATED GLM_FUNC_DECL vec powerOfTwoBelow(vec const& value); + + /// Return the power of two number which value is the closet to the input value. + /// Deprecated, use roundPowerOfTwo from GTC_round instead + /// + /// @see gtc_round + /// @see gtx_bit + template + GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoNearest(genIUType Value); + + /// Return the power of two number which value is the closet to the input value. + /// Deprecated, use roundPowerOfTwo from GTC_round instead + /// + /// @see gtc_round + /// @see gtx_bit + template + GLM_DEPRECATED GLM_FUNC_DECL vec powerOfTwoNearest(vec const& value); + + /// @} +} //namespace glm + + +#include "bit.inl" + diff --git a/includes/glm/gtx/bit.inl b/includes/glm/gtx/bit.inl new file mode 100644 index 0000000..621b626 --- /dev/null +++ b/includes/glm/gtx/bit.inl @@ -0,0 +1,92 @@ +/// @ref gtx_bit + +namespace glm +{ + /////////////////// + // highestBitValue + + template + GLM_FUNC_QUALIFIER genIUType highestBitValue(genIUType Value) + { + genIUType tmp = Value; + genIUType result = genIUType(0); + while(tmp) + { + result = (tmp & (~tmp + 1)); // grab lowest bit + tmp &= ~result; // clear lowest bit + } + return result; + } + + template + GLM_FUNC_QUALIFIER vec highestBitValue(vec const& v) + { + return detail::functor1::call(highestBitValue, v); + } + + /////////////////// + // lowestBitValue + + template + GLM_FUNC_QUALIFIER genIUType lowestBitValue(genIUType Value) + { + return (Value & (~Value + 1)); + } + + template + GLM_FUNC_QUALIFIER vec lowestBitValue(vec const& v) + { + return detail::functor1::call(lowestBitValue, v); + } + + /////////////////// + // powerOfTwoAbove + + template + GLM_FUNC_QUALIFIER genType powerOfTwoAbove(genType value) + { + return isPowerOfTwo(value) ? value : highestBitValue(value) << 1; + } + + template + GLM_FUNC_QUALIFIER vec powerOfTwoAbove(vec const& v) + { + return detail::functor1::call(powerOfTwoAbove, v); + } + + /////////////////// + // powerOfTwoBelow + + template + GLM_FUNC_QUALIFIER genType powerOfTwoBelow(genType value) + { + return isPowerOfTwo(value) ? value : highestBitValue(value); + } + + template + GLM_FUNC_QUALIFIER vec powerOfTwoBelow(vec const& v) + { + return detail::functor1::call(powerOfTwoBelow, v); + } + + ///////////////////// + // powerOfTwoNearest + + template + GLM_FUNC_QUALIFIER genType powerOfTwoNearest(genType value) + { + if(isPowerOfTwo(value)) + return value; + + genType const prev = highestBitValue(value); + genType const next = prev << 1; + return (next - value) < (value - prev) ? 
next : prev; + } + + template + GLM_FUNC_QUALIFIER vec powerOfTwoNearest(vec const& v) + { + return detail::functor1::call(powerOfTwoNearest, v); + } + +}//namespace glm diff --git a/includes/glm/gtx/closest_point.hpp b/includes/glm/gtx/closest_point.hpp new file mode 100644 index 0000000..a248e4b --- /dev/null +++ b/includes/glm/gtx/closest_point.hpp @@ -0,0 +1,47 @@ +/// @ref gtx_closest_point +/// @file glm/gtx/closest_point.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_closest_point GLM_GTX_closest_point +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Find the point on a straight line which is the closet of a point. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_closest_point is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_closest_point extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_closest_point + /// @{ + + /// Find the point on a straight line which is the closet of a point. + /// @see gtx_closest_point + template + GLM_FUNC_DECL vec<3, T, Q> closestPointOnLine( + vec<3, T, Q> const& point, + vec<3, T, Q> const& a, + vec<3, T, Q> const& b); + + /// 2d lines work as well + template + GLM_FUNC_DECL vec<2, T, Q> closestPointOnLine( + vec<2, T, Q> const& point, + vec<2, T, Q> const& a, + vec<2, T, Q> const& b); + + /// @} +}// namespace glm + +#include "closest_point.inl" diff --git a/includes/glm/gtx/closest_point.inl b/includes/glm/gtx/closest_point.inl new file mode 100644 index 0000000..0a39b04 --- /dev/null +++ b/includes/glm/gtx/closest_point.inl @@ -0,0 +1,45 @@ +/// @ref gtx_closest_point + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> closestPointOnLine + ( + vec<3, T, Q> const& point, + vec<3, T, Q> const& a, + vec<3, T, Q> const& b + ) + { + T LineLength = distance(a, b); + vec<3, T, Q> Vector = point - a; + vec<3, T, Q> LineDirection = (b - a) / LineLength; + + // Project Vector to LineDirection to get the distance of point from a + T Distance = dot(Vector, LineDirection); + + if(Distance <= T(0)) return a; + if(Distance >= LineLength) return b; + return a + LineDirection * Distance; + } + + template + GLM_FUNC_QUALIFIER vec<2, T, Q> closestPointOnLine + ( + vec<2, T, Q> const& point, + vec<2, T, Q> const& a, + vec<2, T, Q> const& b + ) + { + T LineLength = distance(a, b); + vec<2, T, Q> Vector = point - a; + vec<2, T, Q> LineDirection = (b - a) / LineLength; + + // Project Vector to LineDirection to get the distance of point from a + T Distance = dot(Vector, LineDirection); + + if(Distance <= T(0)) return a; + if(Distance >= LineLength) return b; + return a + LineDirection * Distance; + } + +}//namespace glm diff --git a/includes/glm/gtx/color_encoding.hpp b/includes/glm/gtx/color_encoding.hpp new file mode 100644 index 0000000..4769e0a --- /dev/null +++ b/includes/glm/gtx/color_encoding.hpp @@ -0,0 +1,52 @@ +/// @ref gtx_color_encoding +/// @file glm/gtx/color_encoding.hpp +/// +/// @see core (dependence) +/// @see gtx_color_encoding (dependence) +/// +/// @defgroup gtx_color_encoding GLM_GTX_color_encoding +/// @ingroup gtx +/// +/// Include to use the features of this extension. 
+/// +/// @brief Allow to perform bit operations on integer values + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../vec3.hpp" +#include + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTC_color_encoding is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTC_color_encoding extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_color_encoding + /// @{ + + /// Convert a linear sRGB color to D65 YUV. + template + GLM_FUNC_DECL vec<3, T, Q> convertLinearSRGBToD65XYZ(vec<3, T, Q> const& ColorLinearSRGB); + + /// Convert a linear sRGB color to D50 YUV. + template + GLM_FUNC_DECL vec<3, T, Q> convertLinearSRGBToD50XYZ(vec<3, T, Q> const& ColorLinearSRGB); + + /// Convert a D65 YUV color to linear sRGB. + template + GLM_FUNC_DECL vec<3, T, Q> convertD65XYZToLinearSRGB(vec<3, T, Q> const& ColorD65XYZ); + + /// Convert a D65 YUV color to D50 YUV. + template + GLM_FUNC_DECL vec<3, T, Q> convertD65XYZToD50XYZ(vec<3, T, Q> const& ColorD65XYZ); + + /// @} +} //namespace glm + +#include "color_encoding.inl" diff --git a/includes/glm/gtx/color_encoding.inl b/includes/glm/gtx/color_encoding.inl new file mode 100644 index 0000000..e50fa3e --- /dev/null +++ b/includes/glm/gtx/color_encoding.inl @@ -0,0 +1,45 @@ +/// @ref gtx_color_encoding + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> convertLinearSRGBToD65XYZ(vec<3, T, Q> const& ColorLinearSRGB) + { + vec<3, T, Q> const M(0.490f, 0.17697f, 0.2f); + vec<3, T, Q> const N(0.31f, 0.8124f, 0.01063f); + vec<3, T, Q> const O(0.490f, 0.01f, 0.99f); + + return (M * ColorLinearSRGB + N * ColorLinearSRGB + O * ColorLinearSRGB) * static_cast(5.650675255693055f); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> convertLinearSRGBToD50XYZ(vec<3, T, Q> const& ColorLinearSRGB) + { + vec<3, T, Q> const M(0.436030342570117f, 0.222438466210245f, 0.013897440074263f); + vec<3, T, Q> const N(0.385101860087134f, 0.716942745571917f, 0.097076381494207f); + vec<3, T, Q> const O(0.143067806654203f, 0.060618777416563f, 0.713926257896652f); + + return M * ColorLinearSRGB + N * ColorLinearSRGB + O * ColorLinearSRGB; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> convertD65XYZToLinearSRGB(vec<3, T, Q> const& ColorD65XYZ) + { + vec<3, T, Q> const M(0.41847f, -0.091169f, 0.0009209f); + vec<3, T, Q> const N(-0.15866f, 0.25243f, 0.015708f); + vec<3, T, Q> const O(0.0009209f, -0.0025498f, 0.1786f); + + return M * ColorD65XYZ + N * ColorD65XYZ + O * ColorD65XYZ; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> convertD65XYZToD50XYZ(vec<3, T, Q> const& ColorD65XYZ) + { + vec<3, T, Q> const M(+1.047844353856414f, +0.029549007606644f, -0.009250984365223f); + vec<3, T, Q> const N(+0.022898981050086f, +0.990508028941971f, +0.015072338237051f); + vec<3, T, Q> const O(-0.050206647741605f, -0.017074711360960f, +0.751717835079977f); + + return M * ColorD65XYZ + N * ColorD65XYZ + O * ColorD65XYZ; + } + +}//namespace glm diff --git a/includes/glm/gtx/color_space.hpp b/includes/glm/gtx/color_space.hpp new file mode 100644 index 0000000..c39a1f4 --- /dev/null +++ b/includes/glm/gtx/color_space.hpp @@ -0,0 +1,70 @@ +/// @ref gtx_color_space +/// @file glm/gtx/color_space.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_color_space GLM_GTX_color_space +/// @ingroup gtx +/// +/// Include to use the 
features of this extension. +/// +/// Related to RGB to HSV conversions and operations. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_color_space is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_color_space extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_color_space + /// @{ + + /// Converts a color from HSV color space to its color in RGB color space. + /// @see gtx_color_space + template + GLM_FUNC_DECL vec<3, T, Q> rgbColor( + vec<3, T, Q> const& hsvValue); + + /// Converts a color from RGB color space to its color in HSV color space. + /// @see gtx_color_space + template + GLM_FUNC_DECL vec<3, T, Q> hsvColor( + vec<3, T, Q> const& rgbValue); + + /// Build a saturation matrix. + /// @see gtx_color_space + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> saturation( + T const s); + + /// Modify the saturation of a color. + /// @see gtx_color_space + template + GLM_FUNC_DECL vec<3, T, Q> saturation( + T const s, + vec<3, T, Q> const& color); + + /// Modify the saturation of a color. + /// @see gtx_color_space + template + GLM_FUNC_DECL vec<4, T, Q> saturation( + T const s, + vec<4, T, Q> const& color); + + /// Compute color luminosity associating ratios (0.33, 0.59, 0.11) to RGB canals. + /// @see gtx_color_space + template + GLM_FUNC_DECL T luminosity( + vec<3, T, Q> const& color); + + /// @} +}//namespace glm + +#include "color_space.inl" diff --git a/includes/glm/gtx/color_space.inl b/includes/glm/gtx/color_space.inl new file mode 100644 index 0000000..b3183b9 --- /dev/null +++ b/includes/glm/gtx/color_space.inl @@ -0,0 +1,144 @@ +/// @ref gtx_color_space + +#include +#include + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rgbColor(const vec<3, T, Q>& hsvColor) + { + vec<3, T, Q> hsv = hsvColor; + vec<3, T, Q> rgbColor; + + if(equal(hsv.y, static_cast(0), epsilon())) + // achromatic (grey) + rgbColor = vec<3, T, Q>(hsv.z); + else + { + T sector = floor(hsv.x * (T(1) / T(60))); + T frac = (hsv.x * (T(1) / T(60))) - sector; + // factorial part of h + T o = hsv.z * (T(1) - hsv.y); + T p = hsv.z * (T(1) - hsv.y * frac); + T q = hsv.z * (T(1) - hsv.y * (T(1) - frac)); + + switch(int(sector)) + { + default: + case 0: + rgbColor.r = hsv.z; + rgbColor.g = q; + rgbColor.b = o; + break; + case 1: + rgbColor.r = p; + rgbColor.g = hsv.z; + rgbColor.b = o; + break; + case 2: + rgbColor.r = o; + rgbColor.g = hsv.z; + rgbColor.b = q; + break; + case 3: + rgbColor.r = o; + rgbColor.g = p; + rgbColor.b = hsv.z; + break; + case 4: + rgbColor.r = q; + rgbColor.g = o; + rgbColor.b = hsv.z; + break; + case 5: + rgbColor.r = hsv.z; + rgbColor.g = o; + rgbColor.b = p; + break; + } + } + + return rgbColor; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> hsvColor(const vec<3, T, Q>& rgbColor) + { + vec<3, T, Q> hsv = rgbColor; + T Min = min(min(rgbColor.r, rgbColor.g), rgbColor.b); + T Max = max(max(rgbColor.r, rgbColor.g), rgbColor.b); + T Delta = Max - Min; + + hsv.z = Max; + + if(!equal(Max, static_cast(0), epsilon())) + { + hsv.y = Delta / hsv.z; + T h = static_cast(0); + + if(equal(rgbColor.r, Max, epsilon())) + // between yellow & magenta + h = static_cast(0) + T(60) * (rgbColor.g - rgbColor.b) / Delta; + else if(equal(rgbColor.g, Max, epsilon())) + // between cyan & yellow + h = static_cast(120) + T(60) 
* (rgbColor.b - rgbColor.r) / Delta; + else + // between magenta & cyan + h = static_cast(240) + T(60) * (rgbColor.r - rgbColor.g) / Delta; + + if(h < T(0)) + hsv.x = h + T(360); + else + hsv.x = h; + } + else + { + // If r = g = b = 0 then s = 0, h is undefined + hsv.y = static_cast(0); + hsv.x = static_cast(0); + } + + return hsv; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> saturation(T const s) + { + vec<3, T, defaultp> rgbw = vec<3, T, defaultp>(T(0.2126), T(0.7152), T(0.0722)); + + vec<3, T, defaultp> const col((T(1) - s) * rgbw); + + mat<4, 4, T, defaultp> result(T(1)); + result[0][0] = col.x + s; + result[0][1] = col.x; + result[0][2] = col.x; + result[1][0] = col.y; + result[1][1] = col.y + s; + result[1][2] = col.y; + result[2][0] = col.z; + result[2][1] = col.z; + result[2][2] = col.z + s; + + return result; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> saturation(const T s, const vec<3, T, Q>& color) + { + return vec<3, T, Q>(saturation(s) * vec<4, T, Q>(color, T(0))); + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> saturation(const T s, const vec<4, T, Q>& color) + { + return saturation(s) * color; + } + + template + GLM_FUNC_QUALIFIER T luminosity(const vec<3, T, Q>& color) + { + const vec<3, T, Q> tmp = vec<3, T, Q>(0.33, 0.59, 0.11); + return dot(color, tmp); + } +}//namespace glm diff --git a/includes/glm/gtx/color_space_YCoCg.hpp b/includes/glm/gtx/color_space_YCoCg.hpp new file mode 100644 index 0000000..a418037 --- /dev/null +++ b/includes/glm/gtx/color_space_YCoCg.hpp @@ -0,0 +1,58 @@ +/// @ref gtx_color_space_YCoCg +/// @file glm/gtx/color_space_YCoCg.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_color_space_YCoCg GLM_GTX_color_space_YCoCg +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// RGB to YCoCg conversions and operations + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_color_space_YCoCg is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_color_space_YCoCg extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_color_space_YCoCg + /// @{ + + /// Convert a color from RGB color space to YCoCg color space. + /// @see gtx_color_space_YCoCg + template + GLM_FUNC_DECL vec<3, T, Q> rgb2YCoCg( + vec<3, T, Q> const& rgbColor); + + /// Convert a color from YCoCg color space to RGB color space. + /// @see gtx_color_space_YCoCg + template + GLM_FUNC_DECL vec<3, T, Q> YCoCg2rgb( + vec<3, T, Q> const& YCoCgColor); + + /// Convert a color from RGB color space to YCoCgR color space. + /// @see "YCoCg-R: A Color Space with RGB Reversibility and Low Dynamic Range" + /// @see gtx_color_space_YCoCg + template + GLM_FUNC_DECL vec<3, T, Q> rgb2YCoCgR( + vec<3, T, Q> const& rgbColor); + + /// Convert a color from YCoCgR color space to RGB color space. 
+ /// @see "YCoCg-R: A Color Space with RGB Reversibility and Low Dynamic Range" + /// @see gtx_color_space_YCoCg + template + GLM_FUNC_DECL vec<3, T, Q> YCoCgR2rgb( + vec<3, T, Q> const& YCoCgColor); + + /// @} +}//namespace glm + +#include "color_space_YCoCg.inl" diff --git a/includes/glm/gtx/color_space_YCoCg.inl b/includes/glm/gtx/color_space_YCoCg.inl new file mode 100644 index 0000000..83ba857 --- /dev/null +++ b/includes/glm/gtx/color_space_YCoCg.inl @@ -0,0 +1,107 @@ +/// @ref gtx_color_space_YCoCg + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCg + ( + vec<3, T, Q> const& rgbColor + ) + { + vec<3, T, Q> result; + result.x/*Y */ = rgbColor.r / T(4) + rgbColor.g / T(2) + rgbColor.b / T(4); + result.y/*Co*/ = rgbColor.r / T(2) + rgbColor.g * T(0) - rgbColor.b / T(2); + result.z/*Cg*/ = - rgbColor.r / T(4) + rgbColor.g / T(2) - rgbColor.b / T(4); + return result; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCg2rgb + ( + vec<3, T, Q> const& YCoCgColor + ) + { + vec<3, T, Q> result; + result.r = YCoCgColor.x + YCoCgColor.y - YCoCgColor.z; + result.g = YCoCgColor.x + YCoCgColor.z; + result.b = YCoCgColor.x - YCoCgColor.y - YCoCgColor.z; + return result; + } + + template + class compute_YCoCgR { + public: + static GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR + ( + vec<3, T, Q> const& rgbColor + ) + { + vec<3, T, Q> result; + result.x/*Y */ = rgbColor.g * static_cast(0.5) + (rgbColor.r + rgbColor.b) * static_cast(0.25); + result.y/*Co*/ = rgbColor.r - rgbColor.b; + result.z/*Cg*/ = rgbColor.g - (rgbColor.r + rgbColor.b) * static_cast(0.5); + return result; + } + + static GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb + ( + vec<3, T, Q> const& YCoCgRColor + ) + { + vec<3, T, Q> result; + T tmp = YCoCgRColor.x - (YCoCgRColor.z * static_cast(0.5)); + result.g = YCoCgRColor.z + tmp; + result.b = tmp - (YCoCgRColor.y * static_cast(0.5)); + result.r = result.b + YCoCgRColor.y; + return result; + } + }; + + template + class compute_YCoCgR { + public: + static GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR + ( + vec<3, T, Q> const& rgbColor + ) + { + vec<3, T, Q> result; + result.y/*Co*/ = rgbColor.r - rgbColor.b; + T tmp = rgbColor.b + (result.y >> 1); + result.z/*Cg*/ = rgbColor.g - tmp; + result.x/*Y */ = tmp + (result.z >> 1); + return result; + } + + static GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb + ( + vec<3, T, Q> const& YCoCgRColor + ) + { + vec<3, T, Q> result; + T tmp = YCoCgRColor.x - (YCoCgRColor.z >> 1); + result.g = YCoCgRColor.z + tmp; + result.b = tmp - (YCoCgRColor.y >> 1); + result.r = result.b + YCoCgRColor.y; + return result; + } + }; + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR + ( + vec<3, T, Q> const& rgbColor + ) + { + return compute_YCoCgR::is_integer>::rgb2YCoCgR(rgbColor); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb + ( + vec<3, T, Q> const& YCoCgRColor + ) + { + return compute_YCoCgR::is_integer>::YCoCgR2rgb(YCoCgRColor); + } +}//namespace glm diff --git a/includes/glm/gtx/common.hpp b/includes/glm/gtx/common.hpp new file mode 100644 index 0000000..283f947 --- /dev/null +++ b/includes/glm/gtx/common.hpp @@ -0,0 +1,74 @@ +/// @ref gtx_common +/// @file glm/gtx/common.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_common GLM_GTX_common +/// @ingroup gtx +/// +/// Include to use the features of this extension. 
+/// +/// @brief Provide functions to increase the compatibility with Cg and HLSL languages + +#pragma once + +// Dependencies: +#include "../vec2.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" +#include "../gtc/vec1.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_common is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_common extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_common + /// @{ + + /// Returns true if x is a denormalized number + /// Numbers whose absolute value is too small to be represented in the normal format are represented in an alternate, denormalized format. + /// This format is less precise but can represent values closer to zero. + /// + /// @tparam genType Floating-point scalar or vector types. + /// + /// @see GLSL isnan man page + /// @see GLSL 4.20.8 specification, section 8.3 Common Functions + template + GLM_FUNC_DECL typename genType::bool_type isdenormal(genType const& x); + + /// Similar to 'mod' but with a different rounding and integer support. + /// Returns 'x - y * trunc(x/y)' instead of 'x - y * floor(x/y)' + /// + /// @see GLSL mod vs HLSL fmod + /// @see GLSL mod man page + template + GLM_FUNC_DECL vec fmod(vec const& v); + + /// Returns whether vector components values are within an interval. A open interval excludes its endpoints, and is denoted with square brackets. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_relational + template + GLM_FUNC_DECL vec openBounded(vec const& Value, vec const& Min, vec const& Max); + + /// Returns whether vector components values are within an interval. A closed interval includes its endpoints, and is denoted with square brackets. 
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or integer scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see ext_vector_relational + template + GLM_FUNC_DECL vec closeBounded(vec const& Value, vec const& Min, vec const& Max); + + /// @} +}//namespace glm + +#include "common.inl" diff --git a/includes/glm/gtx/common.inl b/includes/glm/gtx/common.inl new file mode 100644 index 0000000..4651e35 --- /dev/null +++ b/includes/glm/gtx/common.inl @@ -0,0 +1,125 @@ +/// @ref gtx_common + +#include +#include "../gtc/epsilon.hpp" +#include "../gtc/constants.hpp" + +namespace glm{ +namespace detail +{ + template + struct compute_fmod + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + return detail::functor2::call(TFmod(), a, b); + } + }; + + template + struct compute_fmod + { + GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) + { + return a % b; + } + }; +}//namespace detail + + template + GLM_FUNC_QUALIFIER bool isdenormal(T const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isdenormal' only accept floating-point inputs"); + +# if GLM_HAS_CXX11_STL + return std::fpclassify(x) == FP_SUBNORMAL; +# else + return epsilonNotEqual(x, static_cast(0), epsilon()) && std::fabs(x) < std::numeric_limits::min(); +# endif + } + + template + GLM_FUNC_QUALIFIER typename vec<1, T, Q>::bool_type isdenormal + ( + vec<1, T, Q> const& x + ) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isdenormal' only accept floating-point inputs"); + + return typename vec<1, T, Q>::bool_type( + isdenormal(x.x)); + } + + template + GLM_FUNC_QUALIFIER typename vec<2, T, Q>::bool_type isdenormal + ( + vec<2, T, Q> const& x + ) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isdenormal' only accept floating-point inputs"); + + return typename vec<2, T, Q>::bool_type( + isdenormal(x.x), + isdenormal(x.y)); + } + + template + GLM_FUNC_QUALIFIER typename vec<3, T, Q>::bool_type isdenormal + ( + vec<3, T, Q> const& x + ) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isdenormal' only accept floating-point inputs"); + + return typename vec<3, T, Q>::bool_type( + isdenormal(x.x), + isdenormal(x.y), + isdenormal(x.z)); + } + + template + GLM_FUNC_QUALIFIER typename vec<4, T, Q>::bool_type isdenormal + ( + vec<4, T, Q> const& x + ) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'isdenormal' only accept floating-point inputs"); + + return typename vec<4, T, Q>::bool_type( + isdenormal(x.x), + isdenormal(x.y), + isdenormal(x.z), + isdenormal(x.w)); + } + + // fmod + template + GLM_FUNC_QUALIFIER genType fmod(genType x, genType y) + { + return fmod(vec<1, genType>(x), y).x; + } + + template + GLM_FUNC_QUALIFIER vec fmod(vec const& x, T y) + { + return detail::compute_fmod::is_iec559>::call(x, vec(y)); + } + + template + GLM_FUNC_QUALIFIER vec fmod(vec const& x, vec const& y) + { + return detail::compute_fmod::is_iec559>::call(x, y); + } + + template + GLM_FUNC_QUALIFIER vec openBounded(vec const& Value, vec const& Min, vec const& Max) + { + return greaterThan(Value, Min) && lessThan(Value, Max); + } + + template + GLM_FUNC_QUALIFIER vec closeBounded(vec const& Value, vec const& Min, vec const& Max) + { + return greaterThanEqual(Value, Min) && lessThanEqual(Value, Max); + } +}//namespace glm 
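Not applied by this patch, but as a quick illustration of the GTX_common functions vendored just above: the extension adds an HLSL-style fmod (x - y * trunc(x/y), whereas core glm::mod uses floor) plus component-wise openBounded/closeBounded interval tests. A minimal sketch, assuming the vendored headers resolve as <glm/...> on the project's include path and that GLM_ENABLE_EXPERIMENTAL is defined before the GTX header (the header itself enforces this):

    // Illustration only -- not part of the patch. Assumes the vendored GLM
    // headers are reachable as <glm/...> via the project's include path.
    #define GLM_ENABLE_EXPERIMENTAL        // required for GTX extensions
    #include <glm/glm.hpp>
    #include <glm/gtx/common.hpp>
    #include <iostream>

    int main()
    {
        glm::vec3 const v(5.3f, -5.3f, 2.0f);

        // GTX fmod truncates toward zero (x - y * trunc(x / y)); core
        // glm::mod uses floor(), so the two differ for negative inputs.
        glm::vec3 const a = glm::fmod(v, 2.0f);   // ( 1.3, -1.3, 0.0)
        glm::vec3 const b = glm::mod(v, 2.0f);    // ( 1.3,  0.7, 0.0)

        // Component-wise interval tests: open excludes the bounds,
        // closed includes them.
        bool const inOpen = glm::all(glm::openBounded(v, glm::vec3(-10.0f), glm::vec3(10.0f)));
        bool const onEdge = glm::all(glm::closeBounded(v, glm::vec3(-5.3f), glm::vec3(5.3f)));

        std::cout << a.y << ' ' << b.y << ' ' << inOpen << ' ' << onEdge << '\n';
        return 0;
    }

The bool vectors returned by openBounded/closeBounded follow the core relational functions, so glm::all and glm::any compose with them directly.
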
diff --git a/includes/glm/gtx/compatibility.hpp b/includes/glm/gtx/compatibility.hpp new file mode 100644 index 0000000..463f86f --- /dev/null +++ b/includes/glm/gtx/compatibility.hpp @@ -0,0 +1,131 @@ +/// @ref gtx_compatibility +/// @file glm/gtx/compatibility.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_compatibility GLM_GTX_compatibility +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Provide functions to increase the compatibility with Cg and HLSL languages + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/quaternion.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_compatibility is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_compatibility extension included") +#endif + +#if GLM_COMPILER & GLM_COMPILER_VC +# include +#elif GLM_COMPILER & GLM_COMPILER_GCC +# include +# if(GLM_PLATFORM & GLM_PLATFORM_ANDROID) +# undef isfinite +# endif +#endif//GLM_COMPILER + +namespace glm +{ + /// @addtogroup gtx_compatibility + /// @{ + + template GLM_FUNC_QUALIFIER T lerp(T x, T y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<2, T, Q> lerp(const vec<2, T, Q>& x, const vec<2, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) + + template GLM_FUNC_QUALIFIER vec<3, T, Q> lerp(const vec<3, T, Q>& x, const vec<3, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<4, T, Q> lerp(const vec<4, T, Q>& x, const vec<4, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<2, T, Q> lerp(const vec<2, T, Q>& x, const vec<2, T, Q>& y, const vec<2, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<3, T, Q> lerp(const vec<3, T, Q>& x, const vec<3, T, Q>& y, const vec<3, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<4, T, Q> lerp(const vec<4, T, Q>& x, const vec<4, T, Q>& y, const vec<4, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. 
(From GLM_GTX_compatibility) + + template GLM_FUNC_QUALIFIER T saturate(T x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<2, T, Q> saturate(const vec<2, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<3, T, Q> saturate(const vec<3, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<4, T, Q> saturate(const vec<4, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) + + template GLM_FUNC_QUALIFIER T atan2(T y, T x){return atan(y, x);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<2, T, Q> atan2(const vec<2, T, Q>& y, const vec<2, T, Q>& x){return atan(y, x);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<3, T, Q> atan2(const vec<3, T, Q>& y, const vec<3, T, Q>& x){return atan(y, x);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) + template GLM_FUNC_QUALIFIER vec<4, T, Q> atan2(const vec<4, T, Q>& y, const vec<4, T, Q>& x){return atan(y, x);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) + + template GLM_FUNC_DECL bool isfinite(genType const& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) + template GLM_FUNC_DECL vec<1, bool, Q> isfinite(const vec<1, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) + template GLM_FUNC_DECL vec<2, bool, Q> isfinite(const vec<2, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) + template GLM_FUNC_DECL vec<3, bool, Q> isfinite(const vec<3, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) + template GLM_FUNC_DECL vec<4, bool, Q> isfinite(const vec<4, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) + + typedef bool bool1; //!< \brief boolean type with 1 component. (From GLM_GTX_compatibility extension) + typedef vec<2, bool, highp> bool2; //!< \brief boolean type with 2 components. (From GLM_GTX_compatibility extension) + typedef vec<3, bool, highp> bool3; //!< \brief boolean type with 3 components. 
(From GLM_GTX_compatibility extension) + typedef vec<4, bool, highp> bool4; //!< \brief boolean type with 4 components. (From GLM_GTX_compatibility extension) + + typedef bool bool1x1; //!< \brief boolean matrix with 1 x 1 component. (From GLM_GTX_compatibility extension) + typedef mat<2, 2, bool, highp> bool2x2; //!< \brief boolean matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 3, bool, highp> bool2x3; //!< \brief boolean matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 4, bool, highp> bool2x4; //!< \brief boolean matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 2, bool, highp> bool3x2; //!< \brief boolean matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 3, bool, highp> bool3x3; //!< \brief boolean matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 4, bool, highp> bool3x4; //!< \brief boolean matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 2, bool, highp> bool4x2; //!< \brief boolean matrix with 4 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 3, bool, highp> bool4x3; //!< \brief boolean matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 4, bool, highp> bool4x4; //!< \brief boolean matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) + + typedef int int1; //!< \brief integer vector with 1 component. (From GLM_GTX_compatibility extension) + typedef vec<2, int, highp> int2; //!< \brief integer vector with 2 components. (From GLM_GTX_compatibility extension) + typedef vec<3, int, highp> int3; //!< \brief integer vector with 3 components. (From GLM_GTX_compatibility extension) + typedef vec<4, int, highp> int4; //!< \brief integer vector with 4 components. (From GLM_GTX_compatibility extension) + + typedef int int1x1; //!< \brief integer matrix with 1 component. (From GLM_GTX_compatibility extension) + typedef mat<2, 2, int, highp> int2x2; //!< \brief integer matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 3, int, highp> int2x3; //!< \brief integer matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 4, int, highp> int2x4; //!< \brief integer matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 2, int, highp> int3x2; //!< \brief integer matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 3, int, highp> int3x3; //!< \brief integer matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 4, int, highp> int3x4; //!< \brief integer matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 2, int, highp> int4x2; //!< \brief integer matrix with 4 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 3, int, highp> int4x3; //!< \brief integer matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 4, int, highp> int4x4; //!< \brief integer matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) + + typedef float float1; //!< \brief single-qualifier floating-point vector with 1 component. (From GLM_GTX_compatibility extension) + typedef vec<2, float, highp> float2; //!< \brief single-qualifier floating-point vector with 2 components. 
(From GLM_GTX_compatibility extension) + typedef vec<3, float, highp> float3; //!< \brief single-qualifier floating-point vector with 3 components. (From GLM_GTX_compatibility extension) + typedef vec<4, float, highp> float4; //!< \brief single-qualifier floating-point vector with 4 components. (From GLM_GTX_compatibility extension) + + typedef float float1x1; //!< \brief single-qualifier floating-point matrix with 1 component. (From GLM_GTX_compatibility extension) + typedef mat<2, 2, float, highp> float2x2; //!< \brief single-qualifier floating-point matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 3, float, highp> float2x3; //!< \brief single-qualifier floating-point matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 4, float, highp> float2x4; //!< \brief single-qualifier floating-point matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 2, float, highp> float3x2; //!< \brief single-qualifier floating-point matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 3, float, highp> float3x3; //!< \brief single-qualifier floating-point matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 4, float, highp> float3x4; //!< \brief single-qualifier floating-point matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 2, float, highp> float4x2; //!< \brief single-qualifier floating-point matrix with 4 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 3, float, highp> float4x3; //!< \brief single-qualifier floating-point matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 4, float, highp> float4x4; //!< \brief single-qualifier floating-point matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) + + typedef double double1; //!< \brief double-qualifier floating-point vector with 1 component. (From GLM_GTX_compatibility extension) + typedef vec<2, double, highp> double2; //!< \brief double-qualifier floating-point vector with 2 components. (From GLM_GTX_compatibility extension) + typedef vec<3, double, highp> double3; //!< \brief double-qualifier floating-point vector with 3 components. (From GLM_GTX_compatibility extension) + typedef vec<4, double, highp> double4; //!< \brief double-qualifier floating-point vector with 4 components. (From GLM_GTX_compatibility extension) + + typedef double double1x1; //!< \brief double-qualifier floating-point matrix with 1 component. (From GLM_GTX_compatibility extension) + typedef mat<2, 2, double, highp> double2x2; //!< \brief double-qualifier floating-point matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 3, double, highp> double2x3; //!< \brief double-qualifier floating-point matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<2, 4, double, highp> double2x4; //!< \brief double-qualifier floating-point matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 2, double, highp> double3x2; //!< \brief double-qualifier floating-point matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 3, double, highp> double3x3; //!< \brief double-qualifier floating-point matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<3, 4, double, highp> double3x4; //!< \brief double-qualifier floating-point matrix with 3 x 4 components. 
(From GLM_GTX_compatibility extension) + typedef mat<4, 2, double, highp> double4x2; //!< \brief double-qualifier floating-point matrix with 4 x 2 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 3, double, highp> double4x3; //!< \brief double-qualifier floating-point matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) + typedef mat<4, 4, double, highp> double4x4; //!< \brief double-qualifier floating-point matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) + + /// @} +}//namespace glm + +#include "compatibility.inl" diff --git a/includes/glm/gtx/compatibility.inl b/includes/glm/gtx/compatibility.inl new file mode 100644 index 0000000..1d49496 --- /dev/null +++ b/includes/glm/gtx/compatibility.inl @@ -0,0 +1,62 @@ +#include + +namespace glm +{ + // isfinite + template + GLM_FUNC_QUALIFIER bool isfinite( + genType const& x) + { +# if GLM_HAS_CXX11_STL + return std::isfinite(x) != 0; +# elif GLM_COMPILER & GLM_COMPILER_VC + return _finite(x) != 0; +# elif GLM_COMPILER & GLM_COMPILER_GCC && GLM_PLATFORM & GLM_PLATFORM_ANDROID + return _isfinite(x) != 0; +# else + if (std::numeric_limits::is_integer || std::denorm_absent == std::numeric_limits::has_denorm) + return std::numeric_limits::min() <= x && std::numeric_limits::max() >= x; + else + return -std::numeric_limits::max() <= x && std::numeric_limits::max() >= x; +# endif + } + + template + GLM_FUNC_QUALIFIER vec<1, bool, Q> isfinite( + vec<1, T, Q> const& x) + { + return vec<1, bool, Q>( + isfinite(x.x)); + } + + template + GLM_FUNC_QUALIFIER vec<2, bool, Q> isfinite( + vec<2, T, Q> const& x) + { + return vec<2, bool, Q>( + isfinite(x.x), + isfinite(x.y)); + } + + template + GLM_FUNC_QUALIFIER vec<3, bool, Q> isfinite( + vec<3, T, Q> const& x) + { + return vec<3, bool, Q>( + isfinite(x.x), + isfinite(x.y), + isfinite(x.z)); + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> isfinite( + vec<4, T, Q> const& x) + { + return vec<4, bool, Q>( + isfinite(x.x), + isfinite(x.y), + isfinite(x.z), + isfinite(x.w)); + } + +}//namespace glm diff --git a/includes/glm/gtx/component_wise.hpp b/includes/glm/gtx/component_wise.hpp new file mode 100644 index 0000000..b1caaa2 --- /dev/null +++ b/includes/glm/gtx/component_wise.hpp @@ -0,0 +1,77 @@ +/// @ref gtx_component_wise +/// @file glm/gtx/component_wise.hpp +/// @date 2007-05-21 / 2011-06-07 +/// @author Christophe Riccio +/// +/// @see core (dependence) +/// +/// @defgroup gtx_component_wise GLM_GTX_component_wise +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Operations between components of a type + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_component_wise is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_component_wise extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_component_wise + /// @{ + + /// Convert an integer vector to a normalized float vector. + /// If the parameter value type is already a floating qualifier type, the value is passed through. + /// @see gtx_component_wise + template + GLM_FUNC_DECL vec compNormalize(vec const& v); + + /// Convert a normalized float vector to an integer vector. 
+ /// If the parameter value type is already a floating qualifier type, the value is passed through. + /// @see gtx_component_wise + template + GLM_FUNC_DECL vec compScale(vec const& v); + + /// Add all vector components together. + /// @see gtx_component_wise + template + GLM_FUNC_DECL typename genType::value_type compAdd(genType const& v); + + /// Multiply all vector components together. + /// @see gtx_component_wise + template + GLM_FUNC_DECL typename genType::value_type compMul(genType const& v); + + /// Find the minimum value between single vector components. + /// @see gtx_component_wise + template + GLM_FUNC_DECL typename genType::value_type compMin(genType const& v); + + /// Find the maximum value between single vector components. + /// @see gtx_component_wise + template + GLM_FUNC_DECL typename genType::value_type compMax(genType const& v); + + /// Find the minimum float between single vector components. + /// @see gtx_component_wise + template + GLM_FUNC_DECL typename genType::value_type fcompMin(genType const& v); + + /// Find the maximum float between single vector components. + /// @see gtx_component_wise + template + GLM_FUNC_DECL typename genType::value_type fcompMax(genType const& v); + + /// @} +}//namespace glm + +#include "component_wise.inl" diff --git a/includes/glm/gtx/component_wise.inl b/includes/glm/gtx/component_wise.inl new file mode 100644 index 0000000..f8217b2 --- /dev/null +++ b/includes/glm/gtx/component_wise.inl @@ -0,0 +1,147 @@ +/// @ref gtx_component_wise + +#include "../ext/scalar_common.hpp" +#include +#include + +namespace glm{ +namespace detail +{ + template + struct compute_compNormalize + {}; + + template + struct compute_compNormalize + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + floatType const Min = static_cast(std::numeric_limits::min()); + floatType const Max = static_cast(std::numeric_limits::max()); + return (vec(v) - Min) / (Max - Min) * static_cast(2) - static_cast(1); + } + }; + + template + struct compute_compNormalize + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + return vec(v) / static_cast(std::numeric_limits::max()); + } + }; + + template + struct compute_compNormalize + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + return v; + } + }; + + template + struct compute_compScale + {}; + + template + struct compute_compScale + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + floatType const Max = static_cast(std::numeric_limits::max()) + static_cast(0.5); + vec const Scaled(v * Max); + vec const Result(Scaled - static_cast(0.5)); + return Result; + } + }; + + template + struct compute_compScale + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + return vec(vec(v) * static_cast(std::numeric_limits::max())); + } + }; + + template + struct compute_compScale + { + GLM_FUNC_QUALIFIER static vec call(vec const& v) + { + return v; + } + }; +}//namespace detail + + template + GLM_FUNC_QUALIFIER vec compNormalize(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'compNormalize' accepts only floating-point types for 'floatType' template parameter"); + + return detail::compute_compNormalize::is_integer, std::numeric_limits::is_signed>::call(v); + } + + template + GLM_FUNC_QUALIFIER vec compScale(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'compScale' accepts only floating-point types for 'floatType' template parameter"); + + return detail::compute_compScale::is_integer, std::numeric_limits::is_signed>::call(v); + } + + template + 
GLM_FUNC_QUALIFIER T compAdd(vec const& v) + { + T Result(0); + for(length_t i = 0, n = v.length(); i < n; ++i) + Result += v[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER T compMul(vec const& v) + { + T Result(1); + for(length_t i = 0, n = v.length(); i < n; ++i) + Result *= v[i]; + return Result; + } + + template + GLM_FUNC_QUALIFIER T compMin(vec const& v) + { + T Result(v[0]); + for(length_t i = 1, n = v.length(); i < n; ++i) + Result = min(Result, v[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER T compMax(vec const& v) + { + T Result(v[0]); + for(length_t i = 1, n = v.length(); i < n; ++i) + Result = max(Result, v[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER T fcompMin(vec const& v) + { + T Result(v[0]); + for(length_t i = 1, n = v.length(); i < n; ++i) + Result = fmin(Result, v[i]); + return Result; + } + + template + GLM_FUNC_QUALIFIER T fcompMax(vec const& v) + { + T Result(v[0]); + for(length_t i = 1, n = v.length(); i < n; ++i) + Result = fmax(Result, v[i]); + return Result; + } +}//namespace glm diff --git a/includes/glm/gtx/dual_quaternion.hpp b/includes/glm/gtx/dual_quaternion.hpp new file mode 100644 index 0000000..04a6070 --- /dev/null +++ b/includes/glm/gtx/dual_quaternion.hpp @@ -0,0 +1,272 @@ +/// @ref gtx_dual_quaternion +/// @file glm/gtx/dual_quaternion.hpp +/// @author Maksim Vorobiev (msomeone@gmail.com) +/// +/// @see core (dependence) +/// @see gtc_constants (dependence) +/// @see gtc_quaternion (dependence) +/// +/// @defgroup gtx_dual_quaternion GLM_GTX_dual_quaternion +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Defines a templated dual-quaternion type and several dual-quaternion operations. + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/constants.hpp" +#include "../gtc/quaternion.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_dual_quaternion is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." 
+#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_dual_quaternion extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_dual_quaternion + /// @{ + + template + struct tdualquat + { + // -- Implementation detail -- + + typedef T value_type; + typedef qua part_type; + + // -- Data -- + + qua real, dual; + + // -- Component accesses -- + + typedef length_t length_type; + /// Return the count of components of a dual quaternion + GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 2;} + + GLM_FUNC_DECL part_type & operator[](length_type i); + GLM_FUNC_DECL part_type const& operator[](length_type i) const; + + // -- Implicit basic constructors -- + + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR tdualquat() GLM_DEFAULT; + GLM_DEFAULTED_FUNC_DECL GLM_CONSTEXPR tdualquat(tdualquat const& d) GLM_DEFAULT; + template + GLM_CTOR_DECL tdualquat(tdualquat const& d); + + // -- Explicit basic constructors -- + + GLM_CTOR_DECL tdualquat(qua const& real); + GLM_CTOR_DECL tdualquat(qua const& orientation, vec<3, T, Q> const& translation); + GLM_CTOR_DECL tdualquat(qua const& real, qua const& dual); + + // -- Conversion constructors -- + + template + GLM_CTOR_DECL GLM_EXPLICIT tdualquat(tdualquat const& q); + + GLM_CTOR_DECL GLM_EXPLICIT tdualquat(mat<2, 4, T, Q> const& holder_mat); + GLM_CTOR_DECL GLM_EXPLICIT tdualquat(mat<3, 4, T, Q> const& aug_mat); + + // -- Unary arithmetic operators -- + + GLM_DEFAULTED_FUNC_DECL tdualquat & operator=(tdualquat const& m) GLM_DEFAULT; + + template + GLM_FUNC_DISCARD_DECL tdualquat & operator=(tdualquat const& m); + template + GLM_FUNC_DISCARD_DECL tdualquat & operator*=(U s); + template + GLM_FUNC_DISCARD_DECL tdualquat & operator/=(U s); + }; + + // -- Unary bit operators -- + + template + GLM_FUNC_DECL tdualquat operator+(tdualquat const& q); + + template + GLM_FUNC_DECL tdualquat operator-(tdualquat const& q); + + // -- Binary operators -- + + template + GLM_FUNC_DECL tdualquat operator+(tdualquat const& q, tdualquat const& p); + + template + GLM_FUNC_DECL tdualquat operator*(tdualquat const& q, tdualquat const& p); + + template + GLM_FUNC_DECL vec<3, T, Q> operator*(tdualquat const& q, vec<3, T, Q> const& v); + + template + GLM_FUNC_DECL vec<3, T, Q> operator*(vec<3, T, Q> const& v, tdualquat const& q); + + template + GLM_FUNC_DECL vec<4, T, Q> operator*(tdualquat const& q, vec<4, T, Q> const& v); + + template + GLM_FUNC_DECL vec<4, T, Q> operator*(vec<4, T, Q> const& v, tdualquat const& q); + + template + GLM_FUNC_DECL tdualquat operator*(tdualquat const& q, T const& s); + + template + GLM_FUNC_DECL tdualquat operator*(T const& s, tdualquat const& q); + + template + GLM_FUNC_DECL tdualquat operator/(tdualquat const& q, T const& s); + + // -- Boolean operators -- + + template + GLM_FUNC_DECL bool operator==(tdualquat const& q1, tdualquat const& q2); + + template + GLM_FUNC_DECL bool operator!=(tdualquat const& q1, tdualquat const& q2); + + /// Creates an identity dual quaternion. + /// + /// @see gtx_dual_quaternion + template + GLM_FUNC_DECL tdualquat dual_quat_identity(); + + /// Returns the normalized quaternion. + /// + /// @see gtx_dual_quaternion + template + GLM_FUNC_DECL tdualquat normalize(tdualquat const& q); + + /// Returns the linear interpolation of two dual quaternion. + /// + /// @see gtc_dual_quaternion + template + GLM_FUNC_DECL tdualquat lerp(tdualquat const& x, tdualquat const& y, T const& a); + + /// Returns the q inverse. 
+ /// + /// @see gtx_dual_quaternion + template + GLM_FUNC_DECL tdualquat inverse(tdualquat const& q); + + /// Converts a quaternion to a 2 * 4 matrix. + /// + /// @see gtx_dual_quaternion + template + GLM_FUNC_DECL mat<2, 4, T, Q> mat2x4_cast(tdualquat const& x); + + /// Converts a quaternion to a 3 * 4 matrix. + /// + /// @see gtx_dual_quaternion + template + GLM_FUNC_DECL mat<3, 4, T, Q> mat3x4_cast(tdualquat const& x); + + /// Converts a 2 * 4 matrix (matrix which holds real and dual parts) to a quaternion. + /// + /// @see gtx_dual_quaternion + template + GLM_FUNC_DECL tdualquat dualquat_cast(mat<2, 4, T, Q> const& x); + + /// Converts a 3 * 4 matrix (augmented matrix rotation + translation) to a quaternion. + /// + /// @see gtx_dual_quaternion + template + GLM_FUNC_DECL tdualquat dualquat_cast(mat<3, 4, T, Q> const& x); + + + /// Dual-quaternion of low single-qualifier floating-point numbers. + /// + /// @see gtx_dual_quaternion + typedef tdualquat lowp_dualquat; + + /// Dual-quaternion of medium single-qualifier floating-point numbers. + /// + /// @see gtx_dual_quaternion + typedef tdualquat mediump_dualquat; + + /// Dual-quaternion of high single-qualifier floating-point numbers. + /// + /// @see gtx_dual_quaternion + typedef tdualquat highp_dualquat; + + + /// Dual-quaternion of low single-qualifier floating-point numbers. + /// + /// @see gtx_dual_quaternion + typedef tdualquat lowp_fdualquat; + + /// Dual-quaternion of medium single-qualifier floating-point numbers. + /// + /// @see gtx_dual_quaternion + typedef tdualquat mediump_fdualquat; + + /// Dual-quaternion of high single-qualifier floating-point numbers. + /// + /// @see gtx_dual_quaternion + typedef tdualquat highp_fdualquat; + + + /// Dual-quaternion of low double-qualifier floating-point numbers. + /// + /// @see gtx_dual_quaternion + typedef tdualquat lowp_ddualquat; + + /// Dual-quaternion of medium double-qualifier floating-point numbers. + /// + /// @see gtx_dual_quaternion + typedef tdualquat mediump_ddualquat; + + /// Dual-quaternion of high double-qualifier floating-point numbers. + /// + /// @see gtx_dual_quaternion + typedef tdualquat highp_ddualquat; + + +#if(!defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT)) + /// Dual-quaternion of floating-point numbers. + /// + /// @see gtx_dual_quaternion + typedef highp_fdualquat dualquat; + + /// Dual-quaternion of single-qualifier floating-point numbers. + /// + /// @see gtx_dual_quaternion + typedef highp_fdualquat fdualquat; +#elif(defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT)) + typedef highp_fdualquat dualquat; + typedef highp_fdualquat fdualquat; +#elif(!defined(GLM_PRECISION_HIGHP_FLOAT) && defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT)) + typedef mediump_fdualquat dualquat; + typedef mediump_fdualquat fdualquat; +#elif(!defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && defined(GLM_PRECISION_LOWP_FLOAT)) + typedef lowp_fdualquat dualquat; + typedef lowp_fdualquat fdualquat; +#else +# error "GLM error: multiple default precision requested for single-precision floating-point types" +#endif + + +#if(!defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE)) + /// Dual-quaternion of default double-qualifier floating-point numbers. 
+ /// + /// @see gtx_dual_quaternion + typedef highp_ddualquat ddualquat; +#elif(defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE)) + typedef highp_ddualquat ddualquat; +#elif(!defined(GLM_PRECISION_HIGHP_DOUBLE) && defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE)) + typedef mediump_ddualquat ddualquat; +#elif(!defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && defined(GLM_PRECISION_LOWP_DOUBLE)) + typedef lowp_ddualquat ddualquat; +#else +# error "GLM error: Multiple default precision requested for double-precision floating-point types" +#endif + + /// @} +} //namespace glm + +#include "dual_quaternion.inl" diff --git a/includes/glm/gtx/dual_quaternion.inl b/includes/glm/gtx/dual_quaternion.inl new file mode 100644 index 0000000..3a04160 --- /dev/null +++ b/includes/glm/gtx/dual_quaternion.inl @@ -0,0 +1,352 @@ +/// @ref gtx_dual_quaternion + +#include "../geometric.hpp" +#include + +namespace glm +{ + // -- Component accesses -- + + template + GLM_FUNC_QUALIFIER typename tdualquat::part_type & tdualquat::operator[](typename tdualquat::length_type i) + { + assert(i >= 0 && i < this->length()); + return (&real)[i]; + } + + template + GLM_FUNC_QUALIFIER typename tdualquat::part_type const& tdualquat::operator[](typename tdualquat::length_type i) const + { + assert(i >= 0 && i < this->length()); + return (&real)[i]; + } + + // -- Implicit basic constructors -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat() +# if GLM_CONFIG_DEFAULTED_FUNCTIONS != GLM_DISABLE + : real(qua()) + , dual(qua::wxyz(0, 0, 0, 0)) +# endif + {} + + template + GLM_DEFAULTED_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(tdualquat const& d) + : real(d.real) + , dual(d.dual) + {} +# endif + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(tdualquat const& d) + : real(d.real) + , dual(d.dual) + {} + + // -- Explicit basic constructors -- + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(qua const& r) + : real(r), dual(qua::wxyz(0, 0, 0, 0)) + {} + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(qua const& q, vec<3, T, Q> const& p) + : real(q), dual(qua::wxyz( + T(-0.5) * ( p.x*q.x + p.y*q.y + p.z*q.z), + T(+0.5) * ( p.x*q.w + p.y*q.z - p.z*q.y), + T(+0.5) * (-p.x*q.z + p.y*q.w + p.z*q.x), + T(+0.5) * ( p.x*q.y - p.y*q.x + p.z*q.w))) + {} + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(qua const& r, qua const& d) + : real(r), dual(d) + {} + + // -- Conversion constructors -- + + template + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(tdualquat const& q) + : real(q.real) + , dual(q.dual) + {} + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(mat<2, 4, T, Q> const& m) + { + *this = dualquat_cast(m); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(mat<3, 4, T, Q> const& m) + { + *this = dualquat_cast(m); + } + + // -- Unary arithmetic operators -- + +# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE + template + GLM_DEFAULTED_FUNC_QUALIFIER tdualquat & tdualquat::operator=(tdualquat const& q) + { + this->real = q.real; + this->dual = q.dual; + return *this; + } +# endif + + template + template + GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator=(tdualquat const& q) + { + this->real = q.real; + this->dual = q.dual; + return *this; + } + + template + template + 
GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator*=(U s) + { + this->real *= static_cast(s); + this->dual *= static_cast(s); + return *this; + } + + template + template + GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator/=(U s) + { + this->real /= static_cast(s); + this->dual /= static_cast(s); + return *this; + } + + // -- Unary bit operators -- + + template + GLM_FUNC_QUALIFIER tdualquat operator+(tdualquat const& q) + { + return q; + } + + template + GLM_FUNC_QUALIFIER tdualquat operator-(tdualquat const& q) + { + return tdualquat(-q.real, -q.dual); + } + + // -- Binary operators -- + + template + GLM_FUNC_QUALIFIER tdualquat operator+(tdualquat const& q, tdualquat const& p) + { + return tdualquat(q.real + p.real,q.dual + p.dual); + } + + template + GLM_FUNC_QUALIFIER tdualquat operator*(tdualquat const& p, tdualquat const& o) + { + return tdualquat(p.real * o.real,p.real * o.dual + p.dual * o.real); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> operator*(tdualquat const& q, vec<3, T, Q> const& v) + { + vec<3, T, Q> const real_v3(q.real.x,q.real.y,q.real.z); + vec<3, T, Q> const dual_v3(q.dual.x,q.dual.y,q.dual.z); + return (cross(real_v3, cross(real_v3,v) + v * q.real.w + dual_v3) + dual_v3 * q.real.w - real_v3 * q.dual.w) * T(2) + v; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> operator*(vec<3, T, Q> const& v, tdualquat const& q) + { + return glm::inverse(q) * v; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> operator*(tdualquat const& q, vec<4, T, Q> const& v) + { + return vec<4, T, Q>(q * vec<3, T, Q>(v), v.w); + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> operator*(vec<4, T, Q> const& v, tdualquat const& q) + { + return glm::inverse(q) * v; + } + + template + GLM_FUNC_QUALIFIER tdualquat operator*(tdualquat const& q, T const& s) + { + return tdualquat(q.real * s, q.dual * s); + } + + template + GLM_FUNC_QUALIFIER tdualquat operator*(T const& s, tdualquat const& q) + { + return q * s; + } + + template + GLM_FUNC_QUALIFIER tdualquat operator/(tdualquat const& q, T const& s) + { + return tdualquat(q.real / s, q.dual / s); + } + + // -- Boolean operators -- + + template + GLM_FUNC_QUALIFIER bool operator==(tdualquat const& q1, tdualquat const& q2) + { + return (q1.real == q2.real) && (q1.dual == q2.dual); + } + + template + GLM_FUNC_QUALIFIER bool operator!=(tdualquat const& q1, tdualquat const& q2) + { + return (q1.real != q2.real) || (q1.dual != q2.dual); + } + + // -- Operations -- + + template + GLM_FUNC_QUALIFIER tdualquat dual_quat_identity() + { + return tdualquat( + qua::wxyz(static_cast(1), static_cast(0), static_cast(0), static_cast(0)), + qua::wxyz(static_cast(0), static_cast(0), static_cast(0), static_cast(0))); + } + + template + GLM_FUNC_QUALIFIER tdualquat normalize(tdualquat const& q) + { + return q / length(q.real); + } + + template + GLM_FUNC_QUALIFIER tdualquat lerp(tdualquat const& x, tdualquat const& y, T const& a) + { + // Dual Quaternion Linear blend aka DLB: + // Lerp is only defined in [0, 1] + assert(a >= static_cast(0)); + assert(a <= static_cast(1)); + T const k = dot(x.real,y.real) < static_cast(0) ? 
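// Editorial sketch (not from the GLM sources; helper name is illustrative): dual-quaternion
// linear blending (DLB) with the lerp() defined here. lerp() asserts a in [0, 1] and flips
// the sign of the weight on the second operand when dot(x.real, y.real) < 0, so the blend
// takes the shorter rotation path; the result is re-normalized before use.
inline glm::dualquat blendPose(glm::dualquat const& a, glm::dualquat const& b, float t)
{
    glm::dualquat blended = glm::lerp(a, b, t);   // component-wise blend of real and dual parts
    return glm::normalize(blended);               // divides by the length of the real part
}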
-a : a; + T const one(1); + return tdualquat(x * (one - a) + y * k); + } + + template + GLM_FUNC_QUALIFIER tdualquat inverse(tdualquat const& q) + { + const glm::qua real = conjugate(q.real); + const glm::qua dual = conjugate(q.dual); + return tdualquat(real, dual + (real * (-2.0f * dot(real,dual)))); + } + + template + GLM_FUNC_QUALIFIER mat<2, 4, T, Q> mat2x4_cast(tdualquat const& x) + { + return mat<2, 4, T, Q>( x[0].x, x[0].y, x[0].z, x[0].w, x[1].x, x[1].y, x[1].z, x[1].w ); + } + + template + GLM_FUNC_QUALIFIER mat<3, 4, T, Q> mat3x4_cast(tdualquat const& x) + { + qua r = x.real / length2(x.real); + + qua const rr(r.w * x.real.w, r.x * x.real.x, r.y * x.real.y, r.z * x.real.z); + r *= static_cast(2); + + T const xy = r.x * x.real.y; + T const xz = r.x * x.real.z; + T const yz = r.y * x.real.z; + T const wx = r.w * x.real.x; + T const wy = r.w * x.real.y; + T const wz = r.w * x.real.z; + + vec<4, T, Q> const a( + rr.w + rr.x - rr.y - rr.z, + xy - wz, + xz + wy, + -(x.dual.w * r.x - x.dual.x * r.w + x.dual.y * r.z - x.dual.z * r.y)); + + vec<4, T, Q> const b( + xy + wz, + rr.w + rr.y - rr.x - rr.z, + yz - wx, + -(x.dual.w * r.y - x.dual.x * r.z - x.dual.y * r.w + x.dual.z * r.x)); + + vec<4, T, Q> const c( + xz - wy, + yz + wx, + rr.w + rr.z - rr.x - rr.y, + -(x.dual.w * r.z + x.dual.x * r.y - x.dual.y * r.x - x.dual.z * r.w)); + + return mat<3, 4, T, Q>(a, b, c); + } + + template + GLM_FUNC_QUALIFIER tdualquat dualquat_cast(mat<2, 4, T, Q> const& x) + { + return tdualquat( + qua::wxyz( x[0].w, x[0].x, x[0].y, x[0].z ), + qua::wxyz( x[1].w, x[1].x, x[1].y, x[1].z )); + } + + template + GLM_FUNC_QUALIFIER tdualquat dualquat_cast(mat<3, 4, T, Q> const& x) + { + qua real; + + T const trace = x[0].x + x[1].y + x[2].z; + if(trace > static_cast(0)) + { + T const r = sqrt(T(1) + trace); + T const invr = static_cast(0.5) / r; + real.w = static_cast(0.5) * r; + real.x = (x[2].y - x[1].z) * invr; + real.y = (x[0].z - x[2].x) * invr; + real.z = (x[1].x - x[0].y) * invr; + } + else if(x[0].x > x[1].y && x[0].x > x[2].z) + { + T const r = sqrt(T(1) + x[0].x - x[1].y - x[2].z); + T const invr = static_cast(0.5) / r; + real.x = static_cast(0.5)*r; + real.y = (x[1].x + x[0].y) * invr; + real.z = (x[0].z + x[2].x) * invr; + real.w = (x[2].y - x[1].z) * invr; + } + else if(x[1].y > x[2].z) + { + T const r = sqrt(T(1) + x[1].y - x[0].x - x[2].z); + T const invr = static_cast(0.5) / r; + real.x = (x[1].x + x[0].y) * invr; + real.y = static_cast(0.5) * r; + real.z = (x[2].y + x[1].z) * invr; + real.w = (x[0].z - x[2].x) * invr; + } + else + { + T const r = sqrt(T(1) + x[2].z - x[0].x - x[1].y); + T const invr = static_cast(0.5) / r; + real.x = (x[0].z + x[2].x) * invr; + real.y = (x[2].y + x[1].z) * invr; + real.z = static_cast(0.5) * r; + real.w = (x[1].x - x[0].y) * invr; + } + + qua dual; + dual.x = static_cast(0.5) * ( x[0].w * real.w + x[1].w * real.z - x[2].w * real.y); + dual.y = static_cast(0.5) * (-x[0].w * real.z + x[1].w * real.w + x[2].w * real.x); + dual.z = static_cast(0.5) * ( x[0].w * real.y - x[1].w * real.x + x[2].w * real.w); + dual.w = -static_cast(0.5) * ( x[0].w * real.x + x[1].w * real.y + x[2].w * real.z); + return tdualquat(real, dual); + } +}//namespace glm diff --git a/includes/glm/gtx/easing.hpp b/includes/glm/gtx/easing.hpp new file mode 100644 index 0000000..50ed903 --- /dev/null +++ b/includes/glm/gtx/easing.hpp @@ -0,0 +1,217 @@ +/// @ref gtx_easing +/// @file glm/gtx/easing.hpp +/// @author Robert Chisholm +/// +/// @see core (dependence) +/// +/// @defgroup gtx_easing 
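// Editorial sketch (not from the GLM sources; helper name is illustrative): converting a
// dual quaternion to the augmented rotation + translation matrix form and back with the
// casts implemented above. For a unit dual quaternion the round trip reproduces the same
// rigid transform (the quaternion itself may come back with its sign flipped).
inline glm::dualquat matrixRoundTrip(glm::dualquat const& dq)
{
    glm::mat3x4 m = glm::mat3x4_cast(dq);   // augmented matrix: rotation plus translation
    return glm::dualquat_cast(m);           // rebuild real and dual parts from the matrix
}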
GLM_GTX_easing +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Easing functions for animations and transitions +/// All functions take a parameter x in the range [0.0,1.0] +/// +/// Based on the AHEasing project of Warren Moore (https://github.com/warrenm/AHEasing) + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/constants.hpp" +#include "../detail/qualifier.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_easing is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_easing extension included") +#endif + +namespace glm{ + /// @addtogroup gtx_easing + /// @{ + + /// Modelled after the line y = x + /// @see gtx_easing + template + GLM_FUNC_DECL genType linearInterpolation(genType const & a); + + /// Modelled after the parabola y = x^2 + /// @see gtx_easing + template + GLM_FUNC_DECL genType quadraticEaseIn(genType const & a); + + /// Modelled after the parabola y = -x^2 + 2x + /// @see gtx_easing + template + GLM_FUNC_DECL genType quadraticEaseOut(genType const & a); + + /// Modelled after the piecewise quadratic + /// y = (1/2)((2x)^2) ; [0, 0.5) + /// y = -(1/2)((2x-1)*(2x-3) - 1) ; [0.5, 1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType quadraticEaseInOut(genType const & a); + + /// Modelled after the cubic y = x^3 + template + GLM_FUNC_DECL genType cubicEaseIn(genType const & a); + + /// Modelled after the cubic y = (x - 1)^3 + 1 + /// @see gtx_easing + template + GLM_FUNC_DECL genType cubicEaseOut(genType const & a); + + /// Modelled after the piecewise cubic + /// y = (1/2)((2x)^3) ; [0, 0.5) + /// y = (1/2)((2x-2)^3 + 2) ; [0.5, 1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType cubicEaseInOut(genType const & a); + + /// Modelled after the quartic x^4 + /// @see gtx_easing + template + GLM_FUNC_DECL genType quarticEaseIn(genType const & a); + + /// Modelled after the quartic y = 1 - (x - 1)^4 + /// @see gtx_easing + template + GLM_FUNC_DECL genType quarticEaseOut(genType const & a); + + /// Modelled after the piecewise quartic + /// y = (1/2)((2x)^4) ; [0, 0.5) + /// y = -(1/2)((2x-2)^4 - 2) ; [0.5, 1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType quarticEaseInOut(genType const & a); + + /// Modelled after the quintic y = x^5 + /// @see gtx_easing + template + GLM_FUNC_DECL genType quinticEaseIn(genType const & a); + + /// Modelled after the quintic y = (x - 1)^5 + 1 + /// @see gtx_easing + template + GLM_FUNC_DECL genType quinticEaseOut(genType const & a); + + /// Modelled after the piecewise quintic + /// y = (1/2)((2x)^5) ; [0, 0.5) + /// y = (1/2)((2x-2)^5 + 2) ; [0.5, 1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType quinticEaseInOut(genType const & a); + + /// Modelled after quarter-cycle of sine wave + /// @see gtx_easing + template + GLM_FUNC_DECL genType sineEaseIn(genType const & a); + + /// Modelled after quarter-cycle of sine wave (different phase) + /// @see gtx_easing + template + GLM_FUNC_DECL genType sineEaseOut(genType const & a); + + /// Modelled after half sine wave + /// @see gtx_easing + template + GLM_FUNC_DECL genType sineEaseInOut(genType const & a); + + /// Modelled after shifted quadrant IV of unit circle + /// @see gtx_easing + template + GLM_FUNC_DECL genType circularEaseIn(genType const & a); + + /// Modelled after shifted quadrant II of unit circle + /// 
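// Editorial sketch (not from the GLM sources; helper name is illustrative): every easing
// function above maps a parameter in [0, 1] to an eased weight, so they compose naturally
// with glm::mix. Assumes GLM_ENABLE_EXPERIMENTAL is defined before the GTX include.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/easing.hpp>

inline glm::vec3 animate(glm::vec3 const& from, glm::vec3 const& to, float t)
{
    float eased = glm::quadraticEaseInOut(t);   // slow start, fast middle, slow finish
    return glm::mix(from, to, eased);           // blend positions with the eased weight
}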
@see gtx_easing + template + GLM_FUNC_DECL genType circularEaseOut(genType const & a); + + /// Modelled after the piecewise circular function + /// y = (1/2)(1 - sqrt(1 - 4x^2)) ; [0, 0.5) + /// y = (1/2)(sqrt(-(2x - 3)*(2x - 1)) + 1) ; [0.5, 1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType circularEaseInOut(genType const & a); + + /// Modelled after the exponential function y = 2^(10(x - 1)) + /// @see gtx_easing + template + GLM_FUNC_DECL genType exponentialEaseIn(genType const & a); + + /// Modelled after the exponential function y = -2^(-10x) + 1 + /// @see gtx_easing + template + GLM_FUNC_DECL genType exponentialEaseOut(genType const & a); + + /// Modelled after the piecewise exponential + /// y = (1/2)2^(10(2x - 1)) ; [0,0.5) + /// y = -(1/2)*2^(-10(2x - 1))) + 1 ; [0.5,1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType exponentialEaseInOut(genType const & a); + + /// Modelled after the damped sine wave y = sin(13pi/2*x)*pow(2, 10 * (x - 1)) + /// @see gtx_easing + template + GLM_FUNC_DECL genType elasticEaseIn(genType const & a); + + /// Modelled after the damped sine wave y = sin(-13pi/2*(x + 1))*pow(2, -10x) + 1 + /// @see gtx_easing + template + GLM_FUNC_DECL genType elasticEaseOut(genType const & a); + + /// Modelled after the piecewise exponentially-damped sine wave: + /// y = (1/2)*sin(13pi/2*(2*x))*pow(2, 10 * ((2*x) - 1)) ; [0,0.5) + /// y = (1/2)*(sin(-13pi/2*((2x-1)+1))*pow(2,-10(2*x-1)) + 2) ; [0.5, 1] + /// @see gtx_easing + template + GLM_FUNC_DECL genType elasticEaseInOut(genType const & a); + + /// @see gtx_easing + template + GLM_FUNC_DECL genType backEaseIn(genType const& a); + + /// @see gtx_easing + template + GLM_FUNC_DECL genType backEaseOut(genType const& a); + + /// @see gtx_easing + template + GLM_FUNC_DECL genType backEaseInOut(genType const& a); + + /// @param a parameter + /// @param o Optional overshoot modifier + /// @see gtx_easing + template + GLM_FUNC_DECL genType backEaseIn(genType const& a, genType const& o); + + /// @param a parameter + /// @param o Optional overshoot modifier + /// @see gtx_easing + template + GLM_FUNC_DECL genType backEaseOut(genType const& a, genType const& o); + + /// @param a parameter + /// @param o Optional overshoot modifier + /// @see gtx_easing + template + GLM_FUNC_DECL genType backEaseInOut(genType const& a, genType const& o); + + /// @see gtx_easing + template + GLM_FUNC_DECL genType bounceEaseIn(genType const& a); + + /// @see gtx_easing + template + GLM_FUNC_DECL genType bounceEaseOut(genType const& a); + + /// @see gtx_easing + template + GLM_FUNC_DECL genType bounceEaseInOut(genType const& a); + + /// @} +}//namespace glm + +#include "easing.inl" diff --git a/includes/glm/gtx/easing.inl b/includes/glm/gtx/easing.inl new file mode 100644 index 0000000..b599c30 --- /dev/null +++ b/includes/glm/gtx/easing.inl @@ -0,0 +1,436 @@ +/// @ref gtx_easing + +#include + +namespace glm{ + + template + GLM_FUNC_QUALIFIER genType linearInterpolation(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return a; + } + + template + GLM_FUNC_QUALIFIER genType quadraticEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return a * a; + } + + template + GLM_FUNC_QUALIFIER genType quadraticEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return -(a * (a - static_cast(2))); + } + + template + GLM_FUNC_QUALIFIER genType quadraticEaseInOut(genType const& a) + { + 
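// Editorial sketch (not from the GLM sources; helper names are illustrative): the back
// easings exist in two forms, one with an explicit overshoot parameter o and one that
// falls back to the 1.70158 default set further down in easing.inl; a larger o pushes
// the curve further past the endpoints before it settles.
inline float gentleBack(float t)
{
    return glm::backEaseOut(t, 0.8f);   // explicit, smaller overshoot than the default
}

inline float defaultBack(float t)
{
    return glm::backEaseOut(t);         // uses the 1.70158 default overshoot
}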
// Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + { + return static_cast(2) * a * a; + } + else + { + return (-static_cast(2) * a * a) + (4 * a) - one(); + } + } + + template + GLM_FUNC_QUALIFIER genType cubicEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return a * a * a; + } + + template + GLM_FUNC_QUALIFIER genType cubicEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + genType const f = a - one(); + return f * f * f + one(); + } + + template + GLM_FUNC_QUALIFIER genType cubicEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if (a < static_cast(0.5)) + { + return static_cast(4) * a * a * a; + } + else + { + genType const f = ((static_cast(2) * a) - static_cast(2)); + return static_cast(0.5) * f * f * f + one(); + } + } + + template + GLM_FUNC_QUALIFIER genType quarticEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return a * a * a * a; + } + + template + GLM_FUNC_QUALIFIER genType quarticEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + genType const f = (a - one()); + return f * f * f * (one() - a) + one(); + } + + template + GLM_FUNC_QUALIFIER genType quarticEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + { + return static_cast(8) * a * a * a * a; + } + else + { + genType const f = (a - one()); + return -static_cast(8) * f * f * f * f + one(); + } + } + + template + GLM_FUNC_QUALIFIER genType quinticEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return a * a * a * a * a; + } + + template + GLM_FUNC_QUALIFIER genType quinticEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + genType const f = (a - one()); + return f * f * f * f * f + one(); + } + + template + GLM_FUNC_QUALIFIER genType quinticEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + { + return static_cast(16) * a * a * a * a * a; + } + else + { + genType const f = ((static_cast(2) * a) - static_cast(2)); + return static_cast(0.5) * f * f * f * f * f + one(); + } + } + + template + GLM_FUNC_QUALIFIER genType sineEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return sin((a - one()) * half_pi()) + one(); + } + + template + GLM_FUNC_QUALIFIER genType sineEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return sin(a * half_pi()); + } + + template + GLM_FUNC_QUALIFIER genType sineEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return static_cast(0.5) * (one() - cos(a * pi())); + } + + template + GLM_FUNC_QUALIFIER genType circularEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return one() - sqrt(one() - (a * a)); + } + + template + GLM_FUNC_QUALIFIER genType circularEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return sqrt((static_cast(2) - a) * a); + } + + template + GLM_FUNC_QUALIFIER genType circularEaseInOut(genType 
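// Editorial check (not from the GLM sources; helper name is illustrative): a quick numeric
// sanity test of quadraticEaseInOut as implemented above. Both branches give 0.5 at
// a = 0.5, and the curve is symmetric about that point.
#include <cassert>
#include <cmath>

inline void quadraticEaseInOutCheck()
{
    assert(std::fabs(glm::quadraticEaseInOut(0.25f) - 0.125f) < 1e-6f); // 2 * 0.25^2
    assert(std::fabs(glm::quadraticEaseInOut(0.50f) - 0.500f) < 1e-6f); // branch boundary
    assert(std::fabs(glm::quadraticEaseInOut(0.75f) - 0.875f) < 1e-6f); // -2*0.75^2 + 4*0.75 - 1
}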
const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + { + return static_cast(0.5) * (one() - std::sqrt(one() - static_cast(4) * (a * a))); + } + else + { + return static_cast(0.5) * (std::sqrt(-((static_cast(2) * a) - static_cast(3)) * ((static_cast(2) * a) - one())) + one()); + } + } + + template + GLM_FUNC_QUALIFIER genType exponentialEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a <= zero()) + return a; + else + { + genType const Complementary = a - one(); + genType const Two = static_cast(2); + + return glm::pow(Two, Complementary * static_cast(10)); + } + } + + template + GLM_FUNC_QUALIFIER genType exponentialEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a >= one()) + return a; + else + { + return one() - glm::pow(static_cast(2), -static_cast(10) * a); + } + } + + template + GLM_FUNC_QUALIFIER genType exponentialEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + return static_cast(0.5) * glm::pow(static_cast(2), (static_cast(20) * a) - static_cast(10)); + else + return -static_cast(0.5) * glm::pow(static_cast(2), (-static_cast(20) * a) + static_cast(10)) + one(); + } + + template + GLM_FUNC_QUALIFIER genType elasticEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return std::sin(static_cast(13) * half_pi() * a) * glm::pow(static_cast(2), static_cast(10) * (a - one())); + } + + template + GLM_FUNC_QUALIFIER genType elasticEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return std::sin(-static_cast(13) * half_pi() * (a + one())) * glm::pow(static_cast(2), -static_cast(10) * a) + one(); + } + + template + GLM_FUNC_QUALIFIER genType elasticEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + return static_cast(0.5) * std::sin(static_cast(13) * half_pi() * (static_cast(2) * a)) * glm::pow(static_cast(2), static_cast(10) * ((static_cast(2) * a) - one())); + else + return static_cast(0.5) * (std::sin(-static_cast(13) * half_pi() * ((static_cast(2) * a - one()) + one())) * glm::pow(static_cast(2), -static_cast(10) * (static_cast(2) * a - one())) + static_cast(2)); + } + + template + GLM_FUNC_QUALIFIER genType backEaseIn(genType const& a, genType const& o) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + genType z = ((o + one()) * a) - o; + return (a * a * z); + } + + template + GLM_FUNC_QUALIFIER genType backEaseOut(genType const& a, genType const& o) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + genType n = a - one(); + genType z = ((o + one()) * n) + o; + return (n * n * z) + one(); + } + + template + GLM_FUNC_QUALIFIER genType backEaseInOut(genType const& a, genType const& o) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + genType s = o * static_cast(1.525); + genType x = static_cast(0.5); + genType n = a / static_cast(0.5); + + if (n < static_cast(1)) + { + genType z = ((s + static_cast(1)) * n) - s; + genType m = n * n * z; + return x * m; + } + else + { + n -= static_cast(2); + genType z = ((s + static_cast(1)) * n) + s; + genType m = (n*n*z) + static_cast(2); + return x * m; + } + } + + template + GLM_FUNC_QUALIFIER 
genType backEaseIn(genType const& a) + { + return backEaseIn(a, static_cast(1.70158)); + } + + template + GLM_FUNC_QUALIFIER genType backEaseOut(genType const& a) + { + return backEaseOut(a, static_cast(1.70158)); + } + + template + GLM_FUNC_QUALIFIER genType backEaseInOut(genType const& a) + { + return backEaseInOut(a, static_cast(1.70158)); + } + + template + GLM_FUNC_QUALIFIER genType bounceEaseOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(4.0 / 11.0)) + { + return (static_cast(121) * a * a) / static_cast(16); + } + else if(a < static_cast(8.0 / 11.0)) + { + return (static_cast(363.0 / 40.0) * a * a) - (static_cast(99.0 / 10.0) * a) + static_cast(17.0 / 5.0); + } + else if(a < static_cast(9.0 / 10.0)) + { + return (static_cast(4356.0 / 361.0) * a * a) - (static_cast(35442.0 / 1805.0) * a) + static_cast(16061.0 / 1805.0); + } + else + { + return (static_cast(54.0 / 5.0) * a * a) - (static_cast(513.0 / 25.0) * a) + static_cast(268.0 / 25.0); + } + } + + template + GLM_FUNC_QUALIFIER genType bounceEaseIn(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + return one() - bounceEaseOut(one() - a); + } + + template + GLM_FUNC_QUALIFIER genType bounceEaseInOut(genType const& a) + { + // Only defined in [0, 1] + assert(a >= zero()); + assert(a <= one()); + + if(a < static_cast(0.5)) + { + return static_cast(0.5) * (one() - bounceEaseOut(one() - a * static_cast(2))); + } + else + { + return static_cast(0.5) * bounceEaseOut(a * static_cast(2) - one()) + static_cast(0.5); + } + } + +}//namespace glm diff --git a/includes/glm/gtx/euler_angles.hpp b/includes/glm/gtx/euler_angles.hpp new file mode 100644 index 0000000..5d67d8e --- /dev/null +++ b/includes/glm/gtx/euler_angles.hpp @@ -0,0 +1,333 @@ +/// @ref gtx_euler_angles +/// @file glm/gtx/euler_angles.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_euler_angles GLM_GTX_euler_angles +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Build matrices from Euler angles. +/// +/// Extraction of Euler angles from rotation matrix. +/// Based on the original paper 2014 Mike Day - Extracting Euler Angles from a Rotation Matrix. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_euler_angles is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_euler_angles extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_euler_angles + /// @{ + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle X. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleX( + T const& angleX); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Y. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleY( + T const& angleY); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Z. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZ( + T const& angleZ); + + /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about X-axis. 
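// Editorial sketch (not from the GLM sources; helper name is illustrative): the single-axis
// helpers declared here return full 4x4 homogeneous matrices, so they apply directly to
// homogeneous points and compose by multiplication. Assumes GLM_ENABLE_EXPERIMENTAL is
// defined before the GTX include.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/euler_angles.hpp>

inline glm::vec4 spinAroundY(glm::vec4 const& p, float angleRadians)
{
    glm::mat4 r = glm::eulerAngleY(angleRadians);   // rotation about the Y axis only
    return r * p;                                   // w should be 1 for positions
}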
+ /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleX( + T const & angleX, T const & angularVelocityX); + + /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Y-axis. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleY( + T const & angleY, T const & angularVelocityY); + + /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Z-axis. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleZ( + T const & angleZ, T const & angularVelocityZ); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXY( + T const& angleX, + T const& angleY); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYX( + T const& angleY, + T const& angleX); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZ( + T const& angleX, + T const& angleZ); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZX( + T const& angle, + T const& angleX); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZ( + T const& angleY, + T const& angleZ); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZY( + T const& angleZ, + T const& angleY); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXYZ( + T const& t1, + T const& t2, + T const& t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYXZ( + T const& yaw, + T const& pitch, + T const& roll); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * X). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZX( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * X). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXYX( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Y). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYXY( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * Y). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZY( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * Z). 
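// Editorial sketch (not from the GLM sources; the camera tie-in and helper name are
// illustrative, not taken from this patch): a common use of eulerAngleYXZ is to build a
// camera orientation from yaw/pitch/roll and derive the view matrix from its inverse.
#include <glm/gtc/matrix_transform.hpp>

inline glm::mat4 viewFromYawPitchRoll(glm::vec3 const& eye, float yaw, float pitch, float roll)
{
    glm::mat4 orientation = glm::eulerAngleYXZ(yaw, pitch, roll);   // rotation order Y * X * Z
    glm::mat4 camera = glm::translate(glm::mat4(1.0f), eye) * orientation;
    return glm::inverse(camera);   // the view matrix is the inverse of the camera transform
}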
+ /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZYZ( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZXZ( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * Y). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZY( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * X). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZX( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * X). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZYX( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Y). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZXY( + T const & t1, + T const & t2, + T const & t3); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, defaultp> yawPitchRoll( + T const& yaw, + T const& pitch, + T const& roll); + + /// Creates a 2D 2 * 2 rotation matrix from an euler angle. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<2, 2, T, defaultp> orientate2(T const& angle); + + /// Creates a 2D 4 * 4 homogeneous rotation matrix from an euler angle. + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<3, 3, T, defaultp> orientate3(T const& angle); + + /// Creates a 3D 3 * 3 rotation matrix from euler angles (Y * X * Z). + /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<3, 3, T, Q> orientate3(vec<3, T, Q> const& angles); + + /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). 
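// Editorial sketch (not from the GLM sources; helper name is illustrative): per the
// implementation further below, orientate4(angles) forwards to
// yawPitchRoll(angles.z, angles.x, angles.y), which in turn matches eulerAngleYXZ, so the
// two calls below agree element-wise up to floating-point error.
inline bool orientate4MatchesYawPitchRoll(glm::vec3 const& angles)
{
    glm::mat4 a = glm::orientate4(angles);
    glm::mat4 b = glm::yawPitchRoll(angles.z, angles.x, angles.y);
    for (int c = 0; c < 4; ++c)
        for (int r = 0; r < 4; ++r)
            if (glm::abs(a[c][r] - b[c][r]) > 1e-6f)
                return false;
    return true;
}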
+ /// @see gtx_euler_angles + template + GLM_FUNC_DECL mat<4, 4, T, Q> orientate4(vec<3, T, Q> const& angles); + + /// Extracts the (X * Y * Z) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DISCARD_DECL void extractEulerAngleXYZ(mat<4, 4, T, defaultp> const& M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Y * X * Z) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DISCARD_DECL void extractEulerAngleYXZ(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (X * Z * X) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DISCARD_DECL void extractEulerAngleXZX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (X * Y * X) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DISCARD_DECL void extractEulerAngleXYX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Y * X * Y) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DISCARD_DECL void extractEulerAngleYXY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Y * Z * Y) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DISCARD_DECL void extractEulerAngleYZY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Z * Y * Z) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DISCARD_DECL void extractEulerAngleZYZ(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Z * X * Z) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DISCARD_DECL void extractEulerAngleZXZ(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (X * Z * Y) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DISCARD_DECL void extractEulerAngleXZY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Y * Z * X) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DISCARD_DECL void extractEulerAngleYZX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Z * Y * X) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DISCARD_DECL void extractEulerAngleZYX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// Extracts the (Z * X * Y) Euler angles from the rotation matrix M + /// @see gtx_euler_angles + template + GLM_FUNC_DISCARD_DECL void extractEulerAngleZXY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3); + + /// @} +}//namespace glm + +#include "euler_angles.inl" diff --git a/includes/glm/gtx/euler_angles.inl b/includes/glm/gtx/euler_angles.inl new file mode 100644 index 0000000..85f22b9 --- /dev/null +++ b/includes/glm/gtx/euler_angles.inl @@ -0,0 +1,899 @@ +/// @ref gtx_euler_angles + +#include "compatibility.hpp" // glm::atan2 + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleX + ( + T const& angleX + ) + { + T cosX = glm::cos(angleX); + T sinX = glm::sin(angleX); + + return mat<4, 4, T, defaultp>( + T(1), T(0), T(0), T(0), + T(0), cosX, sinX, T(0), + T(0),-sinX, cosX, T(0), + T(0), T(0), T(0), T(1)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleY 
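// Editorial sketch (not from the GLM sources; helper name is illustrative): the extract*
// functions recover the three angles from a rotation matrix built with the matching
// composition order; away from gimbal lock the round trip reproduces the inputs, possibly
// wrapped into the principal range.
inline void roundTripYXZ(float yaw, float pitch, float roll)
{
    glm::mat4 m = glm::eulerAngleYXZ(yaw, pitch, roll);
    float y = 0.0f, p = 0.0f, r = 0.0f;
    glm::extractEulerAngleYXZ(m, y, p, r);   // y ~= yaw, p ~= pitch, r ~= roll
    (void)y; (void)p; (void)r;
}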
+ ( + T const& angleY + ) + { + T cosY = glm::cos(angleY); + T sinY = glm::sin(angleY); + + return mat<4, 4, T, defaultp>( + cosY, T(0), -sinY, T(0), + T(0), T(1), T(0), T(0), + sinY, T(0), cosY, T(0), + T(0), T(0), T(0), T(1)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZ + ( + T const& angleZ + ) + { + T cosZ = glm::cos(angleZ); + T sinZ = glm::sin(angleZ); + + return mat<4, 4, T, defaultp>( + cosZ, sinZ, T(0), T(0), + -sinZ, cosZ, T(0), T(0), + T(0), T(0), T(1), T(0), + T(0), T(0), T(0), T(1)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleX + ( + T const & angleX, + T const & angularVelocityX + ) + { + T cosX = glm::cos(angleX) * angularVelocityX; + T sinX = glm::sin(angleX) * angularVelocityX; + + return mat<4, 4, T, defaultp>( + T(0), T(0), T(0), T(0), + T(0),-sinX, cosX, T(0), + T(0),-cosX,-sinX, T(0), + T(0), T(0), T(0), T(0)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleY + ( + T const & angleY, + T const & angularVelocityY + ) + { + T cosY = glm::cos(angleY) * angularVelocityY; + T sinY = glm::sin(angleY) * angularVelocityY; + + return mat<4, 4, T, defaultp>( + -sinY, T(0), -cosY, T(0), + T(0), T(0), T(0), T(0), + cosY, T(0), -sinY, T(0), + T(0), T(0), T(0), T(0)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleZ + ( + T const & angleZ, + T const & angularVelocityZ + ) + { + T cosZ = glm::cos(angleZ) * angularVelocityZ; + T sinZ = glm::sin(angleZ) * angularVelocityZ; + + return mat<4, 4, T, defaultp>( + -sinZ, cosZ, T(0), T(0), + -cosZ, -sinZ, T(0), T(0), + T(0), T(0), T(0), T(0), + T(0), T(0), T(0), T(0)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXY + ( + T const& angleX, + T const& angleY + ) + { + T cosX = glm::cos(angleX); + T sinX = glm::sin(angleX); + T cosY = glm::cos(angleY); + T sinY = glm::sin(angleY); + + return mat<4, 4, T, defaultp>( + cosY, -sinX * -sinY, cosX * -sinY, T(0), + T(0), cosX, sinX, T(0), + sinY, -sinX * cosY, cosX * cosY, T(0), + T(0), T(0), T(0), T(1)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYX + ( + T const& angleY, + T const& angleX + ) + { + T cosX = glm::cos(angleX); + T sinX = glm::sin(angleX); + T cosY = glm::cos(angleY); + T sinY = glm::sin(angleY); + + return mat<4, 4, T, defaultp>( + cosY, 0, -sinY, T(0), + sinY * sinX, cosX, cosY * sinX, T(0), + sinY * cosX, -sinX, cosY * cosX, T(0), + T(0), T(0), T(0), T(1)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZ + ( + T const& angleX, + T const& angleZ + ) + { + return eulerAngleX(angleX) * eulerAngleZ(angleZ); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZX + ( + T const& angleZ, + T const& angleX + ) + { + return eulerAngleZ(angleZ) * eulerAngleX(angleX); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZ + ( + T const& angleY, + T const& angleZ + ) + { + return eulerAngleY(angleY) * eulerAngleZ(angleZ); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZY + ( + T const& angleZ, + T const& angleY + ) + { + return eulerAngleZ(angleZ) * eulerAngleY(angleY); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYZ + ( + T const& t1, + T const& t2, + T const& t3 + ) + { + T c1 = glm::cos(-t1); + T c2 = glm::cos(-t2); + T c3 = glm::cos(-t3); + T s1 = glm::sin(-t1); + T s2 = glm::sin(-t2); + T s3 = glm::sin(-t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c2 * c3; + Result[0][1] =-c1 * s3 + s1 * 
s2 * c3; + Result[0][2] = s1 * s3 + c1 * s2 * c3; + Result[0][3] = static_cast(0); + Result[1][0] = c2 * s3; + Result[1][1] = c1 * c3 + s1 * s2 * s3; + Result[1][2] =-s1 * c3 + c1 * s2 * s3; + Result[1][3] = static_cast(0); + Result[2][0] =-s2; + Result[2][1] = s1 * c2; + Result[2][2] = c1 * c2; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYXZ + ( + T const& yaw, + T const& pitch, + T const& roll + ) + { + T tmp_ch = glm::cos(yaw); + T tmp_sh = glm::sin(yaw); + T tmp_cp = glm::cos(pitch); + T tmp_sp = glm::sin(pitch); + T tmp_cb = glm::cos(roll); + T tmp_sb = glm::sin(roll); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb; + Result[0][1] = tmp_sb * tmp_cp; + Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb; + Result[0][3] = static_cast(0); + Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb; + Result[1][1] = tmp_cb * tmp_cp; + Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb; + Result[1][3] = static_cast(0); + Result[2][0] = tmp_sh * tmp_cp; + Result[2][1] = -tmp_sp; + Result[2][2] = tmp_ch * tmp_cp; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZX + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c2; + Result[0][1] = c1 * s2; + Result[0][2] = s1 * s2; + Result[0][3] = static_cast(0); + Result[1][0] =-c3 * s2; + Result[1][1] = c1 * c2 * c3 - s1 * s3; + Result[1][2] = c1 * s3 + c2 * c3 * s1; + Result[1][3] = static_cast(0); + Result[2][0] = s2 * s3; + Result[2][1] =-c3 * s1 - c1 * c2 * s3; + Result[2][2] = c1 * c3 - c2 * s1 * s3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYX + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c2; + Result[0][1] = s1 * s2; + Result[0][2] =-c1 * s2; + Result[0][3] = static_cast(0); + Result[1][0] = s2 * s3; + Result[1][1] = c1 * c3 - c2 * s1 * s3; + Result[1][2] = c3 * s1 + c1 * c2 * s3; + Result[1][3] = static_cast(0); + Result[2][0] = c3 * s2; + Result[2][1] =-c1 * s3 - c2 * c3 * s1; + Result[2][2] = c1 * c2 * c3 - s1 * s3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYXY + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c3 - c2 * s1 * s3; + Result[0][1] = s2* s3; + 
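// Editorial sketch (not from the GLM sources; helper name is illustrative): eulerAngleXYZ
// above composes the three single-axis rotations in X * Y * Z order, so it agrees, up to
// floating-point error, with multiplying the individual matrices.
inline glm::mat4 xyzBothWays(float t1, float t2, float t3)
{
    glm::mat4 combined = glm::eulerAngleXYZ(t1, t2, t3);
    glm::mat4 product  = glm::eulerAngleX(t1) * glm::eulerAngleY(t2) * glm::eulerAngleZ(t3);
    (void)product;   // the two matrices match element-wise
    return combined;
}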
Result[0][2] =-c3 * s1 - c1 * c2 * s3; + Result[0][3] = static_cast(0); + Result[1][0] = s1 * s2; + Result[1][1] = c2; + Result[1][2] = c1 * s2; + Result[1][3] = static_cast(0); + Result[2][0] = c1 * s3 + c2 * c3 * s1; + Result[2][1] =-c3 * s2; + Result[2][2] = c1 * c2 * c3 - s1 * s3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZY + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c2 * c3 - s1 * s3; + Result[0][1] = c3 * s2; + Result[0][2] =-c1 * s3 - c2 * c3 * s1; + Result[0][3] = static_cast(0); + Result[1][0] =-c1 * s2; + Result[1][1] = c2; + Result[1][2] = s1 * s2; + Result[1][3] = static_cast(0); + Result[2][0] = c3 * s1 + c1 * c2 * s3; + Result[2][1] = s2 * s3; + Result[2][2] = c1 * c3 - c2 * s1 * s3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZYZ + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c2 * c3 - s1 * s3; + Result[0][1] = c1 * s3 + c2 * c3 * s1; + Result[0][2] =-c3 * s2; + Result[0][3] = static_cast(0); + Result[1][0] =-c3 * s1 - c1 * c2 * s3; + Result[1][1] = c1 * c3 - c2 * s1 * s3; + Result[1][2] = s2 * s3; + Result[1][3] = static_cast(0); + Result[2][0] = c1 * s2; + Result[2][1] = s1 * s2; + Result[2][2] = c2; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZXZ + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c3 - c2 * s1 * s3; + Result[0][1] = c3 * s1 + c1 * c2 * s3; + Result[0][2] = s2 *s3; + Result[0][3] = static_cast(0); + Result[1][0] =-c1 * s3 - c2 * c3 * s1; + Result[1][1] = c1 * c2 * c3 - s1 * s3; + Result[1][2] = c3 * s2; + Result[1][3] = static_cast(0); + Result[2][0] = s1 * s2; + Result[2][1] =-c1 * s2; + Result[2][2] = c2; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZY + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c2 * c3; + Result[0][1] = s1 * s3 + c1 * c3 * s2; + Result[0][2] = c3 * s1 * s2 - c1 * s3; + Result[0][3] = static_cast(0); + Result[1][0] =-s2; + Result[1][1] = c1 * c2; + Result[1][2] = c2 * s1; + Result[1][3] = static_cast(0); + 
Result[2][0] = c2 * s3; + Result[2][1] = c1 * s2 * s3 - c3 * s1; + Result[2][2] = c1 * c3 + s1 * s2 *s3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZX + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c2; + Result[0][1] = s2; + Result[0][2] =-c2 * s1; + Result[0][3] = static_cast(0); + Result[1][0] = s1 * s3 - c1 * c3 * s2; + Result[1][1] = c2 * c3; + Result[1][2] = c1 * s3 + c3 * s1 * s2; + Result[1][3] = static_cast(0); + Result[2][0] = c3 * s1 + c1 * s2 * s3; + Result[2][1] =-c2 * s3; + Result[2][2] = c1 * c3 - s1 * s2 * s3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZYX + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c2; + Result[0][1] = c2 * s1; + Result[0][2] =-s2; + Result[0][3] = static_cast(0); + Result[1][0] = c1 * s2 * s3 - c3 * s1; + Result[1][1] = c1 * c3 + s1 * s2 * s3; + Result[1][2] = c2 * s3; + Result[1][3] = static_cast(0); + Result[2][0] = s1 * s3 + c1 * c3 * s2; + Result[2][1] = c3 * s1 * s2 - c1 * s3; + Result[2][2] = c2 * c3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZXY + ( + T const & t1, + T const & t2, + T const & t3 + ) + { + T c1 = glm::cos(t1); + T s1 = glm::sin(t1); + T c2 = glm::cos(t2); + T s2 = glm::sin(t2); + T c3 = glm::cos(t3); + T s3 = glm::sin(t3); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = c1 * c3 - s1 * s2 * s3; + Result[0][1] = c3 * s1 + c1 * s2 * s3; + Result[0][2] =-c2 * s3; + Result[0][3] = static_cast(0); + Result[1][0] =-c2 * s1; + Result[1][1] = c1 * c2; + Result[1][2] = s2; + Result[1][3] = static_cast(0); + Result[2][0] = c1 * s3 + c3 * s1 * s2; + Result[2][1] = s1 * s3 - c1 * c3 * s2; + Result[2][2] = c2 * c3; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> yawPitchRoll + ( + T const& yaw, + T const& pitch, + T const& roll + ) + { + T tmp_ch = glm::cos(yaw); + T tmp_sh = glm::sin(yaw); + T tmp_cp = glm::cos(pitch); + T tmp_sp = glm::sin(pitch); + T tmp_cb = glm::cos(roll); + T tmp_sb = glm::sin(roll); + + mat<4, 4, T, defaultp> Result; + Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb; + Result[0][1] = tmp_sb * tmp_cp; + Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb; + Result[0][3] = static_cast(0); + Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb; + Result[1][1] = tmp_cb * tmp_cp; + Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb; + Result[1][3] = static_cast(0); + 
Result[2][0] = tmp_sh * tmp_cp; + Result[2][1] = -tmp_sp; + Result[2][2] = tmp_ch * tmp_cp; + Result[2][3] = static_cast(0); + Result[3][0] = static_cast(0); + Result[3][1] = static_cast(0); + Result[3][2] = static_cast(0); + Result[3][3] = static_cast(1); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> orientate2 + ( + T const& angle + ) + { + T c = glm::cos(angle); + T s = glm::sin(angle); + + mat<2, 2, T, defaultp> Result; + Result[0][0] = c; + Result[0][1] = s; + Result[1][0] = -s; + Result[1][1] = c; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> orientate3 + ( + T const& angle + ) + { + T c = glm::cos(angle); + T s = glm::sin(angle); + + mat<3, 3, T, defaultp> Result; + Result[0][0] = c; + Result[0][1] = s; + Result[0][2] = T(0.0); + Result[1][0] = -s; + Result[1][1] = c; + Result[1][2] = T(0.0); + Result[2][0] = T(0.0); + Result[2][1] = T(0.0); + Result[2][2] = T(1.0); + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> orientate3 + ( + vec<3, T, Q> const& angles + ) + { + return mat<3, 3, T, Q>(yawPitchRoll(angles.z, angles.x, angles.y)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> orientate4 + ( + vec<3, T, Q> const& angles + ) + { + return yawPitchRoll(angles.z, angles.x, angles.y); + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleXYZ(mat<4, 4, T, defaultp> const& M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[2][1], M[2][2]); + T C2 = glm::sqrt(M[0][0]*M[0][0] + M[1][0]*M[1][0]); + T T2 = glm::atan2(-M[2][0], C2); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(S1*M[0][2] - C1*M[0][1], C1*M[1][1] - S1*M[1][2 ]); + t1 = -T1; + t2 = -T2; + t3 = -T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleYXZ(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[2][0], M[2][2]); + T C2 = glm::sqrt(M[0][1]*M[0][1] + M[1][1]*M[1][1]); + T T2 = glm::atan2(-M[2][1], C2); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(S1*M[1][2] - C1*M[1][0], C1*M[0][0] - S1*M[0][2]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleXZX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[0][2], M[0][1]); + T S2 = glm::sqrt(M[1][0]*M[1][0] + M[2][0]*M[2][0]); + T T2 = glm::atan2(S2, M[0][0]); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(C1*M[1][2] - S1*M[1][1], C1*M[2][2] - S1*M[2][1]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleXYX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[0][1], -M[0][2]); + T S2 = glm::sqrt(M[1][0]*M[1][0] + M[2][0]*M[2][0]); + T T2 = glm::atan2(S2, M[0][0]); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(-C1*M[2][1] - S1*M[2][2], C1*M[1][1] + S1*M[1][2]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleYXY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[1][0], M[1][2]); + T S2 = glm::sqrt(M[0][1]*M[0][1] + M[2][1]*M[2][1]); + T T2 = glm::atan2(S2, M[1][1]); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(C1*M[2][0] - S1*M[2][2], C1*M[0][0] - S1*M[0][2]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleYZY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[1][2], -M[1][0]); + T S2 = 
glm::sqrt(M[0][1]*M[0][1] + M[2][1]*M[2][1]); + T T2 = glm::atan2(S2, M[1][1]); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(-S1*M[0][0] - C1*M[0][2], S1*M[2][0] + C1*M[2][2]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleZYZ(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[2][1], M[2][0]); + T S2 = glm::sqrt(M[0][2]*M[0][2] + M[1][2]*M[1][2]); + T T2 = glm::atan2(S2, M[2][2]); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(C1*M[0][1] - S1*M[0][0], C1*M[1][1] - S1*M[1][0]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleZXZ(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[2][0], -M[2][1]); + T S2 = glm::sqrt(M[0][2]*M[0][2] + M[1][2]*M[1][2]); + T T2 = glm::atan2(S2, M[2][2]); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(-C1*M[1][0] - S1*M[1][1], C1*M[0][0] + S1*M[0][1]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleXZY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[1][2], M[1][1]); + T C2 = glm::sqrt(M[0][0]*M[0][0] + M[2][0]*M[2][0]); + T T2 = glm::atan2(-M[1][0], C2); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(S1*M[0][1] - C1*M[0][2], C1*M[2][2] - S1*M[2][1]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleYZX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(-M[0][2], M[0][0]); + T C2 = glm::sqrt(M[1][1]*M[1][1] + M[2][1]*M[2][1]); + T T2 = glm::atan2(M[0][1], C2); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(S1*M[1][0] + C1*M[1][2], S1*M[2][0] + C1*M[2][2]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleZYX(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(M[0][1], M[0][0]); + T C2 = glm::sqrt(M[1][2]*M[1][2] + M[2][2]*M[2][2]); + T T2 = glm::atan2(-M[0][2], C2); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(S1*M[2][0] - C1*M[2][1], C1*M[1][1] - S1*M[1][0]); + t1 = T1; + t2 = T2; + t3 = T3; + } + + template + GLM_FUNC_QUALIFIER void extractEulerAngleZXY(mat<4, 4, T, defaultp> const & M, + T & t1, + T & t2, + T & t3) + { + T T1 = glm::atan2(-M[1][0], M[1][1]); + T C2 = glm::sqrt(M[0][2]*M[0][2] + M[2][2]*M[2][2]); + T T2 = glm::atan2(M[1][2], C2); + T S1 = glm::sin(T1); + T C1 = glm::cos(T1); + T T3 = glm::atan2(C1*M[2][0] + S1*M[2][1], C1*M[0][0] + S1*M[0][1]); + t1 = T1; + t2 = T2; + t3 = T3; + } +}//namespace glm diff --git a/includes/glm/gtx/extend.hpp b/includes/glm/gtx/extend.hpp new file mode 100644 index 0000000..46bf5e7 --- /dev/null +++ b/includes/glm/gtx/extend.hpp @@ -0,0 +1,40 @@ +/// @ref gtx_extend +/// @file glm/gtx/extend.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_extend GLM_GTX_extend +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Extend a position from a source to a position at a defined length. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_extend is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." 
+#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_extend extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_extend + /// @{ + + /// Extends of Length the Origin position using the (Source - Origin) direction. + /// @see gtx_extend + template + GLM_FUNC_DECL genType extend( + genType const& Origin, + genType const& Source, + typename genType::value_type const Length); + + /// @} +}//namespace glm + +#include "extend.inl" diff --git a/includes/glm/gtx/extend.inl b/includes/glm/gtx/extend.inl new file mode 100644 index 0000000..32128eb --- /dev/null +++ b/includes/glm/gtx/extend.inl @@ -0,0 +1,48 @@ +/// @ref gtx_extend + +namespace glm +{ + template + GLM_FUNC_QUALIFIER genType extend + ( + genType const& Origin, + genType const& Source, + genType const& Distance + ) + { + return Origin + (Source - Origin) * Distance; + } + + template + GLM_FUNC_QUALIFIER vec<2, T, Q> extend + ( + vec<2, T, Q> const& Origin, + vec<2, T, Q> const& Source, + T const& Distance + ) + { + return Origin + (Source - Origin) * Distance; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> extend + ( + vec<3, T, Q> const& Origin, + vec<3, T, Q> const& Source, + T const& Distance + ) + { + return Origin + (Source - Origin) * Distance; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> extend + ( + vec<4, T, Q> const& Origin, + vec<4, T, Q> const& Source, + T const& Distance + ) + { + return Origin + (Source - Origin) * Distance; + } +}//namespace glm diff --git a/includes/glm/gtx/extended_min_max.hpp b/includes/glm/gtx/extended_min_max.hpp new file mode 100644 index 0000000..e1b722f --- /dev/null +++ b/includes/glm/gtx/extended_min_max.hpp @@ -0,0 +1,135 @@ +/// @ref gtx_extended_min_max +/// @file glm/gtx/extended_min_max.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_extended_min_max GLM_GTX_extended_min_max +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Min and max functions for 3 to 4 parameters. + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../ext/vector_common.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_extended_min_max is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." 
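// Editorial sketch (not from the GLM sources; helper name is illustrative): per the
// implementation above, extend() returns Origin + (Source - Origin) * Distance, so the
// third argument scales the Origin->Source segment: 1 lands on Source, 2 doubles it.
// Assumes GLM_ENABLE_EXPERIMENTAL is defined before including <glm/gtx/extend.hpp>.
inline glm::vec3 extendExample()
{
    glm::vec3 origin(0.0f);
    glm::vec3 source(1.0f, 0.0f, 0.0f);
    return glm::extend(origin, source, 2.0f);   // (2, 0, 0)
}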
+#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_extended_min_max extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_extended_min_max + /// @{ + + /// Return the minimum component-wise values of 3 inputs + /// @see gtx_extented_min_max + template + GLM_FUNC_DECL T min( + T const& x, + T const& y, + T const& z); + + /// Return the minimum component-wise values of 3 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C min( + C const& x, + typename C::T const& y, + typename C::T const& z); + + /// Return the minimum component-wise values of 3 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C min( + C const& x, + C const& y, + C const& z); + + /// Return the minimum component-wise values of 4 inputs + /// @see gtx_extented_min_max + template + GLM_FUNC_DECL T min( + T const& x, + T const& y, + T const& z, + T const& w); + + /// Return the minimum component-wise values of 4 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C min( + C const& x, + typename C::T const& y, + typename C::T const& z, + typename C::T const& w); + + /// Return the minimum component-wise values of 4 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C min( + C const& x, + C const& y, + C const& z, + C const& w); + + /// Return the maximum component-wise values of 3 inputs + /// @see gtx_extented_min_max + template + GLM_FUNC_DECL T max( + T const& x, + T const& y, + T const& z); + + /// Return the maximum component-wise values of 3 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C max( + C const& x, + typename C::T const& y, + typename C::T const& z); + + /// Return the maximum component-wise values of 3 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C max( + C const& x, + C const& y, + C const& z); + + /// Return the maximum component-wise values of 4 inputs + /// @see gtx_extented_min_max + template + GLM_FUNC_DECL T max( + T const& x, + T const& y, + T const& z, + T const& w); + + /// Return the maximum component-wise values of 4 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C max( + C const& x, + typename C::T const& y, + typename C::T const& z, + typename C::T const& w); + + /// Return the maximum component-wise values of 4 inputs + /// @see gtx_extented_min_max + template class C> + GLM_FUNC_DECL C max( + C const& x, + C const& y, + C const& z, + C const& w); + + /// @} +}//namespace glm + +#include "extended_min_max.inl" diff --git a/includes/glm/gtx/extended_min_max.inl b/includes/glm/gtx/extended_min_max.inl new file mode 100644 index 0000000..de5998f --- /dev/null +++ b/includes/glm/gtx/extended_min_max.inl @@ -0,0 +1,138 @@ +/// @ref gtx_extended_min_max + +namespace glm +{ + template + GLM_FUNC_QUALIFIER T min( + T const& x, + T const& y, + T const& z) + { + return glm::min(glm::min(x, y), z); + } + + template class C> + GLM_FUNC_QUALIFIER C min + ( + C const& x, + typename C::T const& y, + typename C::T const& z + ) + { + return glm::min(glm::min(x, y), z); + } + + template class C> + GLM_FUNC_QUALIFIER C min + ( + C const& x, + C const& y, + C const& z + ) + { + return glm::min(glm::min(x, y), z); + } + + template + GLM_FUNC_QUALIFIER T min + ( + T const& x, + T const& y, + T const& z, + T const& w + ) + { + return glm::min(glm::min(x, y), glm::min(z, w)); + } + + template class C> + GLM_FUNC_QUALIFIER C min + ( + C const& x, + typename C::T const& y, + 
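// Editorial sketch (not from the GLM sources; helper names are illustrative): the 3- and
// 4-argument min/max overloads declared above fold the usual two-argument versions, and
// the vector forms operate component-wise.
inline glm::vec3 brightest(glm::vec3 const& a, glm::vec3 const& b, glm::vec3 const& c)
{
    return glm::max(a, b, c);        // per-component maximum of the three inputs
}

inline float smallestOfFour(float a, float b, float c, float d)
{
    return glm::min(a, b, c, d);
}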
typename C::T const& z, + typename C::T const& w + ) + { + return glm::min(glm::min(x, y), glm::min(z, w)); + } + + template class C> + GLM_FUNC_QUALIFIER C min + ( + C const& x, + C const& y, + C const& z, + C const& w + ) + { + return glm::min(glm::min(x, y), glm::min(z, w)); + } + + template + GLM_FUNC_QUALIFIER T max( + T const& x, + T const& y, + T const& z) + { + return glm::max(glm::max(x, y), z); + } + + template class C> + GLM_FUNC_QUALIFIER C max + ( + C const& x, + typename C::T const& y, + typename C::T const& z + ) + { + return glm::max(glm::max(x, y), z); + } + + template class C> + GLM_FUNC_QUALIFIER C max + ( + C const& x, + C const& y, + C const& z + ) + { + return glm::max(glm::max(x, y), z); + } + + template + GLM_FUNC_QUALIFIER T max + ( + T const& x, + T const& y, + T const& z, + T const& w + ) + { + return glm::max(glm::max(x, y), glm::max(z, w)); + } + + template class C> + GLM_FUNC_QUALIFIER C max + ( + C const& x, + typename C::T const& y, + typename C::T const& z, + typename C::T const& w + ) + { + return glm::max(glm::max(x, y), glm::max(z, w)); + } + + template class C> + GLM_FUNC_QUALIFIER C max + ( + C const& x, + C const& y, + C const& z, + C const& w + ) + { + return glm::max(glm::max(x, y), glm::max(z, w)); + } +}//namespace glm diff --git a/includes/glm/gtx/exterior_product.hpp b/includes/glm/gtx/exterior_product.hpp new file mode 100644 index 0000000..1979acc --- /dev/null +++ b/includes/glm/gtx/exterior_product.hpp @@ -0,0 +1,43 @@ +/// @ref gtx_exterior_product +/// @file glm/gtx/exterior_product.hpp +/// +/// @see core (dependence) +/// @see gtx_exterior_product (dependence) +/// +/// @defgroup gtx_exterior_product GLM_GTX_exterior_product +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// @brief Allow to perform bit operations on integer values + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_exterior_product is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_exterior_product extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_exterior_product + /// @{ + + /// Returns the cross product of x and y. 
+ /// + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see Exterior product + template + GLM_FUNC_DECL GLM_CONSTEXPR T cross(vec<2, T, Q> const& v, vec<2, T, Q> const& u); + + /// @} +} //namespace glm + +#include "exterior_product.inl" diff --git a/includes/glm/gtx/exterior_product.inl b/includes/glm/gtx/exterior_product.inl new file mode 100644 index 0000000..690085d --- /dev/null +++ b/includes/glm/gtx/exterior_product.inl @@ -0,0 +1,26 @@ +/// @ref gtx_exterior_product + +#include + +namespace glm { +namespace detail +{ + template + struct compute_cross_vec2 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(vec<2, T, Q> const& v, vec<2, T, Q> const& u) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'cross' accepts only floating-point inputs"); + + return v.x * u.y - u.x * v.y; + } + }; +}//namespace detail + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T cross(vec<2, T, Q> const& x, vec<2, T, Q> const& y) + { + return detail::compute_cross_vec2::value>::call(x, y); + } +}//namespace glm + diff --git a/includes/glm/gtx/fast_exponential.hpp b/includes/glm/gtx/fast_exponential.hpp new file mode 100644 index 0000000..9fae325 --- /dev/null +++ b/includes/glm/gtx/fast_exponential.hpp @@ -0,0 +1,93 @@ +/// @ref gtx_fast_exponential +/// @file glm/gtx/fast_exponential.hpp +/// +/// @see core (dependence) +/// @see gtx_half_float (dependence) +/// +/// @defgroup gtx_fast_exponential GLM_GTX_fast_exponential +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Fast but less accurate implementations of exponential based functions. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_fast_exponential is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_fast_exponential extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_fast_exponential + /// @{ + + /// Faster than the common pow function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL genType fastPow(genType x, genType y); + + /// Faster than the common pow function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL vec fastPow(vec const& x, vec const& y); + + /// Faster than the common pow function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL genTypeT fastPow(genTypeT x, genTypeU y); + + /// Faster than the common pow function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL vec fastPow(vec const& x); + + /// Faster than the common exp function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL T fastExp(T x); + + /// Faster than the common exp function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL vec fastExp(vec const& x); + + /// Faster than the common log function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL T fastLog(T x); + + /// Faster than the common exp2 function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL vec fastLog(vec const& x); + + /// Faster than the common exp2 function but less accurate. 
+ /// @see gtx_fast_exponential + template + GLM_FUNC_DECL T fastExp2(T x); + + /// Faster than the common exp2 function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL vec fastExp2(vec const& x); + + /// Faster than the common log2 function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL T fastLog2(T x); + + /// Faster than the common log2 function but less accurate. + /// @see gtx_fast_exponential + template + GLM_FUNC_DECL vec fastLog2(vec const& x); + + /// @} +}//namespace glm + +#include "fast_exponential.inl" diff --git a/includes/glm/gtx/fast_exponential.inl b/includes/glm/gtx/fast_exponential.inl new file mode 100644 index 0000000..5b11742 --- /dev/null +++ b/includes/glm/gtx/fast_exponential.inl @@ -0,0 +1,136 @@ +/// @ref gtx_fast_exponential + +namespace glm +{ + // fastPow: + template + GLM_FUNC_QUALIFIER genType fastPow(genType x, genType y) + { + return exp(y * log(x)); + } + + template + GLM_FUNC_QUALIFIER vec fastPow(vec const& x, vec const& y) + { + return exp(y * log(x)); + } + + template + GLM_FUNC_QUALIFIER T fastPow(T x, int y) + { + T f = static_cast(1); + for(int i = 0; i < y; ++i) + f *= x; + return f; + } + + template + GLM_FUNC_QUALIFIER vec fastPow(vec const& x, vec const& y) + { + vec Result; + for(length_t i = 0, n = x.length(); i < n; ++i) + Result[i] = fastPow(x[i], y[i]); + return Result; + } + + // fastExp + // Note: This function provides accurate results only for value between -1 and 1, else avoid it. + template + GLM_FUNC_QUALIFIER T fastExp(T x) + { + // This has a better looking and same performance in release mode than the following code. However, in debug mode it's slower. + // return 1.0f + x * (1.0f + x * 0.5f * (1.0f + x * 0.3333333333f * (1.0f + x * 0.25 * (1.0f + x * 0.2f)))); + T x2 = x * x; + T x3 = x2 * x; + T x4 = x3 * x; + T x5 = x4 * x; + return T(1) + x + (x2 * T(0.5)) + (x3 * T(0.1666666667)) + (x4 * T(0.041666667)) + (x5 * T(0.008333333333)); + } + /* // Try to handle all values of float... but often shower than std::exp, glm::floor and the loop kill the performance + GLM_FUNC_QUALIFIER float fastExp(float x) + { + const float e = 2.718281828f; + const float IntegerPart = floor(x); + const float FloatPart = x - IntegerPart; + float z = 1.f; + + for(int i = 0; i < int(IntegerPart); ++i) + z *= e; + + const float x2 = FloatPart * FloatPart; + const float x3 = x2 * FloatPart; + const float x4 = x3 * FloatPart; + const float x5 = x4 * FloatPart; + return z * (1.0f + FloatPart + (x2 * 0.5f) + (x3 * 0.1666666667f) + (x4 * 0.041666667f) + (x5 * 0.008333333333f)); + } + + // Increase accuracy on number bigger that 1 and smaller than -1 but it's not enough for high and negative numbers + GLM_FUNC_QUALIFIER float fastExp(float x) + { + // This has a better looking and same performance in release mode than the following code. However, in debug mode it's slower. 
+ // return 1.0f + x * (1.0f + x * 0.5f * (1.0f + x * 0.3333333333f * (1.0f + x * 0.25 * (1.0f + x * 0.2f)))); + float x2 = x * x; + float x3 = x2 * x; + float x4 = x3 * x; + float x5 = x4 * x; + float x6 = x5 * x; + float x7 = x6 * x; + float x8 = x7 * x; + return 1.0f + x + (x2 * 0.5f) + (x3 * 0.1666666667f) + (x4 * 0.041666667f) + (x5 * 0.008333333333f)+ (x6 * 0.00138888888888f) + (x7 * 0.000198412698f) + (x8 * 0.0000248015873f);; + } + */ + + template + GLM_FUNC_QUALIFIER vec fastExp(vec const& x) + { + return detail::functor1::call(fastExp, x); + } + + // fastLog + template + GLM_FUNC_QUALIFIER genType fastLog(genType x) + { + return std::log(x); + } + + /* Slower than the VC7.1 function... + GLM_FUNC_QUALIFIER float fastLog(float x) + { + float y1 = (x - 1.0f) / (x + 1.0f); + float y2 = y1 * y1; + return 2.0f * y1 * (1.0f + y2 * (0.3333333333f + y2 * (0.2f + y2 * 0.1428571429f))); + } + */ + + template + GLM_FUNC_QUALIFIER vec fastLog(vec const& x) + { + return detail::functor1::call(fastLog, x); + } + + //fastExp2, ln2 = 0.69314718055994530941723212145818f + template + GLM_FUNC_QUALIFIER genType fastExp2(genType x) + { + return fastExp(static_cast(0.69314718055994530941723212145818) * x); + } + + template + GLM_FUNC_QUALIFIER vec fastExp2(vec const& x) + { + return detail::functor1::call(fastExp2, x); + } + + // fastLog2, ln2 = 0.69314718055994530941723212145818f + template + GLM_FUNC_QUALIFIER genType fastLog2(genType x) + { + return fastLog(x) / static_cast(0.69314718055994530941723212145818); + } + + template + GLM_FUNC_QUALIFIER vec fastLog2(vec const& x) + { + return detail::functor1::call(fastLog2, x); + } +}//namespace glm diff --git a/includes/glm/gtx/fast_square_root.hpp b/includes/glm/gtx/fast_square_root.hpp new file mode 100644 index 0000000..80729db --- /dev/null +++ b/includes/glm/gtx/fast_square_root.hpp @@ -0,0 +1,96 @@ +/// @ref gtx_fast_square_root +/// @file glm/gtx/fast_square_root.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_fast_square_root GLM_GTX_fast_square_root +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Fast but less accurate implementations of square root based functions. +/// - Sqrt optimisation based on Newton's method, +/// www.gamedev.net/community/forums/topic.asp?topic id=139956 + +#pragma once + +// Dependency: +#include "../common.hpp" +#include "../exponential.hpp" +#include "../geometric.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_fast_square_root is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_fast_square_root extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_fast_square_root + /// @{ + + /// Faster than the common sqrt function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL genType fastSqrt(genType x); + + /// Faster than the common sqrt function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL vec fastSqrt(vec const& x); + + /// Faster than the common inversesqrt function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL genType fastInverseSqrt(genType x); + + /// Faster than the common inversesqrt function but less accurate. + /// + /// @see gtx_fast_square_root extension. 
+ template + GLM_FUNC_DECL vec fastInverseSqrt(vec const& x); + + /// Faster than the common length function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL genType fastLength(genType x); + + /// Faster than the common length function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL T fastLength(vec const& x); + + /// Faster than the common distance function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL genType fastDistance(genType x, genType y); + + /// Faster than the common distance function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL T fastDistance(vec const& x, vec const& y); + + /// Faster than the common normalize function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL genType fastNormalize(genType x); + + /// Faster than the common normalize function but less accurate. + /// + /// @see gtx_fast_square_root extension. + template + GLM_FUNC_DECL vec fastNormalize(vec const& x); + + /// @} +}// namespace glm + +#include "fast_square_root.inl" diff --git a/includes/glm/gtx/fast_square_root.inl b/includes/glm/gtx/fast_square_root.inl new file mode 100644 index 0000000..60fdb7a --- /dev/null +++ b/includes/glm/gtx/fast_square_root.inl @@ -0,0 +1,75 @@ +/// @ref gtx_fast_square_root + +namespace glm +{ + // fastSqrt + template + GLM_FUNC_QUALIFIER genType fastSqrt(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fastSqrt' only accept floating-point input"); + + return genType(1) / fastInverseSqrt(x); + } + + template + GLM_FUNC_QUALIFIER vec fastSqrt(vec const& x) + { + return detail::functor1::call(fastSqrt, x); + } + + // fastInversesqrt + template + GLM_FUNC_QUALIFIER genType fastInverseSqrt(genType x) + { + return detail::compute_inversesqrt<1, genType, lowp, detail::is_aligned::value>::call(vec<1, genType, lowp>(x)).x; + } + + template + GLM_FUNC_QUALIFIER vec fastInverseSqrt(vec const& x) + { + return detail::compute_inversesqrt::value>::call(x); + } + + // fastLength + template + GLM_FUNC_QUALIFIER genType fastLength(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fastLength' only accept floating-point inputs"); + + return abs(x); + } + + template + GLM_FUNC_QUALIFIER T fastLength(vec const& x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'fastLength' only accept floating-point inputs"); + + return fastSqrt(dot(x, x)); + } + + // fastDistance + template + GLM_FUNC_QUALIFIER genType fastDistance(genType x, genType y) + { + return fastLength(y - x); + } + + template + GLM_FUNC_QUALIFIER T fastDistance(vec const& x, vec const& y) + { + return fastLength(y - x); + } + + // fastNormalize + template + GLM_FUNC_QUALIFIER genType fastNormalize(genType x) + { + return x > genType(0) ? 
genType(1) : -genType(1); + } + + template + GLM_FUNC_QUALIFIER vec fastNormalize(vec const& x) + { + return x * fastInverseSqrt(dot(x, x)); + } +}//namespace glm diff --git a/includes/glm/gtx/fast_trigonometry.hpp b/includes/glm/gtx/fast_trigonometry.hpp new file mode 100644 index 0000000..93acab5 --- /dev/null +++ b/includes/glm/gtx/fast_trigonometry.hpp @@ -0,0 +1,77 @@ +/// @ref gtx_fast_trigonometry +/// @file glm/gtx/fast_trigonometry.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_fast_trigonometry GLM_GTX_fast_trigonometry +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Fast but less accurate implementations of trigonometric functions. + +#pragma once + +// Dependency: +#include "../gtc/constants.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_fast_trigonometry is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_fast_trigonometry extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_fast_trigonometry + /// @{ + + /// Wrap an angle to [0 2pi[ + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T wrapAngle(T angle); + + /// Faster than the common sin function but less accurate. + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T fastSin(T angle); + + /// Faster than the common cos function but less accurate. + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T fastCos(T angle); + + /// Faster than the common tan function but less accurate. + /// Defined between -2pi and 2pi. + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T fastTan(T angle); + + /// Faster than the common asin function but less accurate. + /// Defined between -2pi and 2pi. + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T fastAsin(T angle); + + /// Faster than the common acos function but less accurate. + /// Defined between -2pi and 2pi. + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T fastAcos(T angle); + + /// Faster than the common atan function but less accurate. + /// Defined between -2pi and 2pi. + /// From GLM_GTX_fast_trigonometry extension. + template + GLM_FUNC_DECL T fastAtan(T y, T x); + + /// Faster than the common atan function but less accurate. + /// Defined between -2pi and 2pi. + /// From GLM_GTX_fast_trigonometry extension. 
+ template + GLM_FUNC_DECL T fastAtan(T angle); + + /// @} +}//namespace glm + +#include "fast_trigonometry.inl" diff --git a/includes/glm/gtx/fast_trigonometry.inl b/includes/glm/gtx/fast_trigonometry.inl new file mode 100644 index 0000000..1a710cb --- /dev/null +++ b/includes/glm/gtx/fast_trigonometry.inl @@ -0,0 +1,142 @@ +/// @ref gtx_fast_trigonometry + +namespace glm{ +namespace detail +{ + template + GLM_FUNC_QUALIFIER vec taylorCos(vec const& x) + { + return static_cast(1) + - (x * x) * (1.f / 2.f) + + ((x * x) * (x * x)) * (1.f / 24.f) + - (((x * x) * (x * x)) * (x * x)) * (1.f / 720.f) + + (((x * x) * (x * x)) * ((x * x) * (x * x))) * (1.f / 40320.f); + } + + template + GLM_FUNC_QUALIFIER T cos_52s(T x) + { + T const xx(x * x); + return (T(0.9999932946) + xx * (T(-0.4999124376) + xx * (T(0.0414877472) + xx * T(-0.0012712095)))); + } + + template + GLM_FUNC_QUALIFIER vec cos_52s(vec const& x) + { + return detail::functor1::call(cos_52s, x); + } +}//namespace detail + + // wrapAngle + template + GLM_FUNC_QUALIFIER T wrapAngle(T angle) + { + return abs(mod(angle, two_pi())); + } + + template + GLM_FUNC_QUALIFIER vec wrapAngle(vec const& x) + { + return detail::functor1::call(wrapAngle, x); + } + + // cos + template + GLM_FUNC_QUALIFIER T fastCos(T x) + { + T const angle(wrapAngle(x)); + + if(angle < half_pi()) + return detail::cos_52s(angle); + if(angle < pi()) + return -detail::cos_52s(pi() - angle); + if(angle < (T(3) * half_pi())) + return -detail::cos_52s(angle - pi()); + + return detail::cos_52s(two_pi() - angle); + } + + template + GLM_FUNC_QUALIFIER vec fastCos(vec const& x) + { + return detail::functor1::call(fastCos, x); + } + + // sin + template + GLM_FUNC_QUALIFIER T fastSin(T x) + { + return fastCos(half_pi() - x); + } + + template + GLM_FUNC_QUALIFIER vec fastSin(vec const& x) + { + return detail::functor1::call(fastSin, x); + } + + // tan + template + GLM_FUNC_QUALIFIER T fastTan(T x) + { + return x + (x * x * x * T(0.3333333333)) + (x * x * x * x * x * T(0.1333333333333)) + (x * x * x * x * x * x * x * T(0.0539682539)); + } + + template + GLM_FUNC_QUALIFIER vec fastTan(vec const& x) + { + return detail::functor1::call(fastTan, x); + } + + // asin + template + GLM_FUNC_QUALIFIER T fastAsin(T x) + { + return x + (x * x * x * T(0.166666667)) + (x * x * x * x * x * T(0.075)) + (x * x * x * x * x * x * x * T(0.0446428571)) + (x * x * x * x * x * x * x * x * x * T(0.0303819444));// + (x * x * x * x * x * x * x * x * x * x * x * T(0.022372159)); + } + + template + GLM_FUNC_QUALIFIER vec fastAsin(vec const& x) + { + return detail::functor1::call(fastAsin, x); + } + + // acos + template + GLM_FUNC_QUALIFIER T fastAcos(T x) + { + return T(1.5707963267948966192313216916398) - fastAsin(x); //(PI / 2) + } + + template + GLM_FUNC_QUALIFIER vec fastAcos(vec const& x) + { + return detail::functor1::call(fastAcos, x); + } + + // atan + template + GLM_FUNC_QUALIFIER T fastAtan(T y, T x) + { + T sgn = sign(y) * sign(x); + return abs(fastAtan(y / x)) * sgn; + } + + template + GLM_FUNC_QUALIFIER vec fastAtan(vec const& y, vec const& x) + { + return detail::functor2::call(fastAtan, y, x); + } + + template + GLM_FUNC_QUALIFIER T fastAtan(T x) + { + return x - (x * x * x * T(0.333333333333)) + (x * x * x * x * x * T(0.2)) - (x * x * x * x * x * x * x * T(0.1428571429)) + (x * x * x * x * x * x * x * x * x * T(0.111111111111)) - (x * x * x * x * x * x * x * x * x * x * x * T(0.0909090909)); + } + + template + GLM_FUNC_QUALIFIER vec fastAtan(vec const& x) + { + return 
detail::functor1::call(fastAtan, x); + } +}//namespace glm diff --git a/includes/glm/gtx/float_normalize.inl b/includes/glm/gtx/float_normalize.inl new file mode 100644 index 0000000..8cdbc5a --- /dev/null +++ b/includes/glm/gtx/float_normalize.inl @@ -0,0 +1,13 @@ +/// @ref gtx_float_normalize + +#include + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec floatNormalize(vec const& v) + { + return vec(v) / static_cast(std::numeric_limits::max()); + } + +}//namespace glm diff --git a/includes/glm/gtx/functions.hpp b/includes/glm/gtx/functions.hpp new file mode 100644 index 0000000..df68a0d --- /dev/null +++ b/includes/glm/gtx/functions.hpp @@ -0,0 +1,54 @@ +/// @ref gtx_functions +/// @file glm/gtx/functions.hpp +/// +/// @see core (dependence) +/// @see gtc_quaternion (dependence) +/// +/// @defgroup gtx_functions GLM_GTX_functions +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// List of useful common functions. + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" +#include "../detail/qualifier.hpp" +#include "../detail/type_vec2.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_functions is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_functions extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_functions + /// @{ + + /// 1D gauss function + /// + /// @see gtc_epsilon + template + GLM_FUNC_DECL T gauss( + T x, + T ExpectedValue, + T StandardDeviation); + + /// 2D gauss function + /// + /// @see gtc_epsilon + template + GLM_FUNC_DECL T gauss( + vec<2, T, Q> const& Coord, + vec<2, T, Q> const& ExpectedValue, + vec<2, T, Q> const& StandardDeviation); + + /// @} +}//namespace glm + +#include "functions.inl" + diff --git a/includes/glm/gtx/functions.inl b/includes/glm/gtx/functions.inl new file mode 100644 index 0000000..29cbb20 --- /dev/null +++ b/includes/glm/gtx/functions.inl @@ -0,0 +1,30 @@ +/// @ref gtx_functions + +#include "../exponential.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER T gauss + ( + T x, + T ExpectedValue, + T StandardDeviation + ) + { + return exp(-((x - ExpectedValue) * (x - ExpectedValue)) / (static_cast(2) * StandardDeviation * StandardDeviation)) / (StandardDeviation * sqrt(static_cast(6.28318530717958647692528676655900576))); + } + + template + GLM_FUNC_QUALIFIER T gauss + ( + vec<2, T, Q> const& Coord, + vec<2, T, Q> const& ExpectedValue, + vec<2, T, Q> const& StandardDeviation + ) + { + vec<2, T, Q> const Squared = ((Coord - ExpectedValue) * (Coord - ExpectedValue)) / (static_cast(2) * StandardDeviation * StandardDeviation); + return exp(-(Squared.x + Squared.y)); + } +}//namespace glm + diff --git a/includes/glm/gtx/gradient_paint.hpp b/includes/glm/gtx/gradient_paint.hpp new file mode 100644 index 0000000..5656445 --- /dev/null +++ b/includes/glm/gtx/gradient_paint.hpp @@ -0,0 +1,51 @@ +/// @ref gtx_gradient_paint +/// @file glm/gtx/gradient_paint.hpp +/// +/// @see core (dependence) +/// @see gtx_optimum_pow (dependence) +/// +/// @defgroup gtx_gradient_paint GLM_GTX_gradient_paint +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Functions that return the color of procedural gradient for specific coordinates. 
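// Minimal usage sketch for the GTX_functions gauss() overloads added above. Per the
// .inl, the 1D overload is normalised by 1 / (sigma * sqrt(2 * pi)), while the 2D
// overload only returns exp(-(dx^2 + dy^2) / (2 * sigma^2)) with no normalisation
// factor. Assumes the vendored glm headers are on the include path; the demo function
// name is illustrative only.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/functions.hpp>

float gauss_demo()
{
	// 1D: peak value at the mean with sigma = 1 is 1 / sqrt(2 * pi), about 0.3989
	float const peak = glm::gauss(0.0f, 0.0f, 1.0f);

	// 2D: falloff one unit away from the expected value, sigma = (1, 1)
	float const falloff = glm::gauss(glm::vec2(1.0f, 0.0f), glm::vec2(0.0f), glm::vec2(1.0f));

	return peak * falloff;
}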
+ +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtx/optimum_pow.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_gradient_paint is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_gradient_paint extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_gradient_paint + /// @{ + + /// Return a color from a radial gradient. + /// @see - gtx_gradient_paint + template + GLM_FUNC_DECL T radialGradient( + vec<2, T, Q> const& Center, + T const& Radius, + vec<2, T, Q> const& Focal, + vec<2, T, Q> const& Position); + + /// Return a color from a linear gradient. + /// @see - gtx_gradient_paint + template + GLM_FUNC_DECL T linearGradient( + vec<2, T, Q> const& Point0, + vec<2, T, Q> const& Point1, + vec<2, T, Q> const& Position); + + /// @} +}// namespace glm + +#include "gradient_paint.inl" diff --git a/includes/glm/gtx/gradient_paint.inl b/includes/glm/gtx/gradient_paint.inl new file mode 100644 index 0000000..4c495e6 --- /dev/null +++ b/includes/glm/gtx/gradient_paint.inl @@ -0,0 +1,36 @@ +/// @ref gtx_gradient_paint + +namespace glm +{ + template + GLM_FUNC_QUALIFIER T radialGradient + ( + vec<2, T, Q> const& Center, + T const& Radius, + vec<2, T, Q> const& Focal, + vec<2, T, Q> const& Position + ) + { + vec<2, T, Q> F = Focal - Center; + vec<2, T, Q> D = Position - Focal; + T Radius2 = pow2(Radius); + T Fx2 = pow2(F.x); + T Fy2 = pow2(F.y); + + T Numerator = (D.x * F.x + D.y * F.y) + sqrt(Radius2 * (pow2(D.x) + pow2(D.y)) - pow2(D.x * F.y - D.y * F.x)); + T Denominator = Radius2 - (Fx2 + Fy2); + return Numerator / Denominator; + } + + template + GLM_FUNC_QUALIFIER T linearGradient + ( + vec<2, T, Q> const& Point0, + vec<2, T, Q> const& Point1, + vec<2, T, Q> const& Position + ) + { + vec<2, T, Q> Dist = Point1 - Point0; + return (Dist.x * (Position.x - Point0.x) + Dist.y * (Position.y - Point0.y)) / glm::dot(Dist, Dist); + } +}//namespace glm diff --git a/includes/glm/gtx/handed_coordinate_space.hpp b/includes/glm/gtx/handed_coordinate_space.hpp new file mode 100644 index 0000000..7c2aada --- /dev/null +++ b/includes/glm/gtx/handed_coordinate_space.hpp @@ -0,0 +1,48 @@ +/// @ref gtx_handed_coordinate_space +/// @file glm/gtx/handed_coordinate_space.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_handed_coordinate_space GLM_GTX_handed_coordinate_space +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// To know if a set of three basis vectors defines a right or left-handed coordinate system. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_handed_coordinate_space is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_handed_coordinate_space extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_handed_coordinate_space + /// @{ + + //! Return if a trihedron right handed or not. + //! From GLM_GTX_handed_coordinate_space extension. + template + GLM_FUNC_DECL bool rightHanded( + vec<3, T, Q> const& tangent, + vec<3, T, Q> const& binormal, + vec<3, T, Q> const& normal); + + //! Return if a trihedron left handed or not. + //! 
From GLM_GTX_handed_coordinate_space extension. + template + GLM_FUNC_DECL bool leftHanded( + vec<3, T, Q> const& tangent, + vec<3, T, Q> const& binormal, + vec<3, T, Q> const& normal); + + /// @} +}// namespace glm + +#include "handed_coordinate_space.inl" diff --git a/includes/glm/gtx/handed_coordinate_space.inl b/includes/glm/gtx/handed_coordinate_space.inl new file mode 100644 index 0000000..e43c17b --- /dev/null +++ b/includes/glm/gtx/handed_coordinate_space.inl @@ -0,0 +1,26 @@ +/// @ref gtx_handed_coordinate_space + +namespace glm +{ + template + GLM_FUNC_QUALIFIER bool rightHanded + ( + vec<3, T, Q> const& tangent, + vec<3, T, Q> const& binormal, + vec<3, T, Q> const& normal + ) + { + return dot(cross(normal, tangent), binormal) > T(0); + } + + template + GLM_FUNC_QUALIFIER bool leftHanded + ( + vec<3, T, Q> const& tangent, + vec<3, T, Q> const& binormal, + vec<3, T, Q> const& normal + ) + { + return dot(cross(normal, tangent), binormal) < T(0); + } +}//namespace glm diff --git a/includes/glm/gtx/hash.hpp b/includes/glm/gtx/hash.hpp new file mode 100644 index 0000000..a2ac989 --- /dev/null +++ b/includes/glm/gtx/hash.hpp @@ -0,0 +1,156 @@ +/// @ref gtx_hash +/// @file glm/gtx/hash.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_hash GLM_GTX_hash +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Add std::hash support for glm types + +#pragma once + +#if defined(GLM_FORCE_MESSAGES) && !defined(GLM_EXT_INCLUDED) +# ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_hash is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +# else +# pragma message("GLM: GLM_GTX_hash extension included") +# endif +#endif + +#include "../vec2.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" +#include "../gtc/vec1.hpp" + +#include "../gtc/quaternion.hpp" +#include "../gtx/dual_quaternion.hpp" + +#include "../mat2x2.hpp" +#include "../mat2x3.hpp" +#include "../mat2x4.hpp" + +#include "../mat3x2.hpp" +#include "../mat3x3.hpp" +#include "../mat3x4.hpp" + +#include "../mat4x2.hpp" +#include "../mat4x3.hpp" +#include "../mat4x4.hpp" + +#if defined(_MSC_VER) + // MSVC uses _MSVC_LANG instead of __cplusplus + #if _MSVC_LANG < 201103L + #pragma message("GLM_GTX_hash requires C++11 standard library support") + #endif +#elif defined(__GNUC__) || defined(__clang__) + // GNU and Clang use __cplusplus + #if __cplusplus < 201103L + #pragma message("GLM_GTX_hash requires C++11 standard library support") + #endif +#else + #error "Unknown compiler" +#endif + +#if GLM_LANG & GLM_LANG_CXX11 +#define GLM_GTX_hash 1 +#include + +namespace std +{ + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::vec<1, T, Q> const& v) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::vec<2, T, Q> const& v) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::vec<3, T, Q> const& v) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::vec<4, T, Q> const& v) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::qua const& q) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::tdualquat const& q) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<2, 2, T,Q> const& m) const GLM_NOEXCEPT; + }; + + 
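// Minimal usage sketch for GLM_GTX_hash: with the std::hash specialisations declared
// above, glm vectors, quaternions and matrices can be used directly as keys in the
// standard unordered containers (e.g. to deduplicate mesh vertices). Assumes the
// vendored glm headers are on the include path and a C++11 standard library; the
// helper function name is illustrative only.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/hash.hpp>
#include <cstddef>
#include <unordered_map>

std::size_t vertex_index(std::unordered_map<glm::vec3, std::size_t>& cache, glm::vec3 const& position)
{
	// emplace() only inserts when the position is new; either way it returns the slot,
	// relying on std::hash<glm::vec3> and glm's operator== for vec3.
	auto const slot = cache.emplace(position, cache.size());
	return slot.first->second;
}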
template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<2, 3, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<2, 4, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<3, 2, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<3, 3, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<3, 4, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<4, 2, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<4, 3, T,Q> const& m) const GLM_NOEXCEPT; + }; + + template + struct hash > + { + GLM_FUNC_DECL size_t operator()(glm::mat<4, 4, T,Q> const& m) const GLM_NOEXCEPT; + }; +} // namespace std + +#include "hash.inl" + +#endif //GLM_LANG & GLM_LANG_CXX11 diff --git a/includes/glm/gtx/hash.inl b/includes/glm/gtx/hash.inl new file mode 100644 index 0000000..bcadfe5 --- /dev/null +++ b/includes/glm/gtx/hash.inl @@ -0,0 +1,175 @@ +/// @ref gtx_hash + +namespace glm { +namespace detail +{ + GLM_INLINE void hash_combine(size_t &seed, size_t hash) + { + hash += 0x9e3779b9 + (seed << 6) + (seed >> 2); + seed ^= hash; + } +}} + +namespace std +{ + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::vec<1, T, Q> const& v) const GLM_NOEXCEPT + { + hash hasher; + return hasher(v.x); + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::vec<2, T, Q> const& v) const GLM_NOEXCEPT + { + size_t seed = 0; + hash hasher; + glm::detail::hash_combine(seed, hasher(v.x)); + glm::detail::hash_combine(seed, hasher(v.y)); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::vec<3, T, Q> const& v) const GLM_NOEXCEPT + { + size_t seed = 0; + hash hasher; + glm::detail::hash_combine(seed, hasher(v.x)); + glm::detail::hash_combine(seed, hasher(v.y)); + glm::detail::hash_combine(seed, hasher(v.z)); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::vec<4, T, Q> const& v) const GLM_NOEXCEPT + { + size_t seed = 0; + hash hasher; + glm::detail::hash_combine(seed, hasher(v.x)); + glm::detail::hash_combine(seed, hasher(v.y)); + glm::detail::hash_combine(seed, hasher(v.z)); + glm::detail::hash_combine(seed, hasher(v.w)); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::qua const& q) const GLM_NOEXCEPT + { + size_t seed = 0; + hash hasher; + glm::detail::hash_combine(seed, hasher(q.x)); + glm::detail::hash_combine(seed, hasher(q.y)); + glm::detail::hash_combine(seed, hasher(q.z)); + glm::detail::hash_combine(seed, hasher(q.w)); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::tdualquat const& q) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(q.real)); + glm::detail::hash_combine(seed, hasher(q.dual)); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<2, 2, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<2, 3, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + 
glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<2, 4, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<3, 2, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + glm::detail::hash_combine(seed, hasher(m[2])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<3, 3, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + glm::detail::hash_combine(seed, hasher(m[2])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<3, 4, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + glm::detail::hash_combine(seed, hasher(m[2])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<4, 2, T,Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + glm::detail::hash_combine(seed, hasher(m[2])); + glm::detail::hash_combine(seed, hasher(m[3])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<4, 3, T,Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + glm::detail::hash_combine(seed, hasher(m[2])); + glm::detail::hash_combine(seed, hasher(m[3])); + return seed; + } + + template + GLM_FUNC_QUALIFIER size_t hash >::operator()(glm::mat<4, 4, T, Q> const& m) const GLM_NOEXCEPT + { + size_t seed = 0; + hash > hasher; + glm::detail::hash_combine(seed, hasher(m[0])); + glm::detail::hash_combine(seed, hasher(m[1])); + glm::detail::hash_combine(seed, hasher(m[2])); + glm::detail::hash_combine(seed, hasher(m[3])); + return seed; + } +} diff --git a/includes/glm/gtx/integer.hpp b/includes/glm/gtx/integer.hpp new file mode 100644 index 0000000..2b16830 --- /dev/null +++ b/includes/glm/gtx/integer.hpp @@ -0,0 +1,74 @@ +/// @ref gtx_integer +/// @file glm/gtx/integer.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_integer GLM_GTX_integer +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Add support for integer for core functions + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/integer.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_integer is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_integer extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_integer + /// @{ + + //! Returns x raised to the y power. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL int pow(int x, uint y); + + //! Returns the positive square root of x. + //! 
From GLM_GTX_integer extension. + GLM_FUNC_DECL int sqrt(int x); + + //! Returns the floor log2 of x. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL unsigned int floor_log2(unsigned int x); + + //! Modulus. Returns x - y * floor(x / y) for each component in x using the floating point value y. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL int mod(int x, int y); + + //! Return the factorial value of a number (!12 max, integer only) + //! From GLM_GTX_integer extension. + template + GLM_FUNC_DECL genType factorial(genType const& x); + + //! 32bit signed integer. + //! From GLM_GTX_integer extension. + typedef signed int sint; + + //! Returns x raised to the y power. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL uint pow(uint x, uint y); + + //! Returns the positive square root of x. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL uint sqrt(uint x); + + //! Modulus. Returns x - y * floor(x / y) for each component in x using the floating point value y. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL uint mod(uint x, uint y); + + //! Returns the number of leading zeros. + //! From GLM_GTX_integer extension. + GLM_FUNC_DECL uint nlz(uint x); + + /// @} +}//namespace glm + +#include "integer.inl" diff --git a/includes/glm/gtx/integer.inl b/includes/glm/gtx/integer.inl new file mode 100644 index 0000000..eb5df30 --- /dev/null +++ b/includes/glm/gtx/integer.inl @@ -0,0 +1,185 @@ +/// @ref gtx_integer + +namespace glm +{ + // pow + GLM_FUNC_QUALIFIER int pow(int x, uint y) + { + if(y == 0) + return x >= 0 ? 1 : -1; + + int result = x; + for(uint i = 1; i < y; ++i) + result *= x; + return result; + } + + // sqrt: From Christopher J. Musial, An integer square root, Graphics Gems, 1990, page 387 + GLM_FUNC_QUALIFIER int sqrt(int x) + { + if(x <= 1) return x; + + int NextTrial = x >> 1; + int CurrentAnswer; + + do + { + CurrentAnswer = NextTrial; + NextTrial = (NextTrial + x / NextTrial) >> 1; + } while(NextTrial < CurrentAnswer); + + return CurrentAnswer; + } + +// Henry Gordon Dietz: http://aggregate.org/MAGIC/ +namespace detail +{ + GLM_FUNC_QUALIFIER unsigned int ones32(unsigned int x) + { + /* 32-bit recursive reduction using SWAR... 
+ but first step is mapping 2-bit values + into sum of 2 1-bit values in sneaky way + */ + x -= ((x >> 1) & 0x55555555); + x = (((x >> 2) & 0x33333333) + (x & 0x33333333)); + x = (((x >> 4) + x) & 0x0f0f0f0f); + x += (x >> 8); + x += (x >> 16); + return(x & 0x0000003f); + } +}//namespace detail + + // Henry Gordon Dietz: http://aggregate.org/MAGIC/ +/* + GLM_FUNC_QUALIFIER unsigned int floor_log2(unsigned int x) + { + x |= (x >> 1); + x |= (x >> 2); + x |= (x >> 4); + x |= (x >> 8); + x |= (x >> 16); + + return _detail::ones32(x) >> 1; + } +*/ + // mod + GLM_FUNC_QUALIFIER int mod(int x, int y) + { + return ((x % y) + y) % y; + } + + // factorial (!12 max, integer only) + template + GLM_FUNC_QUALIFIER genType factorial(genType const& x) + { + genType Temp = x; + genType Result; + for(Result = 1; Temp > 1; --Temp) + Result *= Temp; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<2, T, Q> factorial( + vec<2, T, Q> const& x) + { + return vec<2, T, Q>( + factorial(x.x), + factorial(x.y)); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> factorial( + vec<3, T, Q> const& x) + { + return vec<3, T, Q>( + factorial(x.x), + factorial(x.y), + factorial(x.z)); + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> factorial( + vec<4, T, Q> const& x) + { + return vec<4, T, Q>( + factorial(x.x), + factorial(x.y), + factorial(x.z), + factorial(x.w)); + } + + GLM_FUNC_QUALIFIER uint pow(uint x, uint y) + { + if (y == 0) + return 1u; + + uint result = x; + for(uint i = 1; i < y; ++i) + result *= x; + return result; + } + + GLM_FUNC_QUALIFIER uint sqrt(uint x) + { + if(x <= 1) return x; + + uint NextTrial = x >> 1; + uint CurrentAnswer; + + do + { + CurrentAnswer = NextTrial; + NextTrial = (NextTrial + x / NextTrial) >> 1; + } while(NextTrial < CurrentAnswer); + + return CurrentAnswer; + } + + GLM_FUNC_QUALIFIER uint mod(uint x, uint y) + { + return x - y * (x / y); + } + +//#if(GLM_COMPILER & (GLM_COMPILER_VC | GLM_COMPILER_GCC)) + + GLM_FUNC_QUALIFIER unsigned int nlz(unsigned int x) + { + return 31u - static_cast(findMSB(x)); + } +/* +#else + + // Hackers Delight: http://www.hackersdelight.org/HDcode/nlz.c.txt + GLM_FUNC_QUALIFIER unsigned int nlz(unsigned int x) + { + int y, m, n; + + y = -int(x >> 16); // If left half of x is 0, + m = (y >> 16) & 16; // set n = 16. If left half + n = 16 - m; // is nonzero, set n = 0 and + x = x >> m; // shift x right 16. + // Now x is of the form 0000xxxx. + y = x - 0x100; // If positions 8-15 are 0, + m = (y >> 16) & 8; // add 8 to n and shift x left 8. + n = n + m; + x = x << m; + + y = x - 0x1000; // If positions 12-15 are 0, + m = (y >> 16) & 4; // add 4 to n and shift x left 4. + n = n + m; + x = x << m; + + y = x - 0x4000; // If positions 14-15 are 0, + m = (y >> 16) & 2; // add 2 to n and shift x left 2. + n = n + m; + x = x << m; + + y = x >> 14; // Set y = 0, 1, 2, or 3. + m = y & ~(y >> 1); // Set m = 0, 1, 2, or 2 resp. + return unsigned(n + 2 - m); + } + +#endif//(GLM_COMPILER) +*/ +}//namespace glm diff --git a/includes/glm/gtx/intersect.hpp b/includes/glm/gtx/intersect.hpp new file mode 100644 index 0000000..c7aec6f --- /dev/null +++ b/includes/glm/gtx/intersect.hpp @@ -0,0 +1,90 @@ +/// @ref gtx_intersect +/// @file glm/gtx/intersect.hpp +/// +/// @see core (dependence) +/// @see gtx_closest_point (dependence) +/// +/// @defgroup gtx_intersect GLM_GTX_intersect +/// @ingroup gtx +/// +/// Include to use the features of this extension. 
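// Minimal usage sketch for the GLM_GTX_integer helpers defined just above: sqrt() is an
// integer (floor) square root, mod() is a floored modulus that stays non-negative for a
// positive divisor, and factorial() is only valid up to 12!. Assumes the vendored glm
// headers are on the include path; the demo function name is illustrative only.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/integer.hpp>

int integer_demo()
{
	int const root    = glm::sqrt(10);        // 3, floor of the real square root
	int const wrapped = glm::mod(-3, 5);      // 2, floored modulus
	int const fact    = glm::factorial(5);    // 120
	glm::uint const p = glm::pow(2u, 10u);    // 1024
	return root + wrapped + fact + static_cast<int>(p);
}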
+/// +/// Add intersection functions + +#pragma once + +// Dependency: +#include +#include +#include "../glm.hpp" +#include "../geometric.hpp" +#include "../gtx/closest_point.hpp" +#include "../gtx/vector_query.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_closest_point is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_closest_point extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_intersect + /// @{ + + //! Compute the intersection of a ray and a plane. + //! Ray direction and plane normal must be unit length. + //! From GLM_GTX_intersect extension. + template + GLM_FUNC_DECL bool intersectRayPlane( + genType const& orig, genType const& dir, + genType const& planeOrig, genType const& planeNormal, + typename genType::value_type & intersectionDistance); + + //! Compute the intersection of a ray and a triangle. + /// Based om Tomas Möller implementation http://fileadmin.cs.lth.se/cs/Personal/Tomas_Akenine-Moller/raytri/ + //! From GLM_GTX_intersect extension. + template + GLM_FUNC_DECL bool intersectRayTriangle( + vec<3, T, Q> const& orig, vec<3, T, Q> const& dir, + vec<3, T, Q> const& v0, vec<3, T, Q> const& v1, vec<3, T, Q> const& v2, + vec<2, T, Q>& baryPosition, T& distance); + + //! Compute the intersection of a line and a triangle. + //! From GLM_GTX_intersect extension. + template + GLM_FUNC_DECL bool intersectLineTriangle( + genType const& orig, genType const& dir, + genType const& vert0, genType const& vert1, genType const& vert2, + genType & position); + + //! Compute the intersection distance of a ray and a sphere. + //! The ray direction vector is unit length. + //! From GLM_GTX_intersect extension. + template + GLM_FUNC_DECL bool intersectRaySphere( + genType const& rayStarting, genType const& rayNormalizedDirection, + genType const& sphereCenter, typename genType::value_type const sphereRadiusSquared, + typename genType::value_type & intersectionDistance); + + //! Compute the intersection of a ray and a sphere. + //! From GLM_GTX_intersect extension. + template + GLM_FUNC_DECL bool intersectRaySphere( + genType const& rayStarting, genType const& rayNormalizedDirection, + genType const& sphereCenter, const typename genType::value_type sphereRadius, + genType & intersectionPosition, genType & intersectionNormal); + + //! Compute the intersection of a line and a sphere. + //! 
From GLM_GTX_intersect extension + template + GLM_FUNC_DECL bool intersectLineSphere( + genType const& point0, genType const& point1, + genType const& sphereCenter, typename genType::value_type sphereRadius, + genType & intersectionPosition1, genType & intersectionNormal1, + genType & intersectionPosition2 = genType(), genType & intersectionNormal2 = genType()); + + /// @} +}//namespace glm + +#include "intersect.inl" diff --git a/includes/glm/gtx/intersect.inl b/includes/glm/gtx/intersect.inl new file mode 100644 index 0000000..925a903 --- /dev/null +++ b/includes/glm/gtx/intersect.inl @@ -0,0 +1,200 @@ +/// @ref gtx_intersect + +namespace glm +{ + template + GLM_FUNC_QUALIFIER bool intersectRayPlane + ( + genType const& orig, genType const& dir, + genType const& planeOrig, genType const& planeNormal, + typename genType::value_type & intersectionDistance + ) + { + typename genType::value_type d = glm::dot(dir, planeNormal); + typename genType::value_type Epsilon = std::numeric_limits::epsilon(); + + if(glm::abs(d) > Epsilon) // if dir and planeNormal are not perpendicular + { + typename genType::value_type const tmp_intersectionDistance = glm::dot(planeOrig - orig, planeNormal) / d; + if (tmp_intersectionDistance > static_cast(0)) { // allow only intersections + intersectionDistance = tmp_intersectionDistance; + return true; + } + } + + return false; + } + + template + GLM_FUNC_QUALIFIER bool intersectRayTriangle + ( + vec<3, T, Q> const& orig, vec<3, T, Q> const& dir, + vec<3, T, Q> const& vert0, vec<3, T, Q> const& vert1, vec<3, T, Q> const& vert2, + vec<2, T, Q>& baryPosition, T& distance + ) + { + // find vectors for two edges sharing vert0 + vec<3, T, Q> const edge1 = vert1 - vert0; + vec<3, T, Q> const edge2 = vert2 - vert0; + + // begin calculating determinant - also used to calculate U parameter + vec<3, T, Q> const p = glm::cross(dir, edge2); + + // if determinant is near zero, ray lies in plane of triangle + T const det = glm::dot(edge1, p); + + vec<3, T, Q> Perpendicular(0); + + if (det > static_cast(0)) + { + // calculate distance from vert0 to ray origin + vec<3, T, Q> const dist = orig - vert0; + + // calculate U parameter and test bounds + baryPosition.x = glm::dot(dist, p); + if(baryPosition.x < static_cast(0) || baryPosition.x > det) + return false; + + // prepare to test V parameter + Perpendicular = glm::cross(dist, edge1); + + // calculate V parameter and test bounds + baryPosition.y = glm::dot(dir, Perpendicular); + if((baryPosition.y < static_cast(0)) || ((baryPosition.x + baryPosition.y) > det)) + return false; + } + else if(det < static_cast(0)) + { + // calculate distance from vert0 to ray origin + vec<3, T, Q> const dist = orig - vert0; + + // calculate U parameter and test bounds + baryPosition.x = glm::dot(dist, p); + if((baryPosition.x > static_cast(0)) || (baryPosition.x < det)) + return false; + + // prepare to test V parameter + Perpendicular = glm::cross(dist, edge1); + + // calculate V parameter and test bounds + baryPosition.y = glm::dot(dir, Perpendicular); + if((baryPosition.y > static_cast(0)) || (baryPosition.x + baryPosition.y < det)) + return false; + } + else + return false; // ray is parallel to the plane of the triangle + + T inv_det = static_cast(1) / det; + + // calculate distance, ray intersects triangle + distance = glm::dot(edge2, Perpendicular) * inv_det; + baryPosition *= inv_det; + + return true; + } + + template + GLM_FUNC_QUALIFIER bool intersectLineTriangle + ( + genType const& orig, genType const& dir, + genType const& vert0, genType 
const& vert1, genType const& vert2, + genType & position + ) + { + typename genType::value_type Epsilon = std::numeric_limits::epsilon(); + + genType edge1 = vert1 - vert0; + genType edge2 = vert2 - vert0; + + genType Perpendicular = cross(dir, edge2); + + typename genType::value_type det = dot(edge1, Perpendicular); + + if (det > -Epsilon && det < Epsilon) + return false; + typename genType::value_type inv_det = typename genType::value_type(1) / det; + + genType Tangent = orig - vert0; + + position.y = dot(Tangent, Perpendicular) * inv_det; + if (position.y < typename genType::value_type(0) || position.y > typename genType::value_type(1)) + return false; + + genType Cotangent = cross(Tangent, edge1); + + position.z = dot(dir, Cotangent) * inv_det; + if (position.z < typename genType::value_type(0) || position.y + position.z > typename genType::value_type(1)) + return false; + + position.x = dot(edge2, Cotangent) * inv_det; + + return true; + } + + template + GLM_FUNC_QUALIFIER bool intersectRaySphere + ( + genType const& rayStarting, genType const& rayNormalizedDirection, + genType const& sphereCenter, const typename genType::value_type sphereRadiusSquared, + typename genType::value_type & intersectionDistance + ) + { + typename genType::value_type Epsilon = std::numeric_limits::epsilon(); + genType diff = sphereCenter - rayStarting; + typename genType::value_type t0 = dot(diff, rayNormalizedDirection); + typename genType::value_type dSquared = dot(diff, diff) - t0 * t0; + if( dSquared > sphereRadiusSquared ) + { + return false; + } + typename genType::value_type t1 = sqrt( sphereRadiusSquared - dSquared ); + intersectionDistance = t0 > t1 + Epsilon ? t0 - t1 : t0 + t1; + return intersectionDistance > Epsilon; + } + + template + GLM_FUNC_QUALIFIER bool intersectRaySphere + ( + genType const& rayStarting, genType const& rayNormalizedDirection, + genType const& sphereCenter, const typename genType::value_type sphereRadius, + genType & intersectionPosition, genType & intersectionNormal + ) + { + typename genType::value_type distance; + if( intersectRaySphere( rayStarting, rayNormalizedDirection, sphereCenter, sphereRadius * sphereRadius, distance ) ) + { + intersectionPosition = rayStarting + rayNormalizedDirection * distance; + intersectionNormal = (intersectionPosition - sphereCenter) / sphereRadius; + return true; + } + return false; + } + + template + GLM_FUNC_QUALIFIER bool intersectLineSphere + ( + genType const& point0, genType const& point1, + genType const& sphereCenter, typename genType::value_type sphereRadius, + genType & intersectionPoint1, genType & intersectionNormal1, + genType & intersectionPoint2, genType & intersectionNormal2 + ) + { + typename genType::value_type Epsilon = std::numeric_limits::epsilon(); + genType dir = normalize(point1 - point0); + genType diff = sphereCenter - point0; + typename genType::value_type t0 = dot(diff, dir); + typename genType::value_type dSquared = dot(diff, diff) - t0 * t0; + if( dSquared > sphereRadius * sphereRadius ) + { + return false; + } + typename genType::value_type t1 = sqrt( sphereRadius * sphereRadius - dSquared ); + if( t0 < t1 + Epsilon ) + t1 = -t1; + intersectionPoint1 = point0 + dir * (t0 - t1); + intersectionNormal1 = (intersectionPoint1 - sphereCenter) / sphereRadius; + intersectionPoint2 = point0 + dir * (t0 + t1); + intersectionNormal2 = (intersectionPoint2 - sphereCenter) / sphereRadius; + return true; + } +}//namespace glm diff --git a/includes/glm/gtx/io.hpp b/includes/glm/gtx/io.hpp new file mode 100644 index 
0000000..5afc8cc --- /dev/null +++ b/includes/glm/gtx/io.hpp @@ -0,0 +1,210 @@ +/// @ref gtx_io +/// @file glm/gtx/io.hpp +/// @author Jan P Springer (regnirpsj@gmail.com) +/// +/// @see core (dependence) +/// @see gtc_matrix_access (dependence) +/// @see gtc_quaternion (dependence) +/// +/// @defgroup gtx_io GLM_GTX_io +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// std::[w]ostream support for glm types +/// +/// std::[w]ostream support for glm types + qualifier/width/etc. manipulators +/// based on howard hinnant's std::chrono io proposal +/// [http://home.roadrunner.com/~hinnant/bloomington/chrono_io.html] + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtx/quaternion.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_io is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_io extension included") +#endif + +#if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wpadded" +# pragma clang diagnostic ignored "-Wshorten-64-to-32" +# pragma clang diagnostic ignored "-Wglobal-constructors" +#endif + +#include // std::basic_ostream<> (fwd) +#include // std::locale, std::locale::facet, std::locale::id +#include // std::pair<> + +namespace glm +{ + /// @addtogroup gtx_io + /// @{ + + namespace io + { + enum order_type { column_major, row_major}; + + template + class format_punct : public std::locale::facet + { + typedef CTy char_type; + + public: + + static std::locale::id id; + + bool formatted; + unsigned precision; + unsigned width; + char_type separator; + char_type delim_left; + char_type delim_right; + char_type space; + char_type newline; + order_type order; + + GLM_FUNC_DISCARD_DECL explicit format_punct(size_t a = 0); + GLM_FUNC_DISCARD_DECL explicit format_punct(format_punct const&); + }; + + template > + class basic_state_saver { + + public: + + GLM_FUNC_DISCARD_DECL explicit basic_state_saver(std::basic_ios&); + GLM_FUNC_DISCARD_DECL ~basic_state_saver(); + + private: + + typedef ::std::basic_ios state_type; + typedef typename state_type::char_type char_type; + typedef ::std::ios_base::fmtflags flags_type; + typedef ::std::streamsize streamsize_type; + typedef ::std::locale const locale_type; + + state_type& state_; + flags_type flags_; + streamsize_type precision_; + streamsize_type width_; + char_type fill_; + locale_type locale_; + + GLM_FUNC_DECL basic_state_saver& operator=(basic_state_saver const&); + }; + + typedef basic_state_saver state_saver; + typedef basic_state_saver wstate_saver; + + template > + class basic_format_saver + { + public: + + GLM_FUNC_DISCARD_DECL explicit basic_format_saver(std::basic_ios&); + GLM_FUNC_DISCARD_DECL ~basic_format_saver(); + + private: + + basic_state_saver const bss_; + + GLM_FUNC_DECL basic_format_saver& operator=(basic_format_saver const&); + }; + + typedef basic_format_saver format_saver; + typedef basic_format_saver wformat_saver; + + struct precision + { + unsigned value; + + GLM_FUNC_DISCARD_DECL explicit precision(unsigned); + }; + + struct width + { + unsigned value; + + GLM_FUNC_DISCARD_DECL explicit width(unsigned); + }; + + template + struct delimeter + { + CTy value[3]; + + GLM_FUNC_DISCARD_DECL explicit delimeter(CTy /* left */, CTy /* right */, CTy /* separator */ = ','); + }; + + struct order + { + order_type value; + 
+ GLM_FUNC_DISCARD_DECL explicit order(order_type); + }; + + // functions, inlined (inline) + + template + FTy const& get_facet(std::basic_ios&); + template + std::basic_ios& formatted(std::basic_ios&); + template + std::basic_ios& unformatted(std::basic_ios&); + + template + std::basic_ostream& operator<<(std::basic_ostream&, precision const&); + template + std::basic_ostream& operator<<(std::basic_ostream&, width const&); + template + std::basic_ostream& operator<<(std::basic_ostream&, delimeter const&); + template + std::basic_ostream& operator<<(std::basic_ostream&, order const&); + }//namespace io + + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, qua const&); + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<1, T, Q> const&); + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<2, T, Q> const&); + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<3, T, Q> const&); + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<4, T, Q> const&); + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<2, 2, T, Q> const&); + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<2, 3, T, Q> const&); + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<2, 4, T, Q> const&); + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<3, 2, T, Q> const&); + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<3, 3, T, Q> const&); + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<3, 4, T, Q> const&); + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<4, 2, T, Q> const&); + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<4, 3, T, Q> const&); + template + GLM_FUNC_DISCARD_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<4, 4, T, Q> const&); + + template + GLM_FUNC_DISCARD_DECL std::basic_ostream & operator<<(std::basic_ostream &, + std::pair const, mat<4, 4, T, Q> const> const&); + + /// @} +}//namespace glm + +#if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +#endif + +#include "io.inl" diff --git a/includes/glm/gtx/io.inl b/includes/glm/gtx/io.inl new file mode 100644 index 0000000..d4ef825 --- /dev/null +++ b/includes/glm/gtx/io.inl @@ -0,0 +1,452 @@ +/// @ref gtx_io +/// @author Jan P Springer (regnirpsj@gmail.com) + +#include // std::fixed, std::setfill<>, std::setprecision, std::right, std::setw +#include // std::basic_ostream<> +#include "../gtc/matrix_access.hpp" // glm::col, glm::row +#include "../gtx/type_trait.hpp" // glm::type<> + +#if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wpadded" +# pragma clang diagnostic ignored "-Wshorten-64-to-32" +# pragma clang diagnostic ignored "-Wglobal-constructors" +#endif + +namespace glm{ +namespace io +{ + template + GLM_FUNC_QUALIFIER format_punct::format_punct(size_t a) + : std::locale::facet(a) + , formatted(true) + , precision(3) + , width(1 + 4 + 1 + precision) + , separator(',') + , delim_left('[') + , delim_right(']') + , space(' ') + , newline('\n') + , order(column_major) + {} + + template + GLM_FUNC_QUALIFIER format_punct::format_punct(format_punct const& a) + : 
std::locale::facet(0) + , formatted(a.formatted) + , precision(a.precision) + , width(a.width) + , separator(a.separator) + , delim_left(a.delim_left) + , delim_right(a.delim_right) + , space(a.space) + , newline(a.newline) + , order(a.order) + {} + + template std::locale::id format_punct::id; + + template + GLM_FUNC_QUALIFIER basic_state_saver::basic_state_saver(std::basic_ios& a) + : state_(a) + , flags_(a.flags()) + , precision_(a.precision()) + , width_(a.width()) + , fill_(a.fill()) + , locale_(a.getloc()) + {} + + template + GLM_FUNC_QUALIFIER basic_state_saver::~basic_state_saver() + { + state_.imbue(locale_); + state_.fill(fill_); + state_.width(width_); + state_.precision(precision_); + state_.flags(flags_); + } + + template + GLM_FUNC_QUALIFIER basic_format_saver::basic_format_saver(std::basic_ios& a) + : bss_(a) + { + a.imbue(std::locale(a.getloc(), new format_punct(get_facet >(a)))); + } + + template + GLM_FUNC_QUALIFIER + basic_format_saver::~basic_format_saver() + {} + + GLM_FUNC_QUALIFIER precision::precision(unsigned a) + : value(a) + {} + + GLM_FUNC_QUALIFIER width::width(unsigned a) + : value(a) + {} + + template + GLM_FUNC_QUALIFIER delimeter::delimeter(CTy a, CTy b, CTy c) + : value() + { + value[0] = a; + value[1] = b; + value[2] = c; + } + + GLM_FUNC_QUALIFIER order::order(order_type a) + : value(a) + {} + + template + GLM_FUNC_QUALIFIER FTy const& get_facet(std::basic_ios& ios) + { + if(!std::has_facet(ios.getloc())) + ios.imbue(std::locale(ios.getloc(), new FTy)); + + return std::use_facet(ios.getloc()); + } + + template + GLM_FUNC_QUALIFIER std::basic_ios& formatted(std::basic_ios& ios) + { + const_cast&>(get_facet >(ios)).formatted = true; + return ios; + } + + template + GLM_FUNC_QUALIFIER std::basic_ios& unformatted(std::basic_ios& ios) + { + const_cast&>(get_facet >(ios)).formatted = false; + return ios; + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, precision const& a) + { + const_cast&>(get_facet >(os)).precision = a.value; + return os; + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, width const& a) + { + const_cast&>(get_facet >(os)).width = a.value; + return os; + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, delimeter const& a) + { + format_punct & fmt(const_cast&>(get_facet >(os))); + + fmt.delim_left = a.value[0]; + fmt.delim_right = a.value[1]; + fmt.separator = a.value[2]; + + return os; + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, order const& a) + { + const_cast&>(get_facet >(os)).order = a.value; + return os; + } +} // namespace io + +namespace detail +{ + template + GLM_FUNC_QUALIFIER std::basic_ostream& + print_vector_on(std::basic_ostream& os, V const& a) + { + typename std::basic_ostream::sentry const cerberus(os); + + if(cerberus) + { + io::format_punct const& fmt(io::get_facet >(os)); + + length_t const& components(type::components); + + if(fmt.formatted) + { + io::basic_state_saver const bss(os); + + os << std::fixed << std::right << std::setprecision(static_cast(fmt.precision)) << std::setfill(fmt.space) << fmt.delim_left; + + for(length_t i(0); i < components; ++i) + { + os << std::setw(static_cast(fmt.width)) << a[i]; + if(components-1 != i) + os << fmt.separator; + } + + os << fmt.delim_right; + } + else + { + for(length_t i(0); i < components; ++i) + { + os << a[i]; + + if(components-1 != i) + os << fmt.space; + } + } + } + + return os; + } +}//namespace 
detail + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, qua const& a) + { + return detail::print_vector_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<1, T, Q> const& a) + { + return detail::print_vector_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<2, T, Q> const& a) + { + return detail::print_vector_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<3, T, Q> const& a) + { + return detail::print_vector_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<4, T, Q> const& a) + { + return detail::print_vector_on(os, a); + } + +namespace detail +{ + template class M, length_t C, length_t R, typename T, qualifier Q> + GLM_FUNC_QUALIFIER std::basic_ostream& print_matrix_on(std::basic_ostream& os, M const& a) + { + typename std::basic_ostream::sentry const cerberus(os); + + if(cerberus) + { + io::format_punct const& fmt(io::get_facet >(os)); + + length_t const& cols(type >::cols); + length_t const& rows(type >::rows); + + if(fmt.formatted) + { + os << fmt.newline << fmt.delim_left; + + switch(fmt.order) + { + case io::column_major: + { + for(length_t i(0); i < rows; ++i) + { + if (0 != i) + os << fmt.space; + + os << row(a, i); + + if(rows-1 != i) + os << fmt.newline; + } + } + break; + + case io::row_major: + { + for(length_t i(0); i < cols; ++i) + { + if(0 != i) + os << fmt.space; + + os << column(a, i); + + if(cols-1 != i) + os << fmt.newline; + } + } + break; + } + + os << fmt.delim_right; + } + else + { + switch (fmt.order) + { + case io::column_major: + { + for(length_t i(0); i < cols; ++i) + { + os << column(a, i); + + if(cols - 1 != i) + os << fmt.space; + } + } + break; + + case io::row_major: + { + for (length_t i(0); i < rows; ++i) + { + os << row(a, i); + + if (rows-1 != i) + os << fmt.space; + } + } + break; + } + } + } + + return os; + } +}//namespace detail + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<2, 2, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<2, 3, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<2, 4, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<3, 2, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<3, 3, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<3, 4, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<4, 2, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<4, 3, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + + template + GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<4, 4, T, Q> const& a) + { + return detail::print_matrix_on(os, a); + } + 
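+	// Usage sketch (illustrative only, not part of the original GLM sources):
+	// with GLM_ENABLE_EXPERIMENTAL defined and <glm/gtx/io.hpp> included, the
+	// manipulators declared above tune how the operator<< overloads print glm
+	// types, e.g.
+	//
+	//     glm::mat4 m(1.0f);
+	//     std::cout << glm::io::precision(2) << glm::io::width(8)
+	//               << glm::io::order(glm::io::row_major) << m << '\n';
+	//
+	// Each manipulator updates the format_punct facet imbued on the stream, so
+	// the settings persist for later insertions on that stream.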
+namespace detail +{ + template class M, length_t C, length_t R, typename T, qualifier Q> + GLM_FUNC_QUALIFIER std::basic_ostream& print_matrix_pair_on(std::basic_ostream& os, std::pair const, M const> const& a) + { + typename std::basic_ostream::sentry const cerberus(os); + + if(cerberus) + { + io::format_punct const& fmt(io::get_facet >(os)); + M const& ml(a.first); + M const& mr(a.second); + length_t const& cols(type >::cols); + length_t const& rows(type >::rows); + + if(fmt.formatted) + { + os << fmt.newline << fmt.delim_left; + + switch(fmt.order) + { + case io::column_major: + { + for(length_t i(0); i < rows; ++i) + { + if(0 != i) + os << fmt.space; + + os << row(ml, i) << ((rows-1 != i) ? fmt.space : fmt.delim_right) << fmt.space << ((0 != i) ? fmt.space : fmt.delim_left) << row(mr, i); + + if(rows-1 != i) + os << fmt.newline; + } + } + break; + case io::row_major: + { + for(length_t i(0); i < cols; ++i) + { + if(0 != i) + os << fmt.space; + + os << column(ml, i) << ((cols-1 != i) ? fmt.space : fmt.delim_right) << fmt.space << ((0 != i) ? fmt.space : fmt.delim_left) << column(mr, i); + + if(cols-1 != i) + os << fmt.newline; + } + } + break; + } + + os << fmt.delim_right; + } + else + { + os << ml << fmt.space << mr; + } + } + + return os; + } +}//namespace detail + + template + GLM_FUNC_QUALIFIER std::basic_ostream& operator<<( + std::basic_ostream & os, + std::pair const, + mat<4, 4, T, Q> const> const& a) + { + return detail::print_matrix_pair_on(os, a); + } +}//namespace glm + +#if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +#endif + diff --git a/includes/glm/gtx/log_base.hpp b/includes/glm/gtx/log_base.hpp new file mode 100644 index 0000000..915c7a4 --- /dev/null +++ b/includes/glm/gtx/log_base.hpp @@ -0,0 +1,46 @@ +/// @ref gtx_log_base +/// @file glm/gtx/log_base.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_log_base GLM_GTX_log_base +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Logarithm for any base. base can be a vector or a scalar. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_log_base is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_log_base extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_log_base + /// @{ + + /// Logarithm for any base. + /// From GLM_GTX_log_base. + template + GLM_FUNC_DECL genType log( + genType const& x, + genType const& base); + + /// Logarithm for any base. + /// From GLM_GTX_log_base. 
+ template + GLM_FUNC_DECL vec sign( + vec const& x, + vec const& base); + + /// @} +}//namespace glm + +#include "log_base.inl" diff --git a/includes/glm/gtx/log_base.inl b/includes/glm/gtx/log_base.inl new file mode 100644 index 0000000..4bbb8e8 --- /dev/null +++ b/includes/glm/gtx/log_base.inl @@ -0,0 +1,16 @@ +/// @ref gtx_log_base + +namespace glm +{ + template + GLM_FUNC_QUALIFIER genType log(genType const& x, genType const& base) + { + return glm::log(x) / glm::log(base); + } + + template + GLM_FUNC_QUALIFIER vec log(vec const& x, vec const& base) + { + return glm::log(x) / glm::log(base); + } +}//namespace glm diff --git a/includes/glm/gtx/matrix_cross_product.hpp b/includes/glm/gtx/matrix_cross_product.hpp new file mode 100644 index 0000000..882a1d7 --- /dev/null +++ b/includes/glm/gtx/matrix_cross_product.hpp @@ -0,0 +1,45 @@ +/// @ref gtx_matrix_cross_product +/// @file glm/gtx/matrix_cross_product.hpp +/// +/// @see core (dependence) +/// @see gtx_extented_min_max (dependence) +/// +/// @defgroup gtx_matrix_cross_product GLM_GTX_matrix_cross_product +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Build cross product matrices + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_matrix_cross_product is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_matrix_cross_product extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_matrix_cross_product + /// @{ + + //! Build a cross product matrix. + //! From GLM_GTX_matrix_cross_product extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> matrixCross3( + vec<3, T, Q> const& x); + + //! Build a cross product matrix. + //! From GLM_GTX_matrix_cross_product extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> matrixCross4( + vec<3, T, Q> const& x); + + /// @} +}//namespace glm + +#include "matrix_cross_product.inl" diff --git a/includes/glm/gtx/matrix_cross_product.inl b/includes/glm/gtx/matrix_cross_product.inl new file mode 100644 index 0000000..3a15397 --- /dev/null +++ b/includes/glm/gtx/matrix_cross_product.inl @@ -0,0 +1,37 @@ +/// @ref gtx_matrix_cross_product + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> matrixCross3 + ( + vec<3, T, Q> const& x + ) + { + mat<3, 3, T, Q> Result(T(0)); + Result[0][1] = x.z; + Result[1][0] = -x.z; + Result[0][2] = -x.y; + Result[2][0] = x.y; + Result[1][2] = x.x; + Result[2][1] = -x.x; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> matrixCross4 + ( + vec<3, T, Q> const& x + ) + { + mat<4, 4, T, Q> Result(T(0)); + Result[0][1] = x.z; + Result[1][0] = -x.z; + Result[0][2] = -x.y; + Result[2][0] = x.y; + Result[1][2] = x.x; + Result[2][1] = -x.x; + return Result; + } + +}//namespace glm diff --git a/includes/glm/gtx/matrix_decompose.hpp b/includes/glm/gtx/matrix_decompose.hpp new file mode 100644 index 0000000..19ac8a8 --- /dev/null +++ b/includes/glm/gtx/matrix_decompose.hpp @@ -0,0 +1,50 @@ +/// @ref gtx_matrix_decompose +/// @file glm/gtx/matrix_decompose.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_matrix_decompose GLM_GTX_matrix_decompose +/// @ingroup gtx +/// +/// Include to use the features of this extension. 
+/// +/// Decomposes a model matrix to translations, rotation and scale components + +#pragma once + +// Dependencies +#include "../mat4x4.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" +#include "../geometric.hpp" +#include "../gtc/quaternion.hpp" +#include "../gtc/matrix_transform.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_matrix_decompose is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_matrix_decompose extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_matrix_decompose + /// @{ + + /// Decomposes a model matrix to translations, rotation and scale components + /// @see gtx_matrix_decompose + template + GLM_FUNC_DISCARD_DECL bool decompose( + mat<4, 4, T, Q> const& modelMatrix, + vec<3, T, Q> & scale, qua & orientation, vec<3, T, Q> & translation, vec<3, T, Q> & skew, vec<4, T, Q> & perspective); + + // Recomposes a model matrix from a previously-decomposed matrix + template + GLM_FUNC_DISCARD_DECL mat<4, 4, T, Q> recompose( + vec<3, T, Q> const& scale, qua const& orientation, vec<3, T, Q> const& translation, + vec<3, T, Q> const& skew, vec<4, T, Q> const& perspective); + + /// @} +}//namespace glm + +#include "matrix_decompose.inl" diff --git a/includes/glm/gtx/matrix_decompose.inl b/includes/glm/gtx/matrix_decompose.inl new file mode 100644 index 0000000..1b587e2 --- /dev/null +++ b/includes/glm/gtx/matrix_decompose.inl @@ -0,0 +1,234 @@ +/// @ref gtx_matrix_decompose + +#include "../gtc/constants.hpp" +#include "../gtc/epsilon.hpp" +#include "../gtx/transform.hpp" + +namespace glm{ +namespace detail +{ + /// Make a linear combination of two vectors and return the result. + // result = (a * ascl) + (b * bscl) + template + GLM_FUNC_QUALIFIER vec<3, T, Q> combine( + vec<3, T, Q> const& a, + vec<3, T, Q> const& b, + T ascl, T bscl) + { + return (a * ascl) + (b * bscl); + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> scale(vec<3, T, Q> const& v, T desiredLength) + { + return v * desiredLength / length(v); + } +}//namespace detail + + // Matrix decompose + // http://www.opensource.apple.com/source/WebCore/WebCore-514/platform/graphics/transforms/TransformationMatrix.cpp + // Decomposes the mode matrix to translations,rotation scale components + + template + GLM_FUNC_QUALIFIER bool decompose(mat<4, 4, T, Q> const& ModelMatrix, vec<3, T, Q> & Scale, qua & Orientation, vec<3, T, Q> & Translation, vec<3, T, Q> & Skew, vec<4, T, Q> & Perspective) + { + mat<4, 4, T, Q> LocalMatrix(ModelMatrix); + + // Normalize the matrix. + if(epsilonEqual(LocalMatrix[3][3], static_cast(0), epsilon())) + return false; + + for(length_t i = 0; i < 4; ++i) + for(length_t j = 0; j < 4; ++j) + LocalMatrix[i][j] /= LocalMatrix[3][3]; + + // perspectiveMatrix is used to solve for perspective, but it also provides + // an easy way to test for singularity of the upper 3x3 component. + mat<4, 4, T, Q> PerspectiveMatrix(LocalMatrix); + + for(length_t i = 0; i < 3; i++) + PerspectiveMatrix[i][3] = static_cast(0); + PerspectiveMatrix[3][3] = static_cast(1); + + /// TODO: Fixme! + if(epsilonEqual(determinant(PerspectiveMatrix), static_cast(0), epsilon())) + return false; + + // First, isolate perspective. This is the messiest. 
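+		// The perspective terms sit in the fourth row of the matrix
+		// (LocalMatrix[i][3] for column i). They are recovered by solving
+		// transpose(PerspectiveMatrix) * p == rightHandSide, i.e. via the
+		// transpose(inverse(PerspectiveMatrix)) * rightHandSide product below.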
+ if( + epsilonNotEqual(LocalMatrix[0][3], static_cast(0), epsilon()) || + epsilonNotEqual(LocalMatrix[1][3], static_cast(0), epsilon()) || + epsilonNotEqual(LocalMatrix[2][3], static_cast(0), epsilon())) + { + // rightHandSide is the right hand side of the equation. + vec<4, T, Q> RightHandSide; + RightHandSide[0] = LocalMatrix[0][3]; + RightHandSide[1] = LocalMatrix[1][3]; + RightHandSide[2] = LocalMatrix[2][3]; + RightHandSide[3] = LocalMatrix[3][3]; + + // Solve the equation by inverting PerspectiveMatrix and multiplying + // rightHandSide by the inverse. (This is the easiest way, not + // necessarily the best.) + mat<4, 4, T, Q> InversePerspectiveMatrix = glm::inverse(PerspectiveMatrix);// inverse(PerspectiveMatrix, inversePerspectiveMatrix); + mat<4, 4, T, Q> TransposedInversePerspectiveMatrix = glm::transpose(InversePerspectiveMatrix);// transposeMatrix4(inversePerspectiveMatrix, transposedInversePerspectiveMatrix); + + Perspective = TransposedInversePerspectiveMatrix * RightHandSide; + // v4MulPointByMatrix(rightHandSide, transposedInversePerspectiveMatrix, perspectivePoint); + + // Clear the perspective partition + LocalMatrix[0][3] = LocalMatrix[1][3] = LocalMatrix[2][3] = static_cast(0); + LocalMatrix[3][3] = static_cast(1); + } + else + { + // No perspective. + Perspective = vec<4, T, Q>(0, 0, 0, 1); + } + + // Next take care of translation (easy). + Translation = vec<3, T, Q>(LocalMatrix[3]); + LocalMatrix[3] = vec<4, T, Q>(0, 0, 0, LocalMatrix[3].w); + + vec<3, T, Q> Row[3], Pdum3; + + // Now get scale and shear. + for(length_t i = 0; i < 3; ++i) + for(length_t j = 0; j < 3; ++j) + Row[i][j] = LocalMatrix[i][j]; + + // Compute X scale factor and normalize first row. + Scale.x = length(Row[0]);// v3Length(Row[0]); + + Row[0] = detail::scale(Row[0], static_cast(1)); + + // Compute XY shear factor and make 2nd row orthogonal to 1st. + Skew.z = dot(Row[0], Row[1]); + Row[1] = detail::combine(Row[1], Row[0], static_cast(1), -Skew.z); + + // Now, compute Y scale and normalize 2nd row. + Scale.y = length(Row[1]); + Row[1] = detail::scale(Row[1], static_cast(1)); + Skew.z /= Scale.y; + + // Compute XZ and YZ shears, orthogonalize 3rd row. + Skew.y = glm::dot(Row[0], Row[2]); + Row[2] = detail::combine(Row[2], Row[0], static_cast(1), -Skew.y); + Skew.x = glm::dot(Row[1], Row[2]); + Row[2] = detail::combine(Row[2], Row[1], static_cast(1), -Skew.x); + + // Next, get Z scale and normalize 3rd row. + Scale.z = length(Row[2]); + Row[2] = detail::scale(Row[2], static_cast(1)); + Skew.y /= Scale.z; + Skew.x /= Scale.z; + + // At this point, the matrix (in rows[]) is orthonormal. + // Check for a coordinate system flip. If the determinant + // is -1, then negate the matrix and the scaling factors. + Pdum3 = cross(Row[1], Row[2]); // v3Cross(row[1], row[2], Pdum3); + if(dot(Row[0], Pdum3) < 0) + { + for(length_t i = 0; i < 3; i++) + { + Scale[i] *= static_cast(-1); + Row[i] *= static_cast(-1); + } + } + + // Now, get the rotations out, as described in the gem. + + // FIXME - Add the ability to return either quaternions (which are + // easier to recompose with) or Euler angles (rx, ry, rz), which + // are easier for authors to deal with. The latter will only be useful + // when we fix https://bugs.webkit.org/show_bug.cgi?id=23799, so I + // will leave the Euler angle code here for now. 
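+		// The commented-out block below is that Euler-angle variant; the active
+		// code after it converts the orthonormal basis in Row[] to a quaternion
+		// using the standard trace-based construction.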
+ + // ret.rotateY = asin(-Row[0][2]); + // if (cos(ret.rotateY) != 0) { + // ret.rotateX = atan2(Row[1][2], Row[2][2]); + // ret.rotateZ = atan2(Row[0][1], Row[0][0]); + // } else { + // ret.rotateX = atan2(-Row[2][0], Row[1][1]); + // ret.rotateZ = 0; + // } + + int i, j, k = 0; + T root, trace = Row[0].x + Row[1].y + Row[2].z; + if(trace > static_cast(0)) + { + root = sqrt(trace + static_cast(1.0)); + Orientation.w = static_cast(0.5) * root; + root = static_cast(0.5) / root; + Orientation.x = root * (Row[1].z - Row[2].y); + Orientation.y = root * (Row[2].x - Row[0].z); + Orientation.z = root * (Row[0].y - Row[1].x); + } // End if > 0 + else + { + static int Next[3] = {1, 2, 0}; + i = 0; + if(Row[1].y > Row[0].x) i = 1; + if(Row[2].z > Row[i][i]) i = 2; + j = Next[i]; + k = Next[j]; + +# ifdef GLM_FORCE_QUAT_DATA_WXYZ + int off = 1; +# else + int off = 0; +# endif + + root = sqrt(Row[i][i] - Row[j][j] - Row[k][k] + static_cast(1.0)); + + Orientation[i + off] = static_cast(0.5) * root; + root = static_cast(0.5) / root; + Orientation[j + off] = root * (Row[i][j] + Row[j][i]); + Orientation[k + off] = root * (Row[i][k] + Row[k][i]); + Orientation.w = root * (Row[j][k] - Row[k][j]); + } // End if <= 0 + + return true; + } + + // Recomposes a model matrix from a previously-decomposed matrix + // http://www.opensource.apple.com/source/WebCore/WebCore-514/platform/graphics/transforms/TransformationMatrix.cpp + // https://stackoverflow.com/a/75573092/1047040 + template + GLM_FUNC_DECL mat<4, 4, T, Q> recompose( + vec<3, T, Q> const& scale, qua const& orientation, vec<3, T, Q> const& translation, + vec<3, T, Q> const& skew, vec<4, T, Q> const& perspective) + { + glm::mat4 m = glm::mat4(1.f); + + m[0][3] = perspective.x; + m[1][3] = perspective.y; + m[2][3] = perspective.z; + m[3][3] = perspective.w; + + m *= glm::translate(translation); + m *= glm::mat4_cast(orientation); + + if (abs(skew.x) > static_cast(0)) { + glm::mat4 tmp(1.f); + tmp[2][1] = skew.x; + m *= tmp; + } + + if (abs(skew.y) > static_cast(0)) { + glm::mat4 tmp(1.f); + tmp[2][0] = skew.y; + m *= tmp; + } + + if (abs(skew.z) > static_cast(0)) { + glm::mat4 tmp(1.f); + tmp[1][0] = skew.z; + m *= tmp; + } + + m *= glm::scale(scale); + + return m; + } +}//namespace glm diff --git a/includes/glm/gtx/matrix_factorisation.hpp b/includes/glm/gtx/matrix_factorisation.hpp new file mode 100644 index 0000000..dc32847 --- /dev/null +++ b/includes/glm/gtx/matrix_factorisation.hpp @@ -0,0 +1,67 @@ +/// @ref gtx_matrix_factorisation +/// @file glm/gtx/matrix_factorisation.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_matrix_factorisation GLM_GTX_matrix_factorisation +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Functions to factor matrices in various forms + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_matrix_factorisation is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_matrix_factorisation extension included") +#endif + +/* +Suggestions: + - Move helper functions flipud and fliplr to another file: They may be helpful in more general circumstances. + - Implement other types of matrix factorisation, such as: QL and LQ, L(D)U, eigendecompositions, etc... 
+*/ + +namespace glm +{ + /// @addtogroup gtx_matrix_factorisation + /// @{ + + /// Flips the matrix rows up and down. + /// + /// From GLM_GTX_matrix_factorisation extension. + template + GLM_FUNC_DECL mat flipud(mat const& in); + + /// Flips the matrix columns right and left. + /// + /// From GLM_GTX_matrix_factorisation extension. + template + GLM_FUNC_DECL mat fliplr(mat const& in); + + /// Performs QR factorisation of a matrix. + /// Returns 2 matrices, q and r, such that the columns of q are orthonormal and span the same subspace than those of the input matrix, r is an upper triangular matrix, and q*r=in. + /// Given an n-by-m input matrix, q has dimensions min(n,m)-by-m, and r has dimensions n-by-min(n,m). + /// + /// From GLM_GTX_matrix_factorisation extension. + template + GLM_FUNC_DISCARD_DECL void qr_decompose(mat const& in, mat<(C < R ? C : R), R, T, Q>& q, mat& r); + + /// Performs RQ factorisation of a matrix. + /// Returns 2 matrices, r and q, such that r is an upper triangular matrix, the rows of q are orthonormal and span the same subspace than those of the input matrix, and r*q=in. + /// Note that in the context of RQ factorisation, the diagonal is seen as starting in the lower-right corner of the matrix, instead of the usual upper-left. + /// Given an n-by-m input matrix, r has dimensions min(n,m)-by-m, and q has dimensions n-by-min(n,m). + /// + /// From GLM_GTX_matrix_factorisation extension. + template + GLM_FUNC_DISCARD_DECL void rq_decompose(mat const& in, mat<(C < R ? C : R), R, T, Q>& r, mat& q); + + /// @} +} + +#include "matrix_factorisation.inl" diff --git a/includes/glm/gtx/matrix_factorisation.inl b/includes/glm/gtx/matrix_factorisation.inl new file mode 100644 index 0000000..6f1683c --- /dev/null +++ b/includes/glm/gtx/matrix_factorisation.inl @@ -0,0 +1,84 @@ +/// @ref gtx_matrix_factorisation + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat flipud(mat const& in) + { + mat tin = transpose(in); + tin = fliplr(tin); + mat out = transpose(tin); + + return out; + } + + template + GLM_FUNC_QUALIFIER mat fliplr(mat const& in) + { + mat out; + for (length_t i = 0; i < C; i++) + { + out[i] = in[(C - i) - 1]; + } + + return out; + } + + template + GLM_FUNC_QUALIFIER void qr_decompose(mat const& in, mat<(C < R ? C : R), R, T, Q>& q, mat& r) + { + // Uses modified Gram-Schmidt method + // Source: https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process + // And https://en.wikipedia.org/wiki/QR_decomposition + + //For all the linearly independs columns of the input... + // (there can be no more linearly independents columns than there are rows.) + for (length_t i = 0; i < (C < R ? C : R); i++) + { + //Copy in Q the input's i-th column. + q[i] = in[i]; + + //j = [0,i[ + // Make that column orthogonal to all the previous ones by substracting to it the non-orthogonal projection of all the previous columns. + // Also: Fill the zero elements of R + for (length_t j = 0; j < i; j++) + { + q[i] -= dot(q[i], q[j])*q[j]; + r[j][i] = 0; + } + + //Now, Q i-th column is orthogonal to all the previous columns. Normalize it. + q[i] = normalize(q[i]); + + //j = [i,C[ + //Finally, compute the corresponding coefficients of R by computing the projection of the resulting column on the other columns of the input. + for (length_t j = i; j < C; j++) + { + r[j][i] = dot(in[j], q[i]); + } + } + } + + template + GLM_FUNC_QUALIFIER void rq_decompose(mat const& in, mat<(C < R ? 
C : R), R, T, Q>& r, mat& q) + { + // From https://en.wikipedia.org/wiki/QR_decomposition: + // The RQ decomposition transforms a matrix A into the product of an upper triangular matrix R (also known as right-triangular) and an orthogonal matrix Q. The only difference from QR decomposition is the order of these matrices. + // QR decomposition is Gram-Schmidt orthogonalization of columns of A, started from the first column. + // RQ decomposition is Gram-Schmidt orthogonalization of rows of A, started from the last row. + + mat tin = transpose(in); + tin = fliplr(tin); + + mat tr; + mat<(C < R ? C : R), C, T, Q> tq; + qr_decompose(tin, tq, tr); + + tr = fliplr(tr); + r = transpose(tr); + r = fliplr(r); + + tq = fliplr(tq); + q = transpose(tq); + } +} //namespace glm diff --git a/includes/glm/gtx/matrix_interpolation.hpp b/includes/glm/gtx/matrix_interpolation.hpp new file mode 100644 index 0000000..e2767c8 --- /dev/null +++ b/includes/glm/gtx/matrix_interpolation.hpp @@ -0,0 +1,58 @@ +/// @ref gtx_matrix_interpolation +/// @file glm/gtx/matrix_interpolation.hpp +/// @author Ghenadii Ursachi (the.asteroth@gmail.com) +/// +/// @see core (dependence) +/// +/// @defgroup gtx_matrix_interpolation GLM_GTX_matrix_interpolation +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Allows to directly interpolate two matrices. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_matrix_interpolation is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_matrix_interpolation extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_matrix_interpolation + /// @{ + + /// Get the axis and angle of the rotation from a matrix. + /// From GLM_GTX_matrix_interpolation extension. + template + GLM_FUNC_DISCARD_DECL void axisAngle( + mat<4, 4, T, Q> const& Mat, vec<3, T, Q> & Axis, T & Angle); + + /// Build a matrix from axis and angle. + /// From GLM_GTX_matrix_interpolation extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> axisAngleMatrix( + vec<3, T, Q> const& Axis, T const Angle); + + /// Extracts the rotation part of a matrix. + /// From GLM_GTX_matrix_interpolation extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> extractMatrixRotation( + mat<4, 4, T, Q> const& Mat); + + /// Build a interpolation of 4 * 4 matrixes. + /// From GLM_GTX_matrix_interpolation extension. + /// Warning! works only with rotation and/or translation matrixes, scale will generate unexpected results. 
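+	/// The rotation part is interpolated through the axis/angle of the delta
+	/// rotation between m1 and m2; the translation part is interpolated linearly.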
+ template + GLM_FUNC_DECL mat<4, 4, T, Q> interpolate( + mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2, T const Delta); + + /// @} +}//namespace glm + +#include "matrix_interpolation.inl" diff --git a/includes/glm/gtx/matrix_interpolation.inl b/includes/glm/gtx/matrix_interpolation.inl new file mode 100644 index 0000000..f4ba3a6 --- /dev/null +++ b/includes/glm/gtx/matrix_interpolation.inl @@ -0,0 +1,146 @@ +/// @ref gtx_matrix_interpolation + +#include "../ext/scalar_constants.hpp" + +#include + +namespace glm +{ + template + GLM_FUNC_QUALIFIER void axisAngle(mat<4, 4, T, Q> const& m, vec<3, T, Q>& axis, T& angle) + { + T const epsilon = + std::numeric_limits::epsilon() * static_cast(1e2); + + bool const nearSymmetrical = + abs(m[1][0] - m[0][1]) < epsilon && + abs(m[2][0] - m[0][2]) < epsilon && + abs(m[2][1] - m[1][2]) < epsilon; + + if(nearSymmetrical) + { + bool const nearIdentity = + abs(m[1][0] + m[0][1]) < epsilon && + abs(m[2][0] + m[0][2]) < epsilon && + abs(m[2][1] + m[1][2]) < epsilon && + abs(m[0][0] + m[1][1] + m[2][2] - T(3.0)) < epsilon; + if (nearIdentity) + { + angle = static_cast(0.0); + axis = vec<3, T, Q>( + static_cast(1.0), static_cast(0.0), static_cast(0.0)); + return; + } + angle = pi(); + T xx = (m[0][0] + static_cast(1.0)) * static_cast(0.5); + T yy = (m[1][1] + static_cast(1.0)) * static_cast(0.5); + T zz = (m[2][2] + static_cast(1.0)) * static_cast(0.5); + T xy = (m[1][0] + m[0][1]) * static_cast(0.25); + T xz = (m[2][0] + m[0][2]) * static_cast(0.25); + T yz = (m[2][1] + m[1][2]) * static_cast(0.25); + if((xx > yy) && (xx > zz)) + { + if(xx < epsilon) + { + axis.x = static_cast(0.0); + axis.y = static_cast(0.7071); + axis.z = static_cast(0.7071); + } + else + { + axis.x = sqrt(xx); + axis.y = xy / axis.x; + axis.z = xz / axis.x; + } + } + else if (yy > zz) + { + if(yy < epsilon) + { + axis.x = static_cast(0.7071); + axis.y = static_cast(0.0); + axis.z = static_cast(0.7071); + } + else + { + axis.y = sqrt(yy); + axis.x = xy / axis.y; + axis.z = yz / axis.y; + } + } + else + { + if (zz < epsilon) + { + axis.x = static_cast(0.7071); + axis.y = static_cast(0.7071); + axis.z = static_cast(0.0); + } + else + { + axis.z = sqrt(zz); + axis.x = xz / axis.z; + axis.y = yz / axis.z; + } + } + return; + } + + T const angleCos = (m[0][0] + m[1][1] + m[2][2] - static_cast(1)) * static_cast(0.5); + if(angleCos >= static_cast(1.0)) + { + angle = static_cast(0.0); + } + else if (angleCos <= static_cast(-1.0)) + { + angle = pi(); + } + else + { + angle = acos(angleCos); + } + + axis = glm::normalize(glm::vec<3, T, Q>( + m[1][2] - m[2][1], m[2][0] - m[0][2], m[0][1] - m[1][0])); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> axisAngleMatrix(vec<3, T, Q> const& axis, T const angle) + { + T c = cos(angle); + T s = sin(angle); + T t = static_cast(1) - c; + vec<3, T, Q> n = normalize(axis); + + return mat<4, 4, T, Q>( + t * n.x * n.x + c, t * n.x * n.y + n.z * s, t * n.x * n.z - n.y * s, static_cast(0.0), + t * n.x * n.y - n.z * s, t * n.y * n.y + c, t * n.y * n.z + n.x * s, static_cast(0.0), + t * n.x * n.z + n.y * s, t * n.y * n.z - n.x * s, t * n.z * n.z + c, static_cast(0.0), + static_cast(0.0), static_cast(0.0), static_cast(0.0), static_cast(1.0)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> extractMatrixRotation(mat<4, 4, T, Q> const& m) + { + return mat<4, 4, T, Q>( + m[0][0], m[0][1], m[0][2], static_cast(0.0), + m[1][0], m[1][1], m[1][2], static_cast(0.0), + m[2][0], m[2][1], m[2][2], static_cast(0.0), + static_cast(0.0), static_cast(0.0), 
static_cast(0.0), static_cast(1.0)); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> interpolate(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2, T const delta) + { + mat<4, 4, T, Q> m1rot = extractMatrixRotation(m1); + mat<4, 4, T, Q> dltRotation = m2 * transpose(m1rot); + vec<3, T, Q> dltAxis; + T dltAngle; + axisAngle(dltRotation, dltAxis, dltAngle); + mat<4, 4, T, Q> out = axisAngleMatrix(dltAxis, dltAngle * delta) * m1rot; + out[3][0] = m1[3][0] + delta * (m2[3][0] - m1[3][0]); + out[3][1] = m1[3][1] + delta * (m2[3][1] - m1[3][1]); + out[3][2] = m1[3][2] + delta * (m2[3][2] - m1[3][2]); + return out; + } +}//namespace glm diff --git a/includes/glm/gtx/matrix_major_storage.hpp b/includes/glm/gtx/matrix_major_storage.hpp new file mode 100644 index 0000000..f518578 --- /dev/null +++ b/includes/glm/gtx/matrix_major_storage.hpp @@ -0,0 +1,117 @@ +/// @ref gtx_matrix_major_storage +/// @file glm/gtx/matrix_major_storage.hpp +/// +/// @see core (dependence) +/// @see gtx_extented_min_max (dependence) +/// +/// @defgroup gtx_matrix_major_storage GLM_GTX_matrix_major_storage +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Build matrices with specific matrix order, row or column + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_matrix_major_storage is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_matrix_major_storage extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_matrix_major_storage + /// @{ + + //! Build a row major matrix from row vectors. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<2, 2, T, Q> rowMajor2( + vec<2, T, Q> const& v1, + vec<2, T, Q> const& v2); + + //! Build a row major matrix from other matrix. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<2, 2, T, Q> rowMajor2( + mat<2, 2, T, Q> const& m); + + //! Build a row major matrix from row vectors. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> rowMajor3( + vec<3, T, Q> const& v1, + vec<3, T, Q> const& v2, + vec<3, T, Q> const& v3); + + //! Build a row major matrix from other matrix. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> rowMajor3( + mat<3, 3, T, Q> const& m); + + //! Build a row major matrix from row vectors. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> rowMajor4( + vec<4, T, Q> const& v1, + vec<4, T, Q> const& v2, + vec<4, T, Q> const& v3, + vec<4, T, Q> const& v4); + + //! Build a row major matrix from other matrix. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> rowMajor4( + mat<4, 4, T, Q> const& m); + + //! Build a column major matrix from column vectors. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<2, 2, T, Q> colMajor2( + vec<2, T, Q> const& v1, + vec<2, T, Q> const& v2); + + //! Build a column major matrix from other matrix. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<2, 2, T, Q> colMajor2( + mat<2, 2, T, Q> const& m); + + //! Build a column major matrix from column vectors. + //! From GLM_GTX_matrix_major_storage extension. 
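+	//! (Equivalent to constructing mat<3, 3, T, Q>(v1, v2, v3) directly, since
+	//! GLM stores matrices in column-major order.)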
+ template + GLM_FUNC_DECL mat<3, 3, T, Q> colMajor3( + vec<3, T, Q> const& v1, + vec<3, T, Q> const& v2, + vec<3, T, Q> const& v3); + + //! Build a column major matrix from other matrix. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> colMajor3( + mat<3, 3, T, Q> const& m); + + //! Build a column major matrix from column vectors. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> colMajor4( + vec<4, T, Q> const& v1, + vec<4, T, Q> const& v2, + vec<4, T, Q> const& v3, + vec<4, T, Q> const& v4); + + //! Build a column major matrix from other matrix. + //! From GLM_GTX_matrix_major_storage extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> colMajor4( + mat<4, 4, T, Q> const& m); + + /// @} +}//namespace glm + +#include "matrix_major_storage.inl" diff --git a/includes/glm/gtx/matrix_major_storage.inl b/includes/glm/gtx/matrix_major_storage.inl new file mode 100644 index 0000000..279dd34 --- /dev/null +++ b/includes/glm/gtx/matrix_major_storage.inl @@ -0,0 +1,166 @@ +/// @ref gtx_matrix_major_storage + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<2, 2, T, Q> rowMajor2 + ( + vec<2, T, Q> const& v1, + vec<2, T, Q> const& v2 + ) + { + mat<2, 2, T, Q> Result; + Result[0][0] = v1.x; + Result[1][0] = v1.y; + Result[0][1] = v2.x; + Result[1][1] = v2.y; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, Q> rowMajor2( + const mat<2, 2, T, Q>& m) + { + mat<2, 2, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rowMajor3( + const vec<3, T, Q>& v1, + const vec<3, T, Q>& v2, + const vec<3, T, Q>& v3) + { + mat<3, 3, T, Q> Result; + Result[0][0] = v1.x; + Result[1][0] = v1.y; + Result[2][0] = v1.z; + Result[0][1] = v2.x; + Result[1][1] = v2.y; + Result[2][1] = v2.z; + Result[0][2] = v3.x; + Result[1][2] = v3.y; + Result[2][2] = v3.z; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rowMajor3( + const mat<3, 3, T, Q>& m) + { + mat<3, 3, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[2][2] = m[2][2]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rowMajor4( + const vec<4, T, Q>& v1, + const vec<4, T, Q>& v2, + const vec<4, T, Q>& v3, + const vec<4, T, Q>& v4) + { + mat<4, 4, T, Q> Result; + Result[0][0] = v1.x; + Result[1][0] = v1.y; + Result[2][0] = v1.z; + Result[3][0] = v1.w; + Result[0][1] = v2.x; + Result[1][1] = v2.y; + Result[2][1] = v2.z; + Result[3][1] = v2.w; + Result[0][2] = v3.x; + Result[1][2] = v3.y; + Result[2][2] = v3.z; + Result[3][2] = v3.w; + Result[0][3] = v4.x; + Result[1][3] = v4.y; + Result[2][3] = v4.z; + Result[3][3] = v4.w; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rowMajor4( + const mat<4, 4, T, Q>& m) + { + mat<4, 4, T, Q> Result; + Result[0][0] = m[0][0]; + Result[0][1] = m[1][0]; + Result[0][2] = m[2][0]; + Result[0][3] = m[3][0]; + Result[1][0] = m[0][1]; + Result[1][1] = m[1][1]; + Result[1][2] = m[2][1]; + Result[1][3] = m[3][1]; + Result[2][0] = m[0][2]; + Result[2][1] = m[1][2]; + Result[2][2] = m[2][2]; + Result[2][3] = m[3][2]; + Result[3][0] = m[0][3]; + Result[3][1] = m[1][3]; + Result[3][2] = m[2][3]; + Result[3][3] = m[3][3]; + return Result; + } + + template + 
GLM_FUNC_QUALIFIER mat<2, 2, T, Q> colMajor2( + const vec<2, T, Q>& v1, + const vec<2, T, Q>& v2) + { + return mat<2, 2, T, Q>(v1, v2); + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, Q> colMajor2( + const mat<2, 2, T, Q>& m) + { + return mat<2, 2, T, Q>(m); + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> colMajor3( + const vec<3, T, Q>& v1, + const vec<3, T, Q>& v2, + const vec<3, T, Q>& v3) + { + return mat<3, 3, T, Q>(v1, v2, v3); + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> colMajor3( + const mat<3, 3, T, Q>& m) + { + return mat<3, 3, T, Q>(m); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> colMajor4( + const vec<4, T, Q>& v1, + const vec<4, T, Q>& v2, + const vec<4, T, Q>& v3, + const vec<4, T, Q>& v4) + { + return mat<4, 4, T, Q>(v1, v2, v3, v4); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> colMajor4( + const mat<4, 4, T, Q>& m) + { + return mat<4, 4, T, Q>(m); + } +}//namespace glm diff --git a/includes/glm/gtx/matrix_operation.hpp b/includes/glm/gtx/matrix_operation.hpp new file mode 100644 index 0000000..07ed8e8 --- /dev/null +++ b/includes/glm/gtx/matrix_operation.hpp @@ -0,0 +1,101 @@ +/// @ref gtx_matrix_operation +/// @file glm/gtx/matrix_operation.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_matrix_operation GLM_GTX_matrix_operation +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Build diagonal matrices from vectors. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_matrix_operation is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_matrix_operation extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_matrix_operation + /// @{ + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<2, 2, T, Q> diagonal2x2( + vec<2, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<2, 3, T, Q> diagonal2x3( + vec<2, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<2, 4, T, Q> diagonal2x4( + vec<2, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<3, 2, T, Q> diagonal3x2( + vec<2, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> diagonal3x3( + vec<3, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<3, 4, T, Q> diagonal3x4( + vec<3, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<4, 2, T, Q> diagonal4x2( + vec<2, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<4, 3, T, Q> diagonal4x3( + vec<3, T, Q> const& v); + + //! Build a diagonal matrix. + //! From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> diagonal4x4( + vec<4, T, Q> const& v); + + /// Build an adjugate matrix. + /// From GLM_GTX_matrix_operation extension. 
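+	/// For any m, adjugate(m) * m == determinant(m) * identity; for invertible m,
+	/// adjugate(m) == determinant(m) * inverse(m).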
+ template + GLM_FUNC_DECL mat<2, 2, T, Q> adjugate(mat<2, 2, T, Q> const& m); + + /// Build an adjugate matrix. + /// From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> adjugate(mat<3, 3, T, Q> const& m); + + /// Build an adjugate matrix. + /// From GLM_GTX_matrix_operation extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> adjugate(mat<4, 4, T, Q> const& m); + + /// @} +}//namespace glm + +#include "matrix_operation.inl" diff --git a/includes/glm/gtx/matrix_operation.inl b/includes/glm/gtx/matrix_operation.inl new file mode 100644 index 0000000..a4f4a85 --- /dev/null +++ b/includes/glm/gtx/matrix_operation.inl @@ -0,0 +1,176 @@ +/// @ref gtx_matrix_operation + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<2, 2, T, Q> diagonal2x2 + ( + vec<2, T, Q> const& v + ) + { + mat<2, 2, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 3, T, Q> diagonal2x3 + ( + vec<2, T, Q> const& v + ) + { + mat<2, 3, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 4, T, Q> diagonal2x4 + ( + vec<2, T, Q> const& v + ) + { + mat<2, 4, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 2, T, Q> diagonal3x2 + ( + vec<2, T, Q> const& v + ) + { + mat<3, 2, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> diagonal3x3 + ( + vec<3, T, Q> const& v + ) + { + mat<3, 3, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + Result[2][2] = v[2]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 4, T, Q> diagonal3x4 + ( + vec<3, T, Q> const& v + ) + { + mat<3, 4, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + Result[2][2] = v[2]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> diagonal4x4 + ( + vec<4, T, Q> const& v + ) + { + mat<4, 4, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + Result[2][2] = v[2]; + Result[3][3] = v[3]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 3, T, Q> diagonal4x3 + ( + vec<3, T, Q> const& v + ) + { + mat<4, 3, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + Result[2][2] = v[2]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 2, T, Q> diagonal4x2 + ( + vec<2, T, Q> const& v + ) + { + mat<4, 2, T, Q> Result(static_cast(1)); + Result[0][0] = v[0]; + Result[1][1] = v[1]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<2, 2, T, Q> adjugate(mat<2, 2, T, Q> const& m) + { + return mat<2, 2, T, Q>( + +m[1][1], -m[0][1], + -m[1][0], +m[0][0]); + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> adjugate(mat<3, 3, T, Q> const& m) + { + T const m00 = determinant(mat<2, 2, T, Q>(m[1][1], m[2][1], m[1][2], m[2][2])); + T const m01 = determinant(mat<2, 2, T, Q>(m[0][1], m[2][1], m[0][2], m[2][2])); + T const m02 = determinant(mat<2, 2, T, Q>(m[0][1], m[1][1], m[0][2], m[1][2])); + + T const m10 = determinant(mat<2, 2, T, Q>(m[1][0], m[2][0], m[1][2], m[2][2])); + T const m11 = determinant(mat<2, 2, T, Q>(m[0][0], m[2][0], m[0][2], m[2][2])); + T const m12 = determinant(mat<2, 2, T, Q>(m[0][0], m[1][0], m[0][2], m[1][2])); + + T const m20 = determinant(mat<2, 2, T, Q>(m[1][0], m[2][0], m[1][1], m[2][1])); + T const m21 = 
determinant(mat<2, 2, T, Q>(m[0][0], m[2][0], m[0][1], m[2][1])); + T const m22 = determinant(mat<2, 2, T, Q>(m[0][0], m[1][0], m[0][1], m[1][1])); + + return mat<3, 3, T, Q>( + +m00, -m01, +m02, + -m10, +m11, -m12, + +m20, -m21, +m22); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> adjugate(mat<4, 4, T, Q> const& m) + { + T const m00 = determinant(mat<3, 3, T, Q>(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3])); + T const m01 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3])); + T const m02 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3])); + T const m03 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2])); + + T const m10 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3])); + T const m11 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3])); + T const m12 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3])); + T const m13 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2])); + + T const m20 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], m[3][3])); + T const m21 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], m[3][3])); + T const m22 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], m[3][3])); + T const m23 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], m[3][2])); + + T const m30 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3])); + T const m31 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3])); + T const m32 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3])); + T const m33 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2])); + + return mat<4, 4, T, Q>( + +m00, -m10, +m20, -m30, + -m01, +m11, -m21, +m31, + +m02, -m12, +m22, -m32, + -m03, +m13, -m23, +m33); + } +}//namespace glm diff --git a/includes/glm/gtx/matrix_query.hpp b/includes/glm/gtx/matrix_query.hpp new file mode 100644 index 0000000..de8c655 --- /dev/null +++ b/includes/glm/gtx/matrix_query.hpp @@ -0,0 +1,75 @@ +/// @ref gtx_matrix_query +/// @file glm/gtx/matrix_query.hpp +/// +/// @see core (dependence) +/// @see gtx_vector_query (dependence) +/// +/// @defgroup gtx_matrix_query GLM_GTX_matrix_query +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Query to evaluate matrix properties + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtx/vector_query.hpp" +#include + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_matrix_query is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." 
+#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_matrix_query extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_matrix_query + /// @{ + + /// Return whether a matrix a null matrix. + /// From GLM_GTX_matrix_query extension. + template + GLM_FUNC_DECL bool isNull(mat<2, 2, T, Q> const& m, T const& epsilon); + + /// Return whether a matrix a null matrix. + /// From GLM_GTX_matrix_query extension. + template + GLM_FUNC_DECL bool isNull(mat<3, 3, T, Q> const& m, T const& epsilon); + + /// Return whether a matrix is a null matrix. + /// From GLM_GTX_matrix_query extension. + template + GLM_FUNC_DECL bool isNull(mat<4, 4, T, Q> const& m, T const& epsilon); + + /// Return whether a matrix is an identity matrix. + /// From GLM_GTX_matrix_query extension. + template class matType> + GLM_FUNC_DECL bool isIdentity(matType const& m, T const& epsilon); + + /// Return whether a matrix is a normalized matrix. + /// From GLM_GTX_matrix_query extension. + template + GLM_FUNC_DECL bool isNormalized(mat<2, 2, T, Q> const& m, T const& epsilon); + + /// Return whether a matrix is a normalized matrix. + /// From GLM_GTX_matrix_query extension. + template + GLM_FUNC_DECL bool isNormalized(mat<3, 3, T, Q> const& m, T const& epsilon); + + /// Return whether a matrix is a normalized matrix. + /// From GLM_GTX_matrix_query extension. + template + GLM_FUNC_DECL bool isNormalized(mat<4, 4, T, Q> const& m, T const& epsilon); + + /// Return whether a matrix is an orthonormalized matrix. + /// From GLM_GTX_matrix_query extension. + template class matType> + GLM_FUNC_DECL bool isOrthogonal(matType const& m, T const& epsilon); + + /// @} +}//namespace glm + +#include "matrix_query.inl" diff --git a/includes/glm/gtx/matrix_query.inl b/includes/glm/gtx/matrix_query.inl new file mode 100644 index 0000000..dc3ec84 --- /dev/null +++ b/includes/glm/gtx/matrix_query.inl @@ -0,0 +1,119 @@ +/// @ref gtx_matrix_query + +namespace glm +{ + template + GLM_FUNC_QUALIFIER bool isNull(mat<2, 2, T, Q> const& m, T const& epsilon) + { + bool result = true; + for(length_t i = 0; result && i < m.length() ; ++i) + result = isNull(m[i], epsilon); + return result; + } + + template + GLM_FUNC_QUALIFIER bool isNull(mat<3, 3, T, Q> const& m, T const& epsilon) + { + bool result = true; + for(length_t i = 0; result && i < m.length() ; ++i) + result = isNull(m[i], epsilon); + return result; + } + + template + GLM_FUNC_QUALIFIER bool isNull(mat<4, 4, T, Q> const& m, T const& epsilon) + { + bool result = true; + for(length_t i = 0; result && i < m.length() ; ++i) + result = isNull(m[i], epsilon); + return result; + } + + template + GLM_FUNC_QUALIFIER bool isIdentity(mat const& m, T const& epsilon) + { + bool result = true; + for(length_t i = 0; result && i < m.length(); ++i) + { + for(length_t j = 0; result && j < glm::min(i, m[0].length()); ++j) + result = abs(m[i][j]) <= epsilon; + if(result && i < m[0].length()) + result = abs(m[i][i] - 1) <= epsilon; + for(length_t j = i + 1; result && j < m[0].length(); ++j) + result = abs(m[i][j]) <= epsilon; + } + return result; + } + + template + GLM_FUNC_QUALIFIER bool isNormalized(mat<2, 2, T, Q> const& m, T const& epsilon) + { + bool result(true); + for(length_t i = 0; result && i < m.length(); ++i) + result = isNormalized(m[i], epsilon); + for(length_t i = 0; result && i < m.length(); ++i) + { + typename mat<2, 2, T, Q>::col_type v; + for(length_t j = 0; j < m.length(); ++j) + v[j] = m[j][i]; + result = isNormalized(v, epsilon); + } + return 
result; + } + + template + GLM_FUNC_QUALIFIER bool isNormalized(mat<3, 3, T, Q> const& m, T const& epsilon) + { + bool result(true); + for(length_t i = 0; result && i < m.length(); ++i) + result = isNormalized(m[i], epsilon); + for(length_t i = 0; result && i < m.length(); ++i) + { + typename mat<3, 3, T, Q>::col_type v; + for(length_t j = 0; j < m.length(); ++j) + v[j] = m[j][i]; + result = isNormalized(v, epsilon); + } + return result; + } + + template + GLM_FUNC_QUALIFIER bool isNormalized(mat<4, 4, T, Q> const& m, T const& epsilon) + { + bool result(true); + for(length_t i = 0; result && i < m.length(); ++i) + result = isNormalized(m[i], epsilon); + for(length_t i = 0; result && i < m.length(); ++i) + { + typename mat<4, 4, T, Q>::col_type v; + for(length_t j = 0; j < m.length(); ++j) + v[j] = m[j][i]; + result = isNormalized(v, epsilon); + } + return result; + } + + template + GLM_FUNC_QUALIFIER bool isOrthogonal(mat const& m, T const& epsilon) + { + bool result = true; + for(length_t i(0); result && i < m.length(); ++i) + { + result = isNormalized(m[i], epsilon); + for(length_t j(i + 1); result && j < m.length(); ++j) + result = abs(dot(m[i], m[j])) <= epsilon; + } + + if(result) + { + mat tmp = transpose(m); + for(length_t i(0); result && i < m.length(); ++i) + { + result = isNormalized(tmp[i], epsilon); + for(length_t j(i + 1); result && j < m.length(); ++j) + result = abs(dot(tmp[i], tmp[j])) <= epsilon; + } + } + return result; + } +}//namespace glm diff --git a/includes/glm/gtx/matrix_transform_2d.hpp b/includes/glm/gtx/matrix_transform_2d.hpp new file mode 100644 index 0000000..deb8da2 --- /dev/null +++ b/includes/glm/gtx/matrix_transform_2d.hpp @@ -0,0 +1,79 @@ +/// @ref gtx_matrix_transform_2d +/// @file glm/gtx/matrix_transform_2d.hpp +/// @author Miguel Ángel Pérez Martínez +/// +/// @see core (dependence) +/// +/// @defgroup gtx_matrix_transform_2d GLM_GTX_matrix_transform_2d +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Defines functions that generate common 2d transformation matrices. + +#pragma once + +// Dependency: +#include "../mat3x3.hpp" +#include "../vec2.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_matrix_transform_2d is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_matrix_transform_2d extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_matrix_transform_2d + /// @{ + + /// Builds a translation 3 * 3 matrix created from a vector of 2 components. + /// + /// @param m Input matrix multiplied by this translation matrix. + /// @param v Coordinates of a translation vector. + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> translate( + mat<3, 3, T, Q> const& m, + vec<2, T, Q> const& v); + + /// Builds a rotation 3 * 3 matrix created from an angle. + /// + /// @param m Input matrix multiplied by this translation matrix. + /// @param angle Rotation angle expressed in radians. + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rotate( + mat<3, 3, T, Q> const& m, + T angle); + + /// Builds a scale 3 * 3 matrix created from a vector of 2 components. + /// + /// @param m Input matrix multiplied by this translation matrix. + /// @param v Coordinates of a scale vector. 
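// ----------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the vendored GLM sources): the
// GLM_GTX_matrix_query predicates implemented above all take an explicit
// tolerance. The demo function name, the 1e-4f epsilon and the matrix values
// are illustrative; the include path assumes the vendored copy under
// includes/glm is on the compiler's include path.
// ----------------------------------------------------------------------------
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/matrix_query.hpp>

static void matrix_query_demo()
{
	float const eps = 1e-4f;

	bool const nullOk  = glm::isNull(glm::mat3(0.0f), eps);       // true: every element ~0
	bool const identOk = glm::isIdentity(glm::mat4(1.0f), eps);   // true
	bool const orthoOk = glm::isOrthogonal(glm::mat4(1.0f), eps); // true: unit, mutually perpendicular columns

	// A non-uniform scale breaks the normalization check.
	glm::mat3 scaled(1.0f);
	scaled[0] *= 2.0f;
	bool const normOk = glm::isNormalized(scaled, eps);           // false
	(void)nullOk; (void)identOk; (void)orthoOk; (void)normOk;
}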
+ template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> scale( + mat<3, 3, T, Q> const& m, + vec<2, T, Q> const& v); + + /// Builds an horizontal (parallel to the x axis) shear 3 * 3 matrix. + /// + /// @param m Input matrix multiplied by this translation matrix. + /// @param y Shear factor. + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX( + mat<3, 3, T, Q> const& m, + T y); + + /// Builds a vertical (parallel to the y axis) shear 3 * 3 matrix. + /// + /// @param m Input matrix multiplied by this translation matrix. + /// @param x Shear factor. + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY( + mat<3, 3, T, Q> const& m, + T x); + + /// @} +}//namespace glm + +#include "matrix_transform_2d.inl" diff --git a/includes/glm/gtx/matrix_transform_2d.inl b/includes/glm/gtx/matrix_transform_2d.inl new file mode 100644 index 0000000..a68d24d --- /dev/null +++ b/includes/glm/gtx/matrix_transform_2d.inl @@ -0,0 +1,68 @@ +/// @ref gtx_matrix_transform_2d +/// @author Miguel Ángel Pérez Martínez + +#include "../trigonometric.hpp" + +namespace glm +{ + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> translate( + mat<3, 3, T, Q> const& m, + vec<2, T, Q> const& v) + { + mat<3, 3, T, Q> Result(m); + Result[2] = m[0] * v[0] + m[1] * v[1] + m[2]; + return Result; + } + + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rotate( + mat<3, 3, T, Q> const& m, + T angle) + { + T const a = angle; + T const c = cos(a); + T const s = sin(a); + + mat<3, 3, T, Q> Result; + Result[0] = m[0] * c + m[1] * s; + Result[1] = m[0] * -s + m[1] * c; + Result[2] = m[2]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> scale( + mat<3, 3, T, Q> const& m, + vec<2, T, Q> const& v) + { + mat<3, 3, T, Q> Result; + Result[0] = m[0] * v[0]; + Result[1] = m[1] * v[1]; + Result[2] = m[2]; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX( + mat<3, 3, T, Q> const& m, + T y) + { + mat<3, 3, T, Q> Result(1); + Result[0][1] = y; + return m * Result; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY( + mat<3, 3, T, Q> const& m, + T x) + { + mat<3, 3, T, Q> Result(1); + Result[1][0] = x; + return m * Result; + } + +}//namespace glm diff --git a/includes/glm/gtx/mixed_product.hpp b/includes/glm/gtx/mixed_product.hpp new file mode 100644 index 0000000..a091274 --- /dev/null +++ b/includes/glm/gtx/mixed_product.hpp @@ -0,0 +1,39 @@ +/// @ref gtx_mixed_product +/// @file glm/gtx/mixed_product.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_mixed_product GLM_GTX_mixed_producte +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Mixed product of 3 vectors. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_mixed_product is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." 
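// ----------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the vendored GLM sources): the
// GLM_GTX_matrix_transform_2d helpers declared above compose 3x3 homogeneous
// transforms the same way the 4x4 translate/rotate/scale do. The function name
// and the numeric values are illustrative.
// ----------------------------------------------------------------------------
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/matrix_transform_2d.hpp>

static glm::vec2 transform_point_2d(glm::vec2 const& p)
{
	// Build translate * rotate * scale, then apply it to a homogeneous point (w = 1).
	glm::mat3 m(1.0f);
	m = glm::translate(m, glm::vec2(10.0f, 5.0f));
	m = glm::rotate(m, glm::radians(45.0f));
	m = glm::scale(m, glm::vec2(2.0f, 2.0f));

	glm::vec3 const transformed = m * glm::vec3(p, 1.0f);
	return glm::vec2(transformed);
}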
+#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_mixed_product extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_mixed_product + /// @{ + + /// @brief Mixed product of 3 vectors (from GLM_GTX_mixed_product extension) + template + GLM_FUNC_DECL T mixedProduct( + vec<3, T, Q> const& v1, + vec<3, T, Q> const& v2, + vec<3, T, Q> const& v3); + + /// @} +}// namespace glm + +#include "mixed_product.inl" diff --git a/includes/glm/gtx/mixed_product.inl b/includes/glm/gtx/mixed_product.inl new file mode 100644 index 0000000..e5cdbdb --- /dev/null +++ b/includes/glm/gtx/mixed_product.inl @@ -0,0 +1,15 @@ +/// @ref gtx_mixed_product + +namespace glm +{ + template + GLM_FUNC_QUALIFIER T mixedProduct + ( + vec<3, T, Q> const& v1, + vec<3, T, Q> const& v2, + vec<3, T, Q> const& v3 + ) + { + return dot(cross(v1, v2), v3); + } +}//namespace glm diff --git a/includes/glm/gtx/norm.hpp b/includes/glm/gtx/norm.hpp new file mode 100644 index 0000000..ba6958b --- /dev/null +++ b/includes/glm/gtx/norm.hpp @@ -0,0 +1,85 @@ +/// @ref gtx_norm +/// @file glm/gtx/norm.hpp +/// +/// @see core (dependence) +/// @see gtx_quaternion (dependence) +/// @see gtx_component_wise (dependence) +/// +/// @defgroup gtx_norm GLM_GTX_norm +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Various ways to compute vector norms. + +#pragma once + +// Dependency: +#include "../geometric.hpp" +#include "../gtx/component_wise.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_norm is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_norm extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_norm + /// @{ + + /// Returns the squared length of x. + /// From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T length2(vec const& x); + + /// Returns the squared distance between p0 and p1, i.e., length2(p0 - p1). + /// From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T distance2(vec const& p0, vec const& p1); + + //! Returns the L1 norm between x and y. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T l1Norm(vec<3, T, Q> const& x, vec<3, T, Q> const& y); + + //! Returns the L1 norm of v. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T l1Norm(vec<3, T, Q> const& v); + + //! Returns the L2 norm between x and y. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T l2Norm(vec<3, T, Q> const& x, vec<3, T, Q> const& y); + + //! Returns the L2 norm of v. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T l2Norm(vec<3, T, Q> const& x); + + //! Returns the L norm between x and y. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T lxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y, unsigned int Depth); + + //! Returns the L norm of v. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T lxNorm(vec<3, T, Q> const& x, unsigned int Depth); + + //! Returns the LMax norm between x and y. + //! From GLM_GTX_norm extension. + template + GLM_FUNC_DECL T lMaxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y); + + //! Returns the LMax norm of v. + //! From GLM_GTX_norm extension. 
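// ----------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the vendored GLM sources):
// mixedProduct is the scalar triple product dot(cross(v1, v2), v3), i.e. the
// signed volume of the parallelepiped spanned by the three vectors, and the
// GLM_GTX_norm helpers give squared/L1/L2/Lp/Linf distances without spelling
// the formulas out by hand. The vectors below are illustrative.
// ----------------------------------------------------------------------------
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/mixed_product.hpp>
#include <glm/gtx/norm.hpp>

static void norm_demo()
{
	// The unit axes span a parallelepiped of volume 1.
	float const volume = glm::mixedProduct(
		glm::vec3(1, 0, 0), glm::vec3(0, 1, 0), glm::vec3(0, 0, 1)); // == 1.0f

	glm::vec3 const a(1.0f, 2.0f, 2.0f);
	glm::vec3 const b(0.0f);

	float const d2   = glm::distance2(a, b);    // 9: squared Euclidean distance
	float const l1   = glm::l1Norm(a, b);       // 5: |1| + |2| + |2|
	float const l2   = glm::l2Norm(a, b);       // 3: sqrt(9)
	float const lmax = glm::lMaxNorm(a, b);     // 2: largest absolute component
	float const l3   = glm::lxNorm(a, b, 3u);   // cube root of |1|^3 + |2|^3 + |2|^3
	(void)volume; (void)d2; (void)l1; (void)l2; (void)lmax; (void)l3;
}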
+ template + GLM_FUNC_DECL T lMaxNorm(vec<3, T, Q> const& x); + + /// @} +}//namespace glm + +#include "norm.inl" diff --git a/includes/glm/gtx/norm.inl b/includes/glm/gtx/norm.inl new file mode 100644 index 0000000..4a9f796 --- /dev/null +++ b/includes/glm/gtx/norm.inl @@ -0,0 +1,95 @@ +/// @ref gtx_norm + +#include "../detail/qualifier.hpp" + +namespace glm{ +namespace detail +{ + template + struct compute_length2 + { + GLM_FUNC_QUALIFIER static T call(vec const& v) + { + return dot(v, v); + } + }; +}//namespace detail + + template + GLM_FUNC_QUALIFIER genType length2(genType x) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'length2' accepts only floating-point inputs"); + return x * x; + } + + template + GLM_FUNC_QUALIFIER T length2(vec const& v) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'length2' accepts only floating-point inputs"); + return detail::compute_length2::value>::call(v); + } + + template + GLM_FUNC_QUALIFIER T distance2(T p0, T p1) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'distance2' accepts only floating-point inputs"); + return length2(p1 - p0); + } + + template + GLM_FUNC_QUALIFIER T distance2(vec const& p0, vec const& p1) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'distance2' accepts only floating-point inputs"); + return length2(p1 - p0); + } + + template + GLM_FUNC_QUALIFIER T l1Norm(vec<3, T, Q> const& a, vec<3, T, Q> const& b) + { + return abs(b.x - a.x) + abs(b.y - a.y) + abs(b.z - a.z); + } + + template + GLM_FUNC_QUALIFIER T l1Norm(vec<3, T, Q> const& v) + { + return abs(v.x) + abs(v.y) + abs(v.z); + } + + template + GLM_FUNC_QUALIFIER T l2Norm(vec<3, T, Q> const& a, vec<3, T, Q> const& b + ) + { + return length(b - a); + } + + template + GLM_FUNC_QUALIFIER T l2Norm(vec<3, T, Q> const& v) + { + return length(v); + } + + template + GLM_FUNC_QUALIFIER T lxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y, unsigned int Depth) + { + return pow(pow(abs(y.x - x.x), T(Depth)) + pow(abs(y.y - x.y), T(Depth)) + pow(abs(y.z - x.z), T(Depth)), T(1) / T(Depth)); + } + + template + GLM_FUNC_QUALIFIER T lxNorm(vec<3, T, Q> const& v, unsigned int Depth) + { + return pow(pow(abs(v.x), T(Depth)) + pow(abs(v.y), T(Depth)) + pow(abs(v.z), T(Depth)), T(1) / T(Depth)); + } + + template + GLM_FUNC_QUALIFIER T lMaxNorm(vec<3, T, Q> const& a, vec<3, T, Q> const& b) + { + return compMax(abs(b - a)); + } + + template + GLM_FUNC_QUALIFIER T lMaxNorm(vec<3, T, Q> const& v) + { + return compMax(abs(v)); + } + +}//namespace glm diff --git a/includes/glm/gtx/normal.hpp b/includes/glm/gtx/normal.hpp new file mode 100644 index 0000000..8b3a4b5 --- /dev/null +++ b/includes/glm/gtx/normal.hpp @@ -0,0 +1,39 @@ +/// @ref gtx_normal +/// @file glm/gtx/normal.hpp +/// +/// @see core (dependence) +/// @see gtx_extented_min_max (dependence) +/// +/// @defgroup gtx_normal GLM_GTX_normal +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Compute the normal of a triangle. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_normal is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." 
+#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_normal extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_normal + /// @{ + + /// Computes triangle normal from triangle points. + /// + /// @see gtx_normal + template + GLM_FUNC_DECL vec<3, T, Q> triangleNormal(vec<3, T, Q> const& p1, vec<3, T, Q> const& p2, vec<3, T, Q> const& p3); + + /// @} +}//namespace glm + +#include "normal.inl" diff --git a/includes/glm/gtx/normal.inl b/includes/glm/gtx/normal.inl new file mode 100644 index 0000000..74f9fc9 --- /dev/null +++ b/includes/glm/gtx/normal.inl @@ -0,0 +1,15 @@ +/// @ref gtx_normal + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> triangleNormal + ( + vec<3, T, Q> const& p1, + vec<3, T, Q> const& p2, + vec<3, T, Q> const& p3 + ) + { + return normalize(cross(p1 - p2, p1 - p3)); + } +}//namespace glm diff --git a/includes/glm/gtx/normalize_dot.hpp b/includes/glm/gtx/normalize_dot.hpp new file mode 100644 index 0000000..04a6b08 --- /dev/null +++ b/includes/glm/gtx/normalize_dot.hpp @@ -0,0 +1,47 @@ +/// @ref gtx_normalize_dot +/// @file glm/gtx/normalize_dot.hpp +/// +/// @see core (dependence) +/// @see gtx_fast_square_root (dependence) +/// +/// @defgroup gtx_normalize_dot GLM_GTX_normalize_dot +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Dot product of vectors that need to be normalize with a single square root. + +#pragma once + +// Dependency: +#include "../gtx/fast_square_root.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_normalize_dot is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_normalize_dot extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_normalize_dot + /// @{ + + /// Normalize parameters and returns the dot product of x and y. + /// It's faster that dot(normalize(x), normalize(y)). + /// + /// @see gtx_normalize_dot extension. + template + GLM_FUNC_DECL T normalizeDot(vec const& x, vec const& y); + + /// Normalize parameters and returns the dot product of x and y. + /// Faster that dot(fastNormalize(x), fastNormalize(y)). + /// + /// @see gtx_normalize_dot extension. 
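// ----------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the vendored GLM sources):
// triangleNormal builds a normalized facet normal from three vertices, and
// normalizeDot returns the cosine of the angle between two unnormalized
// vectors with a single inversesqrt instead of two normalize calls. The vertex
// positions are illustrative.
// ----------------------------------------------------------------------------
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/normal.hpp>
#include <glm/gtx/normalize_dot.hpp>

static void normal_demo()
{
	// Triangle lying in the XZ plane; the normal points along +Y or -Y
	// depending on the winding order.
	glm::vec3 const p1(0.0f, 0.0f, 0.0f);
	glm::vec3 const p2(1.0f, 0.0f, 0.0f);
	glm::vec3 const p3(0.0f, 0.0f, 1.0f);
	glm::vec3 const n = glm::triangleNormal(p1, p2, p3);

	// Cosine of the angle between an unnormalized vector and the normal.
	float const c = glm::normalizeDot(glm::vec3(0.0f, 3.0f, 0.0f), n);
	(void)c;
}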
+ template + GLM_FUNC_DECL T fastNormalizeDot(vec const& x, vec const& y); + + /// @} +}//namespace glm + +#include "normalize_dot.inl" diff --git a/includes/glm/gtx/normalize_dot.inl b/includes/glm/gtx/normalize_dot.inl new file mode 100644 index 0000000..7bcd9a5 --- /dev/null +++ b/includes/glm/gtx/normalize_dot.inl @@ -0,0 +1,16 @@ +/// @ref gtx_normalize_dot + +namespace glm +{ + template + GLM_FUNC_QUALIFIER T normalizeDot(vec const& x, vec const& y) + { + return glm::dot(x, y) * glm::inversesqrt(glm::dot(x, x) * glm::dot(y, y)); + } + + template + GLM_FUNC_QUALIFIER T fastNormalizeDot(vec const& x, vec const& y) + { + return glm::dot(x, y) * glm::fastInverseSqrt(glm::dot(x, x) * glm::dot(y, y)); + } +}//namespace glm diff --git a/includes/glm/gtx/number_precision.hpp b/includes/glm/gtx/number_precision.hpp new file mode 100644 index 0000000..5b9663e --- /dev/null +++ b/includes/glm/gtx/number_precision.hpp @@ -0,0 +1,44 @@ +/// @ref gtx_number_precision +/// @file glm/gtx/number_precision.hpp +/// +/// @see core (dependence) +/// @see gtc_type_precision (dependence) +/// @see gtc_quaternion (dependence) +/// +/// @defgroup gtx_number_precision GLM_GTX_number_precision +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Defined size types. + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/type_precision.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_number_precision is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_number_precision extension included") +#endif + +namespace glm{ + ///////////////////////////// + // Unsigned int vector types + + /// @addtogroup gtx_number_precision + /// @{ + + ////////////////////// + // Float matrix types + + typedef f32 f32mat1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) + typedef f32 f32mat1x1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) + typedef f64 f64mat1; //!< \brief Double-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) + typedef f64 f64mat1x1; //!< \brief Double-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) + + /// @} +}//namespace glm + diff --git a/includes/glm/gtx/optimum_pow.hpp b/includes/glm/gtx/optimum_pow.hpp new file mode 100644 index 0000000..ac34e7e --- /dev/null +++ b/includes/glm/gtx/optimum_pow.hpp @@ -0,0 +1,50 @@ +/// @ref gtx_optimum_pow +/// @file glm/gtx/optimum_pow.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_optimum_pow GLM_GTX_optimum_pow +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Integer exponentiation of power functions. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_optimum_pow is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_optimum_pow extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_optimum_pow + /// @{ + + /// Returns x raised to the power of 2. 
+ /// + /// @see gtx_optimum_pow + template + GLM_FUNC_DECL genType pow2(genType const& x); + + /// Returns x raised to the power of 3. + /// + /// @see gtx_optimum_pow + template + GLM_FUNC_DECL genType pow3(genType const& x); + + /// Returns x raised to the power of 4. + /// + /// @see gtx_optimum_pow + template + GLM_FUNC_DECL genType pow4(genType const& x); + + /// @} +}//namespace glm + +#include "optimum_pow.inl" diff --git a/includes/glm/gtx/optimum_pow.inl b/includes/glm/gtx/optimum_pow.inl new file mode 100644 index 0000000..a26c19c --- /dev/null +++ b/includes/glm/gtx/optimum_pow.inl @@ -0,0 +1,22 @@ +/// @ref gtx_optimum_pow + +namespace glm +{ + template + GLM_FUNC_QUALIFIER genType pow2(genType const& x) + { + return x * x; + } + + template + GLM_FUNC_QUALIFIER genType pow3(genType const& x) + { + return x * x * x; + } + + template + GLM_FUNC_QUALIFIER genType pow4(genType const& x) + { + return (x * x) * (x * x); + } +}//namespace glm diff --git a/includes/glm/gtx/orthonormalize.hpp b/includes/glm/gtx/orthonormalize.hpp new file mode 100644 index 0000000..801b755 --- /dev/null +++ b/includes/glm/gtx/orthonormalize.hpp @@ -0,0 +1,47 @@ +/// @ref gtx_orthonormalize +/// @file glm/gtx/orthonormalize.hpp +/// +/// @see core (dependence) +/// @see gtx_extented_min_max (dependence) +/// +/// @defgroup gtx_orthonormalize GLM_GTX_orthonormalize +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Orthonormalize matrices. + +#pragma once + +// Dependency: +#include "../vec3.hpp" +#include "../mat3x3.hpp" +#include "../geometric.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_orthonormalize is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_orthonormalize extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_orthonormalize + /// @{ + + /// Returns the orthonormalized matrix of m. + /// + /// @see gtx_orthonormalize + template + GLM_FUNC_DECL mat<3, 3, T, Q> orthonormalize(mat<3, 3, T, Q> const& m); + + /// Orthonormalizes x according y. 
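// ----------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the vendored GLM sources):
// pow2, pow3 and pow4 expand to plain multiplications, which is convenient for
// scalars and component-wise vector math where a full pow() call would be
// overkill. The falloff functions below are illustrative.
// ----------------------------------------------------------------------------
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/optimum_pow.hpp>

static float attenuation(float distance)
{
	// 1 / (1 + d^2) style falloff written with pow2 instead of d * d.
	return 1.0f / (1.0f + glm::pow2(distance));
}

static glm::vec3 quartic_falloff(glm::vec3 const& v)
{
	// pow2/pow3/pow4 also work component-wise on vectors.
	return glm::pow4(v);
}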
+ /// + /// @see gtx_orthonormalize + template + GLM_FUNC_DECL vec<3, T, Q> orthonormalize(vec<3, T, Q> const& x, vec<3, T, Q> const& y); + + /// @} +}//namespace glm + +#include "orthonormalize.inl" diff --git a/includes/glm/gtx/orthonormalize.inl b/includes/glm/gtx/orthonormalize.inl new file mode 100644 index 0000000..cb553ba --- /dev/null +++ b/includes/glm/gtx/orthonormalize.inl @@ -0,0 +1,29 @@ +/// @ref gtx_orthonormalize + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> orthonormalize(mat<3, 3, T, Q> const& m) + { + mat<3, 3, T, Q> r = m; + + r[0] = normalize(r[0]); + + T d0 = dot(r[0], r[1]); + r[1] -= r[0] * d0; + r[1] = normalize(r[1]); + + T d1 = dot(r[1], r[2]); + d0 = dot(r[0], r[2]); + r[2] -= r[0] * d0 + r[1] * d1; + r[2] = normalize(r[2]); + + return r; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> orthonormalize(vec<3, T, Q> const& x, vec<3, T, Q> const& y) + { + return normalize(x - y * dot(y, x)); + } +}//namespace glm diff --git a/includes/glm/gtx/pca.hpp b/includes/glm/gtx/pca.hpp new file mode 100644 index 0000000..26f9aec --- /dev/null +++ b/includes/glm/gtx/pca.hpp @@ -0,0 +1,112 @@ +/// @ref gtx_pca +/// @file glm/gtx/pca.hpp +/// +/// @see core (dependence) +/// @see ext_scalar_relational (dependence) +/// +/// @defgroup gtx_pca GLM_GTX_pca +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Implements functions required for fundamental 'princple component analysis' in 2D, 3D, and 4D: +/// 1) Computing a covariance matrics from a list of _relative_ position vectors +/// 2) Compute the eigenvalues and eigenvectors of the covariance matrics +/// This is useful, e.g., to compute an object-aligned bounding box from vertices of an object. +/// https://en.wikipedia.org/wiki/Principal_component_analysis +/// +/// Example: +/// ``` +/// std::vector ptData; +/// // ... fill ptData with some point data, e.g. vertices +/// +/// glm::dvec3 center = computeCenter(ptData); +/// +/// glm::dmat3 covarMat = glm::computeCovarianceMatrix(ptData.data(), ptData.size(), center); +/// +/// glm::dvec3 evals; +/// glm::dmat3 evecs; +/// int evcnt = glm::findEigenvaluesSymReal(covarMat, evals, evecs); +/// +/// if(evcnt != 3) +/// // ... error handling +/// +/// glm::sortEigenvalues(evals, evecs); +/// +/// // ... now evecs[0] points in the direction (symmetric) of the largest spatial distribution within ptData +/// ``` + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../ext/scalar_relational.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_pca is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." 
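// ----------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the vendored GLM sources): the
// matrix overload of orthonormalize re-orthogonalizes a drifting 3x3 rotation
// basis (Gram-Schmidt), and the vector overload removes from x the component
// parallel to a normalized y. The "slightly skewed basis" values are
// illustrative.
// ----------------------------------------------------------------------------
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/orthonormalize.hpp>

static void orthonormalize_demo()
{
	// After many incremental rotations a basis accumulates error; clean it up.
	glm::mat3 basis(1.0f);
	basis[1] += glm::vec3(0.01f, 0.0f, 0.0f);            // slight skew
	glm::mat3 const clean = glm::orthonormalize(basis);  // columns unit and perpendicular again

	// Build a tangent perpendicular to a (normalized) normal.
	glm::vec3 const n = glm::vec3(0.0f, 1.0f, 0.0f);
	glm::vec3 const t = glm::orthonormalize(glm::vec3(1.0f, 0.2f, 0.0f), n);
	(void)clean; (void)t;
}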
+#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_pca extension included") +#endif + +namespace glm { + /// @addtogroup gtx_pca + /// @{ + + /// Compute a covariance matrix form an array of relative coordinates `v` (e.g., relative to the center of gravity of the object) + /// @param v Points to a memory holding `n` times vectors + /// @param n Number of points in v + template + GLM_INLINE mat computeCovarianceMatrix(vec const* v, size_t n); + + /// Compute a covariance matrix form an array of absolute coordinates `v` and a precomputed center of gravity `c` + /// @param v Points to a memory holding `n` times vectors + /// @param n Number of points in v + /// @param c Precomputed center of gravity + template + GLM_INLINE mat computeCovarianceMatrix(vec const* v, size_t n, vec const& c); + + /// Compute a covariance matrix form a pair of iterators `b` (begin) and `e` (end) of a container with relative coordinates (e.g., relative to the center of gravity of the object) + /// Dereferencing an iterator of type I must yield a `vec<D, T, Q%gt;` + template + GLM_FUNC_DECL mat computeCovarianceMatrix(I const& b, I const& e); + + /// Compute a covariance matrix form a pair of iterators `b` (begin) and `e` (end) of a container with absolute coordinates and a precomputed center of gravity `c` + /// Dereferencing an iterator of type I must yield a `vec<D, T, Q%gt;` + template + GLM_FUNC_DECL mat computeCovarianceMatrix(I const& b, I const& e, vec const& c); + + /// Assuming the provided covariance matrix `covarMat` is symmetric and real-valued, this function find the `D` Eigenvalues of the matrix, and also provides the corresponding Eigenvectors. + /// Note: the data in `outEigenvalues` and `outEigenvectors` are in matching order, i.e. `outEigenvector[i]` is the Eigenvector of the Eigenvalue `outEigenvalue[i]`. + /// This is a numeric implementation to find the Eigenvalues, using 'QL decomposition` (variant of QR decomposition: https://en.wikipedia.org/wiki/QR_decomposition). + /// + /// @param[in] covarMat A symmetric, real-valued covariance matrix, e.g. computed from computeCovarianceMatrix + /// @param[out] outEigenvalues Vector to receive the found eigenvalues + /// @param[out] outEigenvectors Matrix to receive the found eigenvectors corresponding to the found eigenvalues, as column vectors + /// @return The number of eigenvalues found, usually D if the precondition of the covariance matrix is met. + template + GLM_FUNC_DECL unsigned int findEigenvaluesSymReal + ( + mat const& covarMat, + vec& outEigenvalues, + mat& outEigenvectors + ); + + /// Sorts a group of Eigenvalues&Eigenvectors, for largest Eigenvalue to smallest Eigenvalue. + /// The data in `outEigenvalues` and `outEigenvectors` are assumed to be matching order, i.e. `outEigenvector[i]` is the Eigenvector of the Eigenvalue `outEigenvalue[i]`. + template + GLM_FUNC_DISCARD_DECL void sortEigenvalues(vec<2, T, Q>& eigenvalues, mat<2, 2, T, Q>& eigenvectors); + + /// Sorts a group of Eigenvalues&Eigenvectors, for largest Eigenvalue to smallest Eigenvalue. + /// The data in `outEigenvalues` and `outEigenvectors` are assumed to be matching order, i.e. `outEigenvector[i]` is the Eigenvector of the Eigenvalue `outEigenvalue[i]`. + template + GLM_FUNC_DISCARD_DECL void sortEigenvalues(vec<3, T, Q>& eigenvalues, mat<3, 3, T, Q>& eigenvectors); + + /// Sorts a group of Eigenvalues&Eigenvectors, for largest Eigenvalue to smallest Eigenvalue. 
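// ----------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the vendored GLM sources): in
// addition to the pointer-plus-count example in the header comment above, the
// covariance matrix can be built directly from container iterators. The demo
// function name is illustrative and the center is assumed to be precomputed by
// the caller.
// ----------------------------------------------------------------------------
#define GLM_ENABLE_EXPERIMENTAL
#include <vector>
#include <glm/glm.hpp>
#include <glm/gtx/pca.hpp>

static void pca_demo(std::vector<glm::dvec3> const& points, glm::dvec3 const& center)
{
	// Iterator overload: absolute coordinates plus a precomputed center of gravity.
	glm::dmat3 const covar = glm::computeCovarianceMatrix(points.begin(), points.end(), center);

	glm::dvec3 evals;
	glm::dmat3 evecs;
	if (glm::findEigenvaluesSymReal(covar, evals, evecs) == 3)
		glm::sortEigenvalues(evals, evecs);   // evecs[0] is now the dominant axis
}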
+ /// The data in `outEigenvalues` and `outEigenvectors` are assumed to be matching order, i.e. `outEigenvector[i]` is the Eigenvector of the Eigenvalue `outEigenvalue[i]`. + template + GLM_FUNC_DISCARD_DECL void sortEigenvalues(vec<4, T, Q>& eigenvalues, mat<4, 4, T, Q>& eigenvectors); + + /// @} +}//namespace glm + +#include "pca.inl" diff --git a/includes/glm/gtx/pca.inl b/includes/glm/gtx/pca.inl new file mode 100644 index 0000000..94cae94 --- /dev/null +++ b/includes/glm/gtx/pca.inl @@ -0,0 +1,343 @@ +/// @ref gtx_pca + +#ifndef GLM_HAS_CXX11_STL +#include +#else +#include +#endif + +namespace glm { + + + template + GLM_FUNC_QUALIFIER mat computeCovarianceMatrix(vec const* v, size_t n) + { + return computeCovarianceMatrix const*>(v, v + n); + } + + + template + GLM_FUNC_QUALIFIER mat computeCovarianceMatrix(vec const* v, size_t n, vec const& c) + { + return computeCovarianceMatrix const*>(v, v + n, c); + } + + + template + GLM_FUNC_QUALIFIER mat computeCovarianceMatrix(I const& b, I const& e) + { + glm::mat m(0); + + size_t cnt = 0; + for(I i = b; i != e; i++) + { + vec const& v = *i; + for(length_t x = 0; x < D; ++x) + for(length_t y = 0; y < D; ++y) + m[x][y] += static_cast(v[x] * v[y]); + cnt++; + } + if(cnt > 0) + m /= static_cast(cnt); + + return m; + } + + + template + GLM_FUNC_QUALIFIER mat computeCovarianceMatrix(I const& b, I const& e, vec const& c) + { + glm::mat m(0); + glm::vec v; + + size_t cnt = 0; + for(I i = b; i != e; i++) + { + v = *i - c; + for(length_t x = 0; x < D; ++x) + for(length_t y = 0; y < D; ++y) + m[x][y] += static_cast(v[x] * v[y]); + cnt++; + } + if(cnt > 0) + m /= static_cast(cnt); + + return m; + } + + namespace _internal_ + { + + template + GLM_FUNC_QUALIFIER static T transferSign(T const& v, T const& s) + { + return ((s) >= 0 ? glm::abs(v) : -glm::abs(v)); + } + + template + GLM_FUNC_QUALIFIER static T pythag(T const& a, T const& b) { + static const T epsilon = static_cast(0.0000001); + T absa = glm::abs(a); + T absb = glm::abs(b); + if(absa > absb) { + absb /= absa; + absb *= absb; + return absa * glm::sqrt(static_cast(1) + absb); + } + if(glm::equal(absb, 0, epsilon)) return static_cast(0); + absa /= absb; + absa *= absa; + return absb * glm::sqrt(static_cast(1) + absa); + } + + } + + template + GLM_FUNC_QUALIFIER unsigned int findEigenvaluesSymReal + ( + mat const& covarMat, + vec& outEigenvalues, + mat& outEigenvectors + ) + { + using _internal_::transferSign; + using _internal_::pythag; + + T a[D * D]; // matrix -- input and workspace for algorithm (will be changed inplace) + T d[D]; // diagonal elements + T e[D]; // off-diagonal elements + + for(length_t r = 0; r < D; r++) + for(length_t c = 0; c < D; c++) + a[(r) * D + (c)] = covarMat[c][r]; + + // 1. Householder reduction. + length_t l, k, j, i; + T scale, hh, h, g, f; + static const T epsilon = static_cast(0.0000001); + + for(i = D; i >= 2; i--) + { + l = i - 1; + h = scale = 0; + if(l > 1) + { + for(k = 1; k <= l; k++) + { + scale += glm::abs(a[(i - 1) * D + (k - 1)]); + } + if(glm::equal(scale, 0, epsilon)) + { + e[i - 1] = a[(i - 1) * D + (l - 1)]; + } + else + { + for(k = 1; k <= l; k++) + { + a[(i - 1) * D + (k - 1)] /= scale; + h += a[(i - 1) * D + (k - 1)] * a[(i - 1) * D + (k - 1)]; + } + f = a[(i - 1) * D + (l - 1)]; + g = ((f >= 0) ? 
-glm::sqrt(h) : glm::sqrt(h)); + e[i - 1] = scale * g; + h -= f * g; + a[(i - 1) * D + (l - 1)] = f - g; + f = 0; + for(j = 1; j <= l; j++) + { + a[(j - 1) * D + (i - 1)] = a[(i - 1) * D + (j - 1)] / h; + g = 0; + for(k = 1; k <= j; k++) + { + g += a[(j - 1) * D + (k - 1)] * a[(i - 1) * D + (k - 1)]; + } + for(k = j + 1; k <= l; k++) + { + g += a[(k - 1) * D + (j - 1)] * a[(i - 1) * D + (k - 1)]; + } + e[j - 1] = g / h; + f += e[j - 1] * a[(i - 1) * D + (j - 1)]; + } + hh = f / (h + h); + for(j = 1; j <= l; j++) + { + f = a[(i - 1) * D + (j - 1)]; + e[j - 1] = g = e[j - 1] - hh * f; + for(k = 1; k <= j; k++) + { + a[(j - 1) * D + (k - 1)] -= (f * e[k - 1] + g * a[(i - 1) * D + (k - 1)]); + } + } + } + } + else + { + e[i - 1] = a[(i - 1) * D + (l - 1)]; + } + d[i - 1] = h; + } + d[0] = 0; + e[0] = 0; + for(i = 1; i <= D; i++) + { + l = i - 1; + if(!glm::equal(d[i - 1], 0, epsilon)) + { + for(j = 1; j <= l; j++) + { + g = 0; + for(k = 1; k <= l; k++) + { + g += a[(i - 1) * D + (k - 1)] * a[(k - 1) * D + (j - 1)]; + } + for(k = 1; k <= l; k++) + { + a[(k - 1) * D + (j - 1)] -= g * a[(k - 1) * D + (i - 1)]; + } + } + } + d[i - 1] = a[(i - 1) * D + (i - 1)]; + a[(i - 1) * D + (i - 1)] = 1; + for(j = 1; j <= l; j++) + { + a[(j - 1) * D + (i - 1)] = a[(i - 1) * D + (j - 1)] = 0; + } + } + + // 2. Calculation of eigenvalues and eigenvectors (QL algorithm) + length_t m, iter; + T s, r, p, dd, c, b; + const length_t MAX_ITER = 30; + + for(i = 2; i <= D; i++) + { + e[i - 2] = e[i - 1]; + } + e[D - 1] = 0; + + for(l = 1; l <= D; l++) + { + iter = 0; + do + { + for(m = l; m <= D - 1; m++) + { + dd = glm::abs(d[m - 1]) + glm::abs(d[m - 1 + 1]); + if(glm::equal(glm::abs(e[m - 1]) + dd, dd, epsilon)) + break; + } + if(m != l) + { + if(iter++ == MAX_ITER) + { + return 0; // Too many iterations in FindEigenvalues + } + g = (d[l - 1 + 1] - d[l - 1]) / (2 * e[l - 1]); + r = pythag(g, 1); + g = d[m - 1] - d[l - 1] + e[l - 1] / (g + transferSign(r, g)); + s = c = 1; + p = 0; + for(i = m - 1; i >= l; i--) + { + f = s * e[i - 1]; + b = c * e[i - 1]; + e[i - 1 + 1] = r = pythag(f, g); + if(glm::equal(r, 0, epsilon)) + { + d[i - 1 + 1] -= p; + e[m - 1] = 0; + break; + } + s = f / r; + c = g / r; + g = d[i - 1 + 1] - p; + r = (d[i - 1] - g) * s + 2 * c * b; + d[i - 1 + 1] = g + (p = s * r); + g = c * r - b; + for(k = 1; k <= D; k++) + { + f = a[(k - 1) * D + (i - 1 + 1)]; + a[(k - 1) * D + (i - 1 + 1)] = s * a[(k - 1) * D + (i - 1)] + c * f; + a[(k - 1) * D + (i - 1)] = c * a[(k - 1) * D + (i - 1)] - s * f; + } + } + if(glm::equal(r, 0, epsilon) && (i >= l)) + continue; + d[l - 1] -= p; + e[l - 1] = g; + e[m - 1] = 0; + } + } while(m != l); + } + + // 3. 
output + for(i = 0; i < D; i++) + outEigenvalues[i] = d[i]; + for(i = 0; i < D; i++) + for(j = 0; j < D; j++) + outEigenvectors[i][j] = a[(j) * D + (i)]; + + return D; + } + + template + GLM_FUNC_QUALIFIER void sortEigenvalues(vec<2, T, Q>& eigenvalues, mat<2, 2, T, Q>& eigenvectors) + { + if (eigenvalues[0] < eigenvalues[1]) + { + std::swap(eigenvalues[0], eigenvalues[1]); + std::swap(eigenvectors[0], eigenvectors[1]); + } + } + + template + GLM_FUNC_QUALIFIER void sortEigenvalues(vec<3, T, Q>& eigenvalues, mat<3, 3, T, Q>& eigenvectors) + { + if (eigenvalues[0] < eigenvalues[1]) + { + std::swap(eigenvalues[0], eigenvalues[1]); + std::swap(eigenvectors[0], eigenvectors[1]); + } + if (eigenvalues[0] < eigenvalues[2]) + { + std::swap(eigenvalues[0], eigenvalues[2]); + std::swap(eigenvectors[0], eigenvectors[2]); + } + if (eigenvalues[1] < eigenvalues[2]) + { + std::swap(eigenvalues[1], eigenvalues[2]); + std::swap(eigenvectors[1], eigenvectors[2]); + } + } + + template + GLM_FUNC_QUALIFIER void sortEigenvalues(vec<4, T, Q>& eigenvalues, mat<4, 4, T, Q>& eigenvectors) + { + if (eigenvalues[0] < eigenvalues[2]) + { + std::swap(eigenvalues[0], eigenvalues[2]); + std::swap(eigenvectors[0], eigenvectors[2]); + } + if (eigenvalues[1] < eigenvalues[3]) + { + std::swap(eigenvalues[1], eigenvalues[3]); + std::swap(eigenvectors[1], eigenvectors[3]); + } + if (eigenvalues[0] < eigenvalues[1]) + { + std::swap(eigenvalues[0], eigenvalues[1]); + std::swap(eigenvectors[0], eigenvectors[1]); + } + if (eigenvalues[2] < eigenvalues[3]) + { + std::swap(eigenvalues[2], eigenvalues[3]); + std::swap(eigenvectors[2], eigenvectors[3]); + } + if (eigenvalues[1] < eigenvalues[2]) + { + std::swap(eigenvalues[1], eigenvalues[2]); + std::swap(eigenvectors[1], eigenvectors[2]); + } + } + +}//namespace glm diff --git a/includes/glm/gtx/perpendicular.hpp b/includes/glm/gtx/perpendicular.hpp new file mode 100644 index 0000000..4087ab0 --- /dev/null +++ b/includes/glm/gtx/perpendicular.hpp @@ -0,0 +1,39 @@ +/// @ref gtx_perpendicular +/// @file glm/gtx/perpendicular.hpp +/// +/// @see core (dependence) +/// @see gtx_projection (dependence) +/// +/// @defgroup gtx_perpendicular GLM_GTX_perpendicular +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Perpendicular of a vector from other one + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtx/projection.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_perpendicular is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_perpendicular extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_perpendicular + /// @{ + + //! Projects x a perpendicular axis of Normal. + //! From GLM_GTX_perpendicular extension. 
+ template + GLM_FUNC_DECL genType perp(genType const& x, genType const& Normal); + + /// @} +}//namespace glm + +#include "perpendicular.inl" diff --git a/includes/glm/gtx/perpendicular.inl b/includes/glm/gtx/perpendicular.inl new file mode 100644 index 0000000..1e72f33 --- /dev/null +++ b/includes/glm/gtx/perpendicular.inl @@ -0,0 +1,10 @@ +/// @ref gtx_perpendicular + +namespace glm +{ + template + GLM_FUNC_QUALIFIER genType perp(genType const& x, genType const& Normal) + { + return x - proj(x, Normal); + } +}//namespace glm diff --git a/includes/glm/gtx/polar_coordinates.hpp b/includes/glm/gtx/polar_coordinates.hpp new file mode 100644 index 0000000..c27aacf --- /dev/null +++ b/includes/glm/gtx/polar_coordinates.hpp @@ -0,0 +1,46 @@ +/// @ref gtx_polar_coordinates +/// @file glm/gtx/polar_coordinates.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_polar_coordinates GLM_GTX_polar_coordinates +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Conversion from Euclidean space to polar space and revert. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_polar_coordinates is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_polar_coordinates extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_polar_coordinates + /// @{ + + /// Convert Euclidean to Polar coordinates, x is the latitude, y the longitude and z the xz distance. + /// + /// @see gtx_polar_coordinates + template + GLM_FUNC_DECL vec<3, T, Q> polar( + vec<3, T, Q> const& euclidean); + + /// Convert Polar to Euclidean coordinates. + /// + /// @see gtx_polar_coordinates + template + GLM_FUNC_DECL vec<3, T, Q> euclidean( + vec<2, T, Q> const& polar); + + /// @} +}//namespace glm + +#include "polar_coordinates.inl" diff --git a/includes/glm/gtx/polar_coordinates.inl b/includes/glm/gtx/polar_coordinates.inl new file mode 100644 index 0000000..371c8dd --- /dev/null +++ b/includes/glm/gtx/polar_coordinates.inl @@ -0,0 +1,36 @@ +/// @ref gtx_polar_coordinates + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> polar + ( + vec<3, T, Q> const& euclidean + ) + { + T const Length(length(euclidean)); + vec<3, T, Q> const tmp(euclidean / Length); + T const xz_dist(sqrt(tmp.x * tmp.x + tmp.z * tmp.z)); + + return vec<3, T, Q>( + asin(tmp.y), // latitude + atan(tmp.x, tmp.z), // longitude + xz_dist); // xz distance + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> euclidean + ( + vec<2, T, Q> const& polar + ) + { + T const latitude(polar.x); + T const longitude(polar.y); + + return vec<3, T, Q>( + cos(latitude) * sin(longitude), + sin(latitude), + cos(latitude) * cos(longitude)); + } + +}//namespace glm diff --git a/includes/glm/gtx/projection.hpp b/includes/glm/gtx/projection.hpp new file mode 100644 index 0000000..a438f39 --- /dev/null +++ b/includes/glm/gtx/projection.hpp @@ -0,0 +1,41 @@ +/// @ref gtx_projection +/// @file glm/gtx/projection.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_projection GLM_GTX_projection +/// @ingroup gtx +/// +/// Include to use the features of this extension. 
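// ----------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the vendored GLM sources): proj
// and perp split a vector into components parallel and perpendicular to a
// reference direction (perp(x, N) == x - proj(x, N)), and polar/euclidean
// convert between unit directions and latitude/longitude. The values below are
// illustrative.
// ----------------------------------------------------------------------------
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/projection.hpp>
#include <glm/gtx/perpendicular.hpp>
#include <glm/gtx/polar_coordinates.hpp>

static void decompose_demo()
{
	glm::vec3 const v(1.0f, 2.0f, 3.0f);
	glm::vec3 const up(0.0f, 1.0f, 0.0f);

	glm::vec3 const parallel      = glm::proj(v, up);   // (0, 2, 0)
	glm::vec3 const perpendicular = glm::perp(v, up);   // (1, 0, 3)

	// Round-trip a unit direction through latitude/longitude; polar().z is the
	// xz distance and is not needed to rebuild the direction.
	glm::vec3 const dir    = glm::normalize(v);
	glm::vec3 const latLon = glm::polar(dir);
	glm::vec3 const back   = glm::euclidean(glm::vec2(latLon));   // ~= dir
	(void)parallel; (void)perpendicular; (void)back;
}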
+/// +/// Projection of a vector to other one + +#pragma once + +// Dependency: +#include "../geometric.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_projection is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_projection extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_projection + /// @{ + + /// Projects x on Normal. + /// + /// @param[in] x A vector to project + /// @param[in] Normal A normal that doesn't need to be of unit length. + /// + /// @see gtx_projection + template + GLM_FUNC_DECL genType proj(genType const& x, genType const& Normal); + + /// @} +}//namespace glm + +#include "projection.inl" diff --git a/includes/glm/gtx/projection.inl b/includes/glm/gtx/projection.inl new file mode 100644 index 0000000..f23f884 --- /dev/null +++ b/includes/glm/gtx/projection.inl @@ -0,0 +1,10 @@ +/// @ref gtx_projection + +namespace glm +{ + template + GLM_FUNC_QUALIFIER genType proj(genType const& x, genType const& Normal) + { + return glm::dot(x, Normal) / glm::dot(Normal, Normal) * Normal; + } +}//namespace glm diff --git a/includes/glm/gtx/quaternion.hpp b/includes/glm/gtx/quaternion.hpp new file mode 100644 index 0000000..f51c521 --- /dev/null +++ b/includes/glm/gtx/quaternion.hpp @@ -0,0 +1,172 @@ +/// @ref gtx_quaternion +/// @file glm/gtx/quaternion.hpp +/// +/// @see core (dependence) +/// @see gtx_extented_min_max (dependence) +/// +/// @defgroup gtx_quaternion GLM_GTX_quaternion +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Extended quaternion types and functions + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/constants.hpp" +#include "../gtc/quaternion.hpp" +#include "../ext/quaternion_exponential.hpp" +#include "../gtx/norm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_quaternion is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_quaternion extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_quaternion + /// @{ + + /// Create an identity quaternion. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL GLM_CONSTEXPR qua quat_identity(); + + /// Compute a cross product between a quaternion and a vector. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> cross( + qua const& q, + vec<3, T, Q> const& v); + + //! Compute a cross product between a vector and a quaternion. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> cross( + vec<3, T, Q> const& v, + qua const& q); + + //! Compute a point on a path according squad equation. + //! q1 and q2 are control points; s1 and s2 are intermediate control points. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL qua squad( + qua const& q1, + qua const& q2, + qua const& s1, + qua const& s2, + T const& h); + + //! Returns an intermediate control point for squad interpolation. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL qua intermediate( + qua const& prev, + qua const& curr, + qua const& next); + + //! Returns quarternion square root. + /// + /// @see gtx_quaternion + //template + //qua sqrt( + // qua const& q); + + //! 
Rotates a 3 components vector by a quaternion. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL vec<3, T, Q> rotate( + qua const& q, + vec<3, T, Q> const& v); + + /// Rotates a 4 components vector by a quaternion. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL vec<4, T, Q> rotate( + qua const& q, + vec<4, T, Q> const& v); + + /// Extract the real component of a quaternion. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL T extractRealComponent( + qua const& q); + + /// Converts a quaternion to a 3 * 3 matrix. + /// + /// @see gtx_quaternion + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> toMat3( + qua const& x){return mat3_cast(x);} + + /// Converts a quaternion to a 4 * 4 matrix. + /// + /// @see gtx_quaternion + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> toMat4( + qua const& x){return mat4_cast(x);} + + /// Converts a 3 * 3 matrix to a quaternion. + /// + /// @see gtx_quaternion + template + GLM_FUNC_QUALIFIER qua toQuat( + mat<3, 3, T, Q> const& x){return quat_cast(x);} + + /// Converts a 4 * 4 matrix to a quaternion. + /// + /// @see gtx_quaternion + template + GLM_FUNC_QUALIFIER qua toQuat( + mat<4, 4, T, Q> const& x){return quat_cast(x);} + + /// Quaternion interpolation using the rotation short path. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL qua shortMix( + qua const& x, + qua const& y, + T const& a); + + /// Quaternion normalized linear interpolation. + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL qua fastMix( + qua const& x, + qua const& y, + T const& a); + + /// Compute the rotation between two vectors. + /// @param orig vector, needs to be normalized + /// @param dest vector, needs to be normalized + /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL qua rotation( + vec<3, T, Q> const& orig, + vec<3, T, Q> const& dest); + + /// Returns the squared length of x. 
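// ----------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the vendored GLM sources):
// rotation() builds the shortest-arc quaternion between two normalized
// directions, rotate() applies it to a vector, and toMat4 converts it for use
// in a matrix pipeline. The directions and the 0.5 blend factor are
// illustrative.
// ----------------------------------------------------------------------------
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>

static void quaternion_demo()
{
	// Shortest rotation taking +Z onto a target direction (both normalized).
	glm::vec3 const from(0.0f, 0.0f, 1.0f);
	glm::vec3 const to = glm::normalize(glm::vec3(1.0f, 0.0f, 1.0f));
	glm::quat const q  = glm::rotation(from, to);

	glm::vec3 const rotated = glm::rotate(q, from);   // ~= to
	glm::mat4 const model   = glm::toMat4(q);         // the same rotation as a 4x4 matrix

	// Cheap normalized-lerp blend between two orientations.
	glm::quat const half = glm::fastMix(glm::quat_identity<float, glm::defaultp>(), q, 0.5f);
	(void)rotated; (void)model; (void)half;
}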
+ /// + /// @see gtx_quaternion + template + GLM_FUNC_DECL GLM_CONSTEXPR T length2(qua const& q); + + /// @} +}//namespace glm + +#include "quaternion.inl" diff --git a/includes/glm/gtx/quaternion.inl b/includes/glm/gtx/quaternion.inl new file mode 100644 index 0000000..5e18899 --- /dev/null +++ b/includes/glm/gtx/quaternion.inl @@ -0,0 +1,159 @@ +/// @ref gtx_quaternion + +#include +#include "../gtc/constants.hpp" + +namespace glm +{ + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua quat_identity() + { + return qua::wxyz(static_cast(1), static_cast(0), static_cast(0), static_cast(0)); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> cross(vec<3, T, Q> const& v, qua const& q) + { + return inverse(q) * v; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> cross(qua const& q, vec<3, T, Q> const& v) + { + return q * v; + } + + template + GLM_FUNC_QUALIFIER qua squad + ( + qua const& q1, + qua const& q2, + qua const& s1, + qua const& s2, + T const& h) + { + return mix(mix(q1, q2, h), mix(s1, s2, h), static_cast(2) * (static_cast(1) - h) * h); + } + + template + GLM_FUNC_QUALIFIER qua intermediate + ( + qua const& prev, + qua const& curr, + qua const& next + ) + { + qua invQuat = inverse(curr); + return exp((log(next * invQuat) + log(prev * invQuat)) / static_cast(-4)) * curr; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rotate(qua const& q, vec<3, T, Q> const& v) + { + return q * v; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> rotate(qua const& q, vec<4, T, Q> const& v) + { + return q * v; + } + + template + GLM_FUNC_QUALIFIER T extractRealComponent(qua const& q) + { + T w = static_cast(1) - q.x * q.x - q.y * q.y - q.z * q.z; + if(w < T(0)) + return T(0); + else + return -sqrt(w); + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR T length2(qua const& q) + { + return q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w; + } + + template + GLM_FUNC_QUALIFIER qua shortMix(qua const& x, qua const& y, T const& a) + { + if(a <= static_cast(0)) return x; + if(a >= static_cast(1)) return y; + + T fCos = dot(x, y); + qua y2(y); //BUG!!! qua y2; + if(fCos < static_cast(0)) + { + y2 = -y; + fCos = -fCos; + } + + //if(fCos > 1.0f) // problem + T k0, k1; + if(fCos > (static_cast(1) - epsilon())) + { + k0 = static_cast(1) - a; + k1 = static_cast(0) + a; //BUG!!! 1.0f + a; + } + else + { + T fSin = sqrt(T(1) - fCos * fCos); + T fAngle = atan(fSin, fCos); + T fOneOverSin = static_cast(1) / fSin; + k0 = sin((static_cast(1) - a) * fAngle) * fOneOverSin; + k1 = sin((static_cast(0) + a) * fAngle) * fOneOverSin; + } + + return qua::wxyz( + k0 * x.w + k1 * y2.w, + k0 * x.x + k1 * y2.x, + k0 * x.y + k1 * y2.y, + k0 * x.z + k1 * y2.z); + } + + template + GLM_FUNC_QUALIFIER qua fastMix(qua const& x, qua const& y, T const& a) + { + return glm::normalize(x * (static_cast(1) - a) + (y * a)); + } + + template + GLM_FUNC_QUALIFIER qua rotation(vec<3, T, Q> const& orig, vec<3, T, Q> const& dest) + { + T cosTheta = dot(orig, dest); + vec<3, T, Q> rotationAxis; + + if(cosTheta >= static_cast(1) - epsilon()) { + // orig and dest point in the same direction + return quat_identity(); + } + + if(cosTheta < static_cast(-1) + epsilon()) + { + // special case when vectors in opposite directions : + // there is no "ideal" rotation axis + // So guess one; any will do as long as it's perpendicular to start + // This implementation favors a rotation around the Up axis (Y), + // since it's often what you want to do. 
+ rotationAxis = cross(vec<3, T, Q>(0, 0, 1), orig); + if(length2(rotationAxis) < epsilon()) // bad luck, they were parallel, try again! + rotationAxis = cross(vec<3, T, Q>(1, 0, 0), orig); + + rotationAxis = normalize(rotationAxis); + return angleAxis(pi(), rotationAxis); + } + + // Implementation from Stan Melax's Game Programming Gems 1 article + rotationAxis = cross(orig, dest); + + T s = sqrt((T(1) + cosTheta) * static_cast(2)); + T invs = static_cast(1) / s; + + return qua::wxyz( + s * static_cast(0.5f), + rotationAxis.x * invs, + rotationAxis.y * invs, + rotationAxis.z * invs); + } +}//namespace glm diff --git a/includes/glm/gtx/range.hpp b/includes/glm/gtx/range.hpp new file mode 100644 index 0000000..50c5e57 --- /dev/null +++ b/includes/glm/gtx/range.hpp @@ -0,0 +1,96 @@ +/// @ref gtx_range +/// @file glm/gtx/range.hpp +/// @author Joshua Moerman +/// +/// @defgroup gtx_range GLM_GTX_range +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Defines begin and end for vectors and matrices. Useful for range-based for loop. +/// The range is defined over the elements, not over columns or rows (e.g. mat4 has 16 elements). + +#pragma once + +// Dependencies +#include "../detail/setup.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_range is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_range extension included") +#endif + +#include "../gtc/type_ptr.hpp" +#include "../gtc/vec1.hpp" + +namespace glm +{ + /// @addtogroup gtx_range + /// @{ + +# if GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(push) +# pragma warning(disable : 4100) // unreferenced formal parameter +# endif + + template + inline length_t components(vec<1, T, Q> const& v) + { + return v.length(); + } + + template + inline length_t components(vec<2, T, Q> const& v) + { + return v.length(); + } + + template + inline length_t components(vec<3, T, Q> const& v) + { + return v.length(); + } + + template + inline length_t components(vec<4, T, Q> const& v) + { + return v.length(); + } + + template + inline length_t components(genType const& m) + { + return m.length() * m[0].length(); + } + + template + inline typename genType::value_type const * begin(genType const& v) + { + return value_ptr(v); + } + + template + inline typename genType::value_type const * end(genType const& v) + { + return begin(v) + components(v); + } + + template + inline typename genType::value_type * begin(genType& v) + { + return value_ptr(v); + } + + template + inline typename genType::value_type * end(genType& v) + { + return begin(v) + components(v); + } + +# if GLM_COMPILER & GLM_COMPILER_VC +# pragma warning(pop) +# endif + + /// @} +}//namespace glm diff --git a/includes/glm/gtx/raw_data.hpp b/includes/glm/gtx/raw_data.hpp new file mode 100644 index 0000000..3bc27b9 --- /dev/null +++ b/includes/glm/gtx/raw_data.hpp @@ -0,0 +1,49 @@ +/// @ref gtx_raw_data +/// @file glm/gtx/raw_data.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_raw_data GLM_GTX_raw_data +/// @ingroup gtx +/// +/// Include to use the features of this extension. 
+/// +/// Projection of a vector to other one + +#pragma once + +// Dependencies +#include "../ext/scalar_uint_sized.hpp" +#include "../detail/setup.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_raw_data is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_raw_data extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_raw_data + /// @{ + + //! Type for byte numbers. + //! From GLM_GTX_raw_data extension. + typedef detail::uint8 byte; + + //! Type for word numbers. + //! From GLM_GTX_raw_data extension. + typedef detail::uint16 word; + + //! Type for dword numbers. + //! From GLM_GTX_raw_data extension. + typedef detail::uint32 dword; + + //! Type for qword numbers. + //! From GLM_GTX_raw_data extension. + typedef detail::uint64 qword; + + /// @} +}// namespace glm + +#include "raw_data.inl" diff --git a/includes/glm/gtx/raw_data.inl b/includes/glm/gtx/raw_data.inl new file mode 100644 index 0000000..c740317 --- /dev/null +++ b/includes/glm/gtx/raw_data.inl @@ -0,0 +1,2 @@ +/// @ref gtx_raw_data + diff --git a/includes/glm/gtx/rotate_normalized_axis.hpp b/includes/glm/gtx/rotate_normalized_axis.hpp new file mode 100644 index 0000000..02c3f5c --- /dev/null +++ b/includes/glm/gtx/rotate_normalized_axis.hpp @@ -0,0 +1,66 @@ +/// @ref gtx_rotate_normalized_axis +/// @file glm/gtx/rotate_normalized_axis.hpp +/// +/// @see core (dependence) +/// @see gtc_matrix_transform +/// @see gtc_quaternion +/// +/// @defgroup gtx_rotate_normalized_axis GLM_GTX_rotate_normalized_axis +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Quaternions and matrices rotations around normalized axis. + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/epsilon.hpp" +#include "../gtc/quaternion.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_rotate_normalized_axis is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_rotate_normalized_axis extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_rotate_normalized_axis + /// @{ + + /// Builds a rotation 4 * 4 matrix created from a normalized axis and an angle. + /// + /// @param m Input matrix multiplied by this rotation matrix. + /// @param angle Rotation angle expressed in radians. + /// @param axis Rotation axis, must be normalized. + /// @tparam T Value type used to build the matrix. Currently supported: half (not recommended), float or double. + /// + /// @see gtx_rotate_normalized_axis + /// @see - rotate(T angle, T x, T y, T z) + /// @see - rotate(mat<4, 4, T, Q> const& m, T angle, T x, T y, T z) + /// @see - rotate(T angle, vec<3, T, Q> const& v) + template + GLM_FUNC_DECL mat<4, 4, T, Q> rotateNormalizedAxis( + mat<4, 4, T, Q> const& m, + T const& angle, + vec<3, T, Q> const& axis); + + /// Rotates a quaternion from a vector of 3 components normalized axis and an angle. + /// + /// @param q Source orientation + /// @param angle Angle expressed in radians. + /// @param axis Normalized axis of the rotation, must be normalized. 
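// ----------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the vendored GLM sources):
// rotateNormalizedAxis behaves like the usual rotate(m, angle, axis) but
// trusts the caller that the axis is already unit length, skipping the
// normalization; useful when the same axis is reused for many rotations. The
// axis and angle below are illustrative.
// ----------------------------------------------------------------------------
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <glm/gtx/rotate_normalized_axis.hpp>

static void rotate_normalized_axis_demo()
{
	// Normalize once, up front.
	glm::vec3 const axis = glm::normalize(glm::vec3(1.0f, 1.0f, 0.0f));

	// Matrix form.
	glm::mat4 const m = glm::rotateNormalizedAxis(glm::mat4(1.0f), glm::radians(30.0f), axis);

	// Quaternion form: accumulate a spin around the same axis.
	glm::quat q(1.0f, 0.0f, 0.0f, 0.0f);   // identity (w, x, y, z)
	q = glm::rotateNormalizedAxis(q, glm::radians(30.0f), axis);
	(void)m;
}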
+ /// + /// @see gtx_rotate_normalized_axis + template + GLM_FUNC_DECL qua rotateNormalizedAxis( + qua const& q, + T const& angle, + vec<3, T, Q> const& axis); + + /// @} +}//namespace glm + +#include "rotate_normalized_axis.inl" diff --git a/includes/glm/gtx/rotate_normalized_axis.inl b/includes/glm/gtx/rotate_normalized_axis.inl new file mode 100644 index 0000000..352a56c --- /dev/null +++ b/includes/glm/gtx/rotate_normalized_axis.inl @@ -0,0 +1,58 @@ +/// @ref gtx_rotate_normalized_axis + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotateNormalizedAxis + ( + mat<4, 4, T, Q> const& m, + T const& angle, + vec<3, T, Q> const& v + ) + { + T const a = angle; + T const c = cos(a); + T const s = sin(a); + + vec<3, T, Q> const axis(v); + + vec<3, T, Q> const temp((static_cast(1) - c) * axis); + + mat<4, 4, T, Q> Rotate; + Rotate[0][0] = c + temp[0] * axis[0]; + Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2]; + Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1]; + + Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2]; + Rotate[1][1] = c + temp[1] * axis[1]; + Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0]; + + Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1]; + Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0]; + Rotate[2][2] = c + temp[2] * axis[2]; + + mat<4, 4, T, Q> Result; + Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2]; + Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2]; + Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2]; + Result[3] = m[3]; + return Result; + } + + template + GLM_FUNC_QUALIFIER qua rotateNormalizedAxis + ( + qua const& q, + T const& angle, + vec<3, T, Q> const& v + ) + { + vec<3, T, Q> const Tmp(v); + + T const AngleRad(angle); + T const Sin = sin(AngleRad * T(0.5)); + + return q * qua::wxyz(cos(AngleRad * static_cast(0.5)), Tmp.x * Sin, Tmp.y * Sin, Tmp.z * Sin); + //return gtc::quaternion::cross(q, tquat(cos(AngleRad * T(0.5)), Tmp.x * fSin, Tmp.y * fSin, Tmp.z * fSin)); + } +}//namespace glm diff --git a/includes/glm/gtx/rotate_vector.hpp b/includes/glm/gtx/rotate_vector.hpp new file mode 100644 index 0000000..b7345bf --- /dev/null +++ b/includes/glm/gtx/rotate_vector.hpp @@ -0,0 +1,121 @@ +/// @ref gtx_rotate_vector +/// @file glm/gtx/rotate_vector.hpp +/// +/// @see core (dependence) +/// @see gtx_transform (dependence) +/// +/// @defgroup gtx_rotate_vector GLM_GTX_rotate_vector +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Function to directly rotate a vector + +#pragma once + +// Dependency: +#include "../gtx/transform.hpp" +#include "../gtc/epsilon.hpp" +#include "../ext/vector_relational.hpp" +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_rotate_vector is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_rotate_vector extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_rotate_vector + /// @{ + + /// Returns Spherical interpolation between two vectors + /// + /// @param x A first vector + /// @param y A second vector + /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1]. + /// + /// @see gtx_rotate_vector + template + GLM_FUNC_DECL vec<3, T, Q> slerp( + vec<3, T, Q> const& x, + vec<3, T, Q> const& y, + T const& a); + + //! 
Rotate a two dimensional vector. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<2, T, Q> rotate( + vec<2, T, Q> const& v, + T const& angle); + + //! Rotate a three dimensional vector around an axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<3, T, Q> rotate( + vec<3, T, Q> const& v, + T const& angle, + vec<3, T, Q> const& normal); + + //! Rotate a four dimensional vector around an axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<4, T, Q> rotate( + vec<4, T, Q> const& v, + T const& angle, + vec<3, T, Q> const& normal); + + //! Rotate a three dimensional vector around the X axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<3, T, Q> rotateX( + vec<3, T, Q> const& v, + T const& angle); + + //! Rotate a three dimensional vector around the Y axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<3, T, Q> rotateY( + vec<3, T, Q> const& v, + T const& angle); + + //! Rotate a three dimensional vector around the Z axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<3, T, Q> rotateZ( + vec<3, T, Q> const& v, + T const& angle); + + //! Rotate a four dimensional vector around the X axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<4, T, Q> rotateX( + vec<4, T, Q> const& v, + T const& angle); + + //! Rotate a four dimensional vector around the Y axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<4, T, Q> rotateY( + vec<4, T, Q> const& v, + T const& angle); + + //! Rotate a four dimensional vector around the Z axis. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL vec<4, T, Q> rotateZ( + vec<4, T, Q> const& v, + T const& angle); + + //! Build a rotation matrix from a normal and a up vector. + //! From GLM_GTX_rotate_vector extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> orientation( + vec<3, T, Q> const& Normal, + vec<3, T, Q> const& Up); + + /// @} +}//namespace glm + +#include "rotate_vector.inl" diff --git a/includes/glm/gtx/rotate_vector.inl b/includes/glm/gtx/rotate_vector.inl new file mode 100644 index 0000000..f8136e7 --- /dev/null +++ b/includes/glm/gtx/rotate_vector.inl @@ -0,0 +1,187 @@ +/// @ref gtx_rotate_vector + +namespace glm +{ + template + GLM_FUNC_QUALIFIER vec<3, T, Q> slerp + ( + vec<3, T, Q> const& x, + vec<3, T, Q> const& y, + T const& a + ) + { + // get cosine of angle between vectors (-1 -> 1) + T CosAlpha = dot(x, y); + // get angle (0 -> pi) + T Alpha = acos(CosAlpha); + // get sine of angle between vectors (0 -> 1) + T SinAlpha = sin(Alpha); + // this breaks down when SinAlpha = 0, i.e. 
Alpha = 0 or pi + T t1 = sin((static_cast(1) - a) * Alpha) / SinAlpha; + T t2 = sin(a * Alpha) / SinAlpha; + + // interpolate src vectors + return x * t1 + y * t2; + } + + template + GLM_FUNC_QUALIFIER vec<2, T, Q> rotate + ( + vec<2, T, Q> const& v, + T const& angle + ) + { + vec<2, T, Q> Result; + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.x = v.x * Cos - v.y * Sin; + Result.y = v.x * Sin + v.y * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rotate + ( + vec<3, T, Q> const& v, + T const& angle, + vec<3, T, Q> const& normal + ) + { + return mat<3, 3, T, Q>(glm::rotate(angle, normal)) * v; + } + /* + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rotateGTX( + const vec<3, T, Q>& x, + T angle, + const vec<3, T, Q>& normal) + { + const T Cos = cos(radians(angle)); + const T Sin = sin(radians(angle)); + return x * Cos + ((x * normal) * (T(1) - Cos)) * normal + cross(x, normal) * Sin; + } + */ + template + GLM_FUNC_QUALIFIER vec<4, T, Q> rotate + ( + vec<4, T, Q> const& v, + T const& angle, + vec<3, T, Q> const& normal + ) + { + return rotate(angle, normal) * v; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rotateX + ( + vec<3, T, Q> const& v, + T const& angle + ) + { + vec<3, T, Q> Result(v); + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.y = v.y * Cos - v.z * Sin; + Result.z = v.y * Sin + v.z * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rotateY + ( + vec<3, T, Q> const& v, + T const& angle + ) + { + vec<3, T, Q> Result = v; + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.x = v.x * Cos + v.z * Sin; + Result.z = -v.x * Sin + v.z * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<3, T, Q> rotateZ + ( + vec<3, T, Q> const& v, + T const& angle + ) + { + vec<3, T, Q> Result = v; + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.x = v.x * Cos - v.y * Sin; + Result.y = v.x * Sin + v.y * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> rotateX + ( + vec<4, T, Q> const& v, + T const& angle + ) + { + vec<4, T, Q> Result = v; + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.y = v.y * Cos - v.z * Sin; + Result.z = v.y * Sin + v.z * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> rotateY + ( + vec<4, T, Q> const& v, + T const& angle + ) + { + vec<4, T, Q> Result = v; + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.x = v.x * Cos + v.z * Sin; + Result.z = -v.x * Sin + v.z * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER vec<4, T, Q> rotateZ + ( + vec<4, T, Q> const& v, + T const& angle + ) + { + vec<4, T, Q> Result = v; + T const Cos(cos(angle)); + T const Sin(sin(angle)); + + Result.x = v.x * Cos - v.y * Sin; + Result.y = v.x * Sin + v.y * Cos; + return Result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> orientation + ( + vec<3, T, Q> const& Normal, + vec<3, T, Q> const& Up + ) + { + if(all(equal(Normal, Up, epsilon()))) + return mat<4, 4, T, Q>(static_cast(1)); + + vec<3, T, Q> RotationAxis = cross(Up, Normal); + T Angle = acos(dot(Normal, Up)); + + return rotate(Angle, RotationAxis); + } +}//namespace glm diff --git a/includes/glm/gtx/scalar_multiplication.hpp b/includes/glm/gtx/scalar_multiplication.hpp new file mode 100644 index 0000000..97df000 --- /dev/null +++ b/includes/glm/gtx/scalar_multiplication.hpp @@ -0,0 +1,80 @@ +/// @ref gtx_scalar_multiplication +/// @file glm/gtx/scalar_multiplication.hpp +/// @author Joshua Moerman +/// +/// @defgroup 
gtx_scalar_multiplication GLM_GTX_scalar_multiplication +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Enables scalar multiplication for all types +/// +/// Since GLSL is very strict about types, the following (often used) combinations do not work: +/// double * vec4 +/// int * vec4 +/// vec4 / int +/// So we'll fix that! Of course "float * vec4" should remain the same (hence the enable_if magic) + +#pragma once + +#include "../detail/setup.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_scalar_multiplication is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_scalar_multiplication extension included") +#endif + +#include "../vec2.hpp" +#include "../vec3.hpp" +#include "../vec4.hpp" +#include "../mat2x2.hpp" +#include + +namespace glm +{ + /// @addtogroup gtx_scalar_multiplication + /// @{ + + template + using return_type_scalar_multiplication = typename std::enable_if< + !std::is_same::value // T may not be a float + && std::is_arithmetic::value, Vec // But it may be an int or double (no vec3 or mat3, ...) + >::type; + +#define GLM_IMPLEMENT_SCAL_MULT(Vec) \ + template \ + return_type_scalar_multiplication \ + operator*(T const& s, Vec rh){ \ + return rh *= static_cast(s); \ + } \ + \ + template \ + return_type_scalar_multiplication \ + operator*(Vec lh, T const& s){ \ + return lh *= static_cast(s); \ + } \ + \ + template \ + return_type_scalar_multiplication \ + operator/(Vec lh, T const& s){ \ + return lh *= 1.0f / static_cast(s); \ + } + +GLM_IMPLEMENT_SCAL_MULT(vec2) +GLM_IMPLEMENT_SCAL_MULT(vec3) +GLM_IMPLEMENT_SCAL_MULT(vec4) + +GLM_IMPLEMENT_SCAL_MULT(mat2) +GLM_IMPLEMENT_SCAL_MULT(mat2x3) +GLM_IMPLEMENT_SCAL_MULT(mat2x4) +GLM_IMPLEMENT_SCAL_MULT(mat3x2) +GLM_IMPLEMENT_SCAL_MULT(mat3) +GLM_IMPLEMENT_SCAL_MULT(mat3x4) +GLM_IMPLEMENT_SCAL_MULT(mat4x2) +GLM_IMPLEMENT_SCAL_MULT(mat4x3) +GLM_IMPLEMENT_SCAL_MULT(mat4) + +#undef GLM_IMPLEMENT_SCAL_MULT + /// @} +} // namespace glm diff --git a/includes/glm/gtx/scalar_relational.hpp b/includes/glm/gtx/scalar_relational.hpp new file mode 100644 index 0000000..e840932 --- /dev/null +++ b/includes/glm/gtx/scalar_relational.hpp @@ -0,0 +1,34 @@ +/// @ref gtx_scalar_relational +/// @file glm/gtx/scalar_relational.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_scalar_relational GLM_GTX_scalar_relational +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Extend a position from a source to a position at a defined length. + +#pragma once + +// Dependency: +#include "../glm.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_scalar_relational is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." 
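+// Editor's note on the GLM_GTX_scalar_multiplication operators defined just above
+// (a hedged illustration, not part of the original headers): they enable the mixed
+// scalar/vector arithmetic that core GLM rejects because template deduction would see
+// two different scalar types:
+//
+//     #define GLM_ENABLE_EXPERIMENTAL
+//     #include <glm/gtx/scalar_multiplication.hpp>
+//
+//     glm::vec4 v(1.0f, 2.0f, 3.0f, 4.0f);
+//     glm::vec4 a = 2 * v;    // int * vec4
+//     glm::vec4 b = 0.5 * v;  // double * vec4
+//     glm::vec4 c = v / 2;    // vec4 / int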
+#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_scalar_relational extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_scalar_relational + /// @{ + + + + /// @} +}//namespace glm + +#include "scalar_relational.inl" diff --git a/includes/glm/gtx/scalar_relational.inl b/includes/glm/gtx/scalar_relational.inl new file mode 100644 index 0000000..c2a121c --- /dev/null +++ b/includes/glm/gtx/scalar_relational.inl @@ -0,0 +1,88 @@ +/// @ref gtx_scalar_relational + +namespace glm +{ + template + GLM_FUNC_QUALIFIER bool lessThan + ( + T const& x, + T const& y + ) + { + return x < y; + } + + template + GLM_FUNC_QUALIFIER bool lessThanEqual + ( + T const& x, + T const& y + ) + { + return x <= y; + } + + template + GLM_FUNC_QUALIFIER bool greaterThan + ( + T const& x, + T const& y + ) + { + return x > y; + } + + template + GLM_FUNC_QUALIFIER bool greaterThanEqual + ( + T const& x, + T const& y + ) + { + return x >= y; + } + + template + GLM_FUNC_QUALIFIER bool equal + ( + T const& x, + T const& y + ) + { + return detail::compute_equal::is_iec559>::call(x, y); + } + + template + GLM_FUNC_QUALIFIER bool notEqual + ( + T const& x, + T const& y + ) + { + return !detail::compute_equal::is_iec559>::call(x, y); + } + + GLM_FUNC_QUALIFIER bool any + ( + bool const& x + ) + { + return x; + } + + GLM_FUNC_QUALIFIER bool all + ( + bool const& x + ) + { + return x; + } + + GLM_FUNC_QUALIFIER bool not_ + ( + bool const& x + ) + { + return !x; + } +}//namespace glm diff --git a/includes/glm/gtx/spline.hpp b/includes/glm/gtx/spline.hpp new file mode 100644 index 0000000..8df5584 --- /dev/null +++ b/includes/glm/gtx/spline.hpp @@ -0,0 +1,63 @@ +/// @ref gtx_spline +/// @file glm/gtx/spline.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_spline GLM_GTX_spline +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Spline functions + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtx/optimum_pow.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_spline is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_spline extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_spline + /// @{ + + /// Return a point from a catmull rom curve. + /// @see gtx_spline extension. + template + GLM_FUNC_DECL genType catmullRom( + genType const& v1, + genType const& v2, + genType const& v3, + genType const& v4, + typename genType::value_type const& s); + + /// Return a point from a hermite curve. + /// @see gtx_spline extension. + template + GLM_FUNC_DECL genType hermite( + genType const& v1, + genType const& t1, + genType const& v2, + genType const& t2, + typename genType::value_type const& s); + + /// Return a point from a cubic curve. + /// @see gtx_spline extension. 
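+	// Editor's note (a minimal usage sketch, assuming GLM_ENABLE_EXPERIMENTAL is defined
+	// before including this header): catmullRom() interpolates between v2 and v3 as the
+	// parameter s runs from 0 to 1, with v1 and v4 acting as the neighbouring control points.
+	//
+	//     #include <glm/gtx/spline.hpp>
+	//
+	//     glm::vec3 p1(0.0f), p2(1.0f, 0.0f, 0.0f), p3(2.0f, 1.0f, 0.0f), p4(3.0f, 1.0f, 0.0f);
+	//     glm::vec3 mid = glm::catmullRom(p1, p2, p3, p4, 0.5f);  // point at s = 0.5 on the p2-p3 segment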
+ template + GLM_FUNC_DECL genType cubic( + genType const& v1, + genType const& v2, + genType const& v3, + genType const& v4, + typename genType::value_type const& s); + + /// @} +}//namespace glm + +#include "spline.inl" diff --git a/includes/glm/gtx/spline.inl b/includes/glm/gtx/spline.inl new file mode 100644 index 0000000..c3fd056 --- /dev/null +++ b/includes/glm/gtx/spline.inl @@ -0,0 +1,60 @@ +/// @ref gtx_spline + +namespace glm +{ + template + GLM_FUNC_QUALIFIER genType catmullRom + ( + genType const& v1, + genType const& v2, + genType const& v3, + genType const& v4, + typename genType::value_type const& s + ) + { + typename genType::value_type s2 = pow2(s); + typename genType::value_type s3 = pow3(s); + + typename genType::value_type f1 = -s3 + typename genType::value_type(2) * s2 - s; + typename genType::value_type f2 = typename genType::value_type(3) * s3 - typename genType::value_type(5) * s2 + typename genType::value_type(2); + typename genType::value_type f3 = typename genType::value_type(-3) * s3 + typename genType::value_type(4) * s2 + s; + typename genType::value_type f4 = s3 - s2; + + return (f1 * v1 + f2 * v2 + f3 * v3 + f4 * v4) / typename genType::value_type(2); + + } + + template + GLM_FUNC_QUALIFIER genType hermite + ( + genType const& v1, + genType const& t1, + genType const& v2, + genType const& t2, + typename genType::value_type const& s + ) + { + typename genType::value_type s2 = pow2(s); + typename genType::value_type s3 = pow3(s); + + typename genType::value_type f1 = typename genType::value_type(2) * s3 - typename genType::value_type(3) * s2 + typename genType::value_type(1); + typename genType::value_type f2 = typename genType::value_type(-2) * s3 + typename genType::value_type(3) * s2; + typename genType::value_type f3 = s3 - typename genType::value_type(2) * s2 + s; + typename genType::value_type f4 = s3 - s2; + + return f1 * v1 + f2 * v2 + f3 * t1 + f4 * t2; + } + + template + GLM_FUNC_QUALIFIER genType cubic + ( + genType const& v1, + genType const& v2, + genType const& v3, + genType const& v4, + typename genType::value_type const& s + ) + { + return ((v1 * s + v2) * s + v3) * s + v4; + } +}//namespace glm diff --git a/includes/glm/gtx/std_based_type.hpp b/includes/glm/gtx/std_based_type.hpp new file mode 100644 index 0000000..864885d --- /dev/null +++ b/includes/glm/gtx/std_based_type.hpp @@ -0,0 +1,66 @@ +/// @ref gtx_std_based_type +/// @file glm/gtx/std_based_type.hpp +/// +/// @see core (dependence) +/// @see gtx_extented_min_max (dependence) +/// +/// @defgroup gtx_std_based_type GLM_GTX_std_based_type +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Adds vector types based on STL value types. + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_std_based_type is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_std_based_type extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_std_based_type + /// @{ + + /// Vector type based of one std::size_t component. + /// @see GLM_GTX_std_based_type + typedef vec<1, std::size_t, defaultp> size1; + + /// Vector type based of two std::size_t components. + /// @see GLM_GTX_std_based_type + typedef vec<2, std::size_t, defaultp> size2; + + /// Vector type based of three std::size_t components. 
+ /// @see GLM_GTX_std_based_type + typedef vec<3, std::size_t, defaultp> size3; + + /// Vector type based of four std::size_t components. + /// @see GLM_GTX_std_based_type + typedef vec<4, std::size_t, defaultp> size4; + + /// Vector type based of one std::size_t component. + /// @see GLM_GTX_std_based_type + typedef vec<1, std::size_t, defaultp> size1_t; + + /// Vector type based of two std::size_t components. + /// @see GLM_GTX_std_based_type + typedef vec<2, std::size_t, defaultp> size2_t; + + /// Vector type based of three std::size_t components. + /// @see GLM_GTX_std_based_type + typedef vec<3, std::size_t, defaultp> size3_t; + + /// Vector type based of four std::size_t components. + /// @see GLM_GTX_std_based_type + typedef vec<4, std::size_t, defaultp> size4_t; + + /// @} +}//namespace glm + +#include "std_based_type.inl" diff --git a/includes/glm/gtx/std_based_type.inl b/includes/glm/gtx/std_based_type.inl new file mode 100644 index 0000000..9c34bdb --- /dev/null +++ b/includes/glm/gtx/std_based_type.inl @@ -0,0 +1,6 @@ +/// @ref gtx_std_based_type + +namespace glm +{ + +} diff --git a/includes/glm/gtx/string_cast.hpp b/includes/glm/gtx/string_cast.hpp new file mode 100644 index 0000000..2958edc --- /dev/null +++ b/includes/glm/gtx/string_cast.hpp @@ -0,0 +1,45 @@ +/// @ref gtx_string_cast +/// @file glm/gtx/string_cast.hpp +/// +/// @see core (dependence) +/// @see gtx_integer (dependence) +/// @see gtx_quaternion (dependence) +/// +/// @defgroup gtx_string_cast GLM_GTX_string_cast +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Setup strings for GLM type values + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/type_precision.hpp" +#include "../gtc/quaternion.hpp" +#include "../gtx/dual_quaternion.hpp" +#include +#include +#include + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_string_cast is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_string_cast extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_string_cast + /// @{ + + /// Create a string from a GLM vector or matrix typed variable. + /// @see gtx_string_cast extension. + template + GLM_FUNC_DECL std::string to_string(genType const& x); + + /// @} +}//namespace glm + +#include "string_cast.inl" diff --git a/includes/glm/gtx/string_cast.inl b/includes/glm/gtx/string_cast.inl new file mode 100644 index 0000000..875f2be --- /dev/null +++ b/includes/glm/gtx/string_cast.inl @@ -0,0 +1,497 @@ +/// @ref gtx_string_cast + +#include +#include + +namespace glm{ +namespace detail +{ + template + struct cast + { + typedef T value_type; + }; + + template <> + struct cast + { + typedef double value_type; + }; + + GLM_FUNC_QUALIFIER std::string format(const char* message, ...) 
{ + std::size_t const STRING_BUFFER(4096); + + assert(message != NULL); + assert(strlen(message) < STRING_BUFFER); + + char buffer[STRING_BUFFER]; + va_list list; + +#if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wformat-nonliteral" +#endif + + va_start(list, message); + vsnprintf(buffer, STRING_BUFFER, message, list); + va_end(list); + +#if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic pop +#endif + + return buffer; + } + + static const char* LabelTrue = "true"; + static const char* LabelFalse = "false"; + + template + struct literal + { + GLM_FUNC_QUALIFIER static char const * value() {return "%d";} + }; + + template + struct literal + { + GLM_FUNC_QUALIFIER static char const * value() {return "%f";} + }; + +# if GLM_MODEL == GLM_MODEL_32 && GLM_COMPILER && GLM_COMPILER_VC + template<> + struct literal + { + GLM_FUNC_QUALIFIER static char const * value() {return "%lld";} + }; + + template<> + struct literal + { + GLM_FUNC_QUALIFIER static char const * value() {return "%lld";} + }; +# endif//GLM_MODEL == GLM_MODEL_32 && GLM_COMPILER && GLM_COMPILER_VC + + template + struct prefix{}; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "d";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "b";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "u8";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "i8";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "u16";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "i16";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "u";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "i";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "u64";} + }; + + template<> + struct prefix + { + GLM_FUNC_QUALIFIER static char const * value() {return "i64";} + }; + + template + struct compute_to_string + {}; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(vec<1, bool, Q> const& x) + { + return detail::format("bvec1(%s)", + x[0] ? detail::LabelTrue : detail::LabelFalse); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(vec<2, bool, Q> const& x) + { + return detail::format("bvec2(%s, %s)", + x[0] ? detail::LabelTrue : detail::LabelFalse, + x[1] ? detail::LabelTrue : detail::LabelFalse); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(vec<3, bool, Q> const& x) + { + return detail::format("bvec3(%s, %s, %s)", + x[0] ? detail::LabelTrue : detail::LabelFalse, + x[1] ? detail::LabelTrue : detail::LabelFalse, + x[2] ? detail::LabelTrue : detail::LabelFalse); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(vec<4, bool, Q> const& x) + { + return detail::format("bvec4(%s, %s, %s, %s)", + x[0] ? detail::LabelTrue : detail::LabelFalse, + x[1] ? detail::LabelTrue : detail::LabelFalse, + x[2] ? detail::LabelTrue : detail::LabelFalse, + x[3] ? 
detail::LabelTrue : detail::LabelFalse); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(vec<1, T, Q> const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%svec1(%s)", + PrefixStr, + LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x[0])); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(vec<2, T, Q> const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%svec2(%s, %s)", + PrefixStr, + LiteralStr, LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x[0]), + static_cast::value_type>(x[1])); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(vec<3, T, Q> const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%svec3(%s, %s, %s)", + PrefixStr, + LiteralStr, LiteralStr, LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x[0]), + static_cast::value_type>(x[1]), + static_cast::value_type>(x[2])); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(vec<4, T, Q> const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%svec4(%s, %s, %s, %s)", + PrefixStr, + LiteralStr, LiteralStr, LiteralStr, LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x[0]), + static_cast::value_type>(x[1]), + static_cast::value_type>(x[2]), + static_cast::value_type>(x[3])); + } + }; + + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(mat<2, 2, T, Q> const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%smat2x2((%s, %s), (%s, %s))", + PrefixStr, + LiteralStr, LiteralStr, + LiteralStr, LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), + static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1])); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(mat<2, 3, T, Q> const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%smat2x3((%s, %s, %s), (%s, %s, %s))", + PrefixStr, + LiteralStr, LiteralStr, LiteralStr, + LiteralStr, LiteralStr, LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), + static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2])); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(mat<2, 4, T, Q> const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%smat2x4((%s, %s, %s, %s), (%s, %s, %s, %s))", + PrefixStr, + LiteralStr, LiteralStr, LiteralStr, LiteralStr, + LiteralStr, LiteralStr, LiteralStr, 
LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), static_cast::value_type>(x[0][3]), + static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), static_cast::value_type>(x[1][3])); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(mat<3, 2, T, Q> const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%smat3x2((%s, %s), (%s, %s), (%s, %s))", + PrefixStr, + LiteralStr, LiteralStr, + LiteralStr, LiteralStr, + LiteralStr, LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), + static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), + static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1])); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(mat<3, 3, T, Q> const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%smat3x3((%s, %s, %s), (%s, %s, %s), (%s, %s, %s))", + PrefixStr, + LiteralStr, LiteralStr, LiteralStr, + LiteralStr, LiteralStr, LiteralStr, + LiteralStr, LiteralStr, LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), + static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), + static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), static_cast::value_type>(x[2][2])); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(mat<3, 4, T, Q> const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%smat3x4((%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s))", + PrefixStr, + LiteralStr, LiteralStr, LiteralStr, LiteralStr, + LiteralStr, LiteralStr, LiteralStr, LiteralStr, + LiteralStr, LiteralStr, LiteralStr, LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), static_cast::value_type>(x[0][3]), + static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), static_cast::value_type>(x[1][3]), + static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), static_cast::value_type>(x[2][2]), static_cast::value_type>(x[2][3])); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(mat<4, 2, T, Q> const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%smat4x2((%s, %s), (%s, %s), (%s, %s), (%s, %s))", + PrefixStr, + LiteralStr, LiteralStr, + LiteralStr, LiteralStr, + LiteralStr, LiteralStr, + LiteralStr, LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), + static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), + static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), + static_cast::value_type>(x[3][0]), 
static_cast::value_type>(x[3][1])); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(mat<4, 3, T, Q> const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%smat4x3((%s, %s, %s), (%s, %s, %s), (%s, %s, %s), (%s, %s, %s))", + PrefixStr, + LiteralStr, LiteralStr, LiteralStr, + LiteralStr, LiteralStr, LiteralStr, + LiteralStr, LiteralStr, LiteralStr, + LiteralStr, LiteralStr, LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), + static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), + static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), static_cast::value_type>(x[2][2]), + static_cast::value_type>(x[3][0]), static_cast::value_type>(x[3][1]), static_cast::value_type>(x[3][2])); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(mat<4, 4, T, Q> const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%smat4x4((%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s))", + PrefixStr, + LiteralStr, LiteralStr, LiteralStr, LiteralStr, + LiteralStr, LiteralStr, LiteralStr, LiteralStr, + LiteralStr, LiteralStr, LiteralStr, LiteralStr, + LiteralStr, LiteralStr, LiteralStr, LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), static_cast::value_type>(x[0][3]), + static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), static_cast::value_type>(x[1][3]), + static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), static_cast::value_type>(x[2][2]), static_cast::value_type>(x[2][3]), + static_cast::value_type>(x[3][0]), static_cast::value_type>(x[3][1]), static_cast::value_type>(x[3][2]), static_cast::value_type>(x[3][3])); + } + }; + + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(qua const& q) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%squat(%s, {%s, %s, %s})", + PrefixStr, + LiteralStr, LiteralStr, LiteralStr, LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(q.w), + static_cast::value_type>(q.x), + static_cast::value_type>(q.y), + static_cast::value_type>(q.z)); + } + }; + + template + struct compute_to_string > + { + GLM_FUNC_QUALIFIER static std::string call(tdualquat const& x) + { + char const * PrefixStr = prefix::value(); + char const * LiteralStr = literal::is_iec559>::value(); + std::string FormatStr(detail::format("%sdualquat((%s, {%s, %s, %s}), (%s, {%s, %s, %s}))", + PrefixStr, + LiteralStr, LiteralStr, LiteralStr, LiteralStr, + LiteralStr, LiteralStr, LiteralStr, LiteralStr)); + + return detail::format(FormatStr.c_str(), + static_cast::value_type>(x.real.w), + static_cast::value_type>(x.real.x), + static_cast::value_type>(x.real.y), + static_cast::value_type>(x.real.z), + static_cast::value_type>(x.dual.w), + static_cast::value_type>(x.dual.x), + static_cast::value_type>(x.dual.y), + static_cast::value_type>(x.dual.z)); + } + }; + +}//namespace detail + 
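+// Editor's note: the public glm::to_string() below is a thin dispatcher to the
+// detail::compute_to_string specializations above. A hedged usage sketch (the exact
+// number of printed decimals follows the "%f" literal used by detail::format):
+//
+//     #define GLM_ENABLE_EXPERIMENTAL
+//     #include <glm/gtx/string_cast.hpp>
+//     #include <iostream>
+//
+//     int main()
+//     {
+//         std::cout << glm::to_string(glm::vec3(1.0f, 2.0f, 3.0f)) << '\n';
+//         // prints: vec3(1.000000, 2.000000, 3.000000)
+//     }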
+template +GLM_FUNC_QUALIFIER std::string to_string(matType const& x) +{ + return detail::compute_to_string::call(x); +} + +}//namespace glm diff --git a/includes/glm/gtx/texture.hpp b/includes/glm/gtx/texture.hpp new file mode 100644 index 0000000..608c6ad --- /dev/null +++ b/includes/glm/gtx/texture.hpp @@ -0,0 +1,44 @@ +/// @ref gtx_texture +/// @file glm/gtx/texture.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_texture GLM_GTX_texture +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Wrapping mode of texture coordinates. + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/integer.hpp" +#include "../gtx/component_wise.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_texture is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_texture extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_texture + /// @{ + + /// Compute the number of mipmaps levels necessary to create a mipmap complete texture + /// + /// @param Extent Extent of the texture base level mipmap + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point or signed integer scalar types + /// @tparam Q Value from qualifier enum + template + T levels(vec const& Extent); + + /// @} +}// namespace glm + +#include "texture.inl" + diff --git a/includes/glm/gtx/texture.inl b/includes/glm/gtx/texture.inl new file mode 100644 index 0000000..593c826 --- /dev/null +++ b/includes/glm/gtx/texture.inl @@ -0,0 +1,17 @@ +/// @ref gtx_texture + +namespace glm +{ + template + inline T levels(vec const& Extent) + { + return glm::log2(compMax(Extent)) + static_cast(1); + } + + template + inline T levels(T Extent) + { + return vec<1, T, defaultp>(Extent).x; + } +}//namespace glm + diff --git a/includes/glm/gtx/transform.hpp b/includes/glm/gtx/transform.hpp new file mode 100644 index 0000000..9707b50 --- /dev/null +++ b/includes/glm/gtx/transform.hpp @@ -0,0 +1,58 @@ +/// @ref gtx_transform +/// @file glm/gtx/transform.hpp +/// +/// @see core (dependence) +/// @see gtc_matrix_transform (dependence) +/// @see gtx_transform +/// @see gtx_transform2 +/// +/// @defgroup gtx_transform GLM_GTX_transform +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Add transformation matrices + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/matrix_transform.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_transform is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_transform extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_transform + /// @{ + + /// Transforms a matrix with a translation 4 * 4 matrix created from 3 scalars. + /// @see gtc_matrix_transform + /// @see gtx_transform + template + GLM_FUNC_DECL mat<4, 4, T, Q> translate( + vec<3, T, Q> const& v); + + /// Builds a rotation 4 * 4 matrix created from an axis of 3 scalars and an angle expressed in radians. 
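+	// Editor's note (an illustrative composition sketch, not part of the original header):
+	// these GTX wrappers build each transform against an identity matrix, so a model matrix
+	// can be composed directly without passing an explicit mat4:
+	//
+	//     #define GLM_ENABLE_EXPERIMENTAL
+	//     #include <glm/gtx/transform.hpp>
+	//
+	//     glm::mat4 model = glm::translate(glm::vec3(0.0f, 1.0f, 0.0f))
+	//                     * glm::rotate(glm::radians(90.0f), glm::vec3(0.0f, 1.0f, 0.0f))
+	//                     * glm::scale(glm::vec3(2.0f));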
+ /// @see gtc_matrix_transform + /// @see gtx_transform + template + GLM_FUNC_DECL mat<4, 4, T, Q> rotate( + T angle, + vec<3, T, Q> const& v); + + /// Transforms a matrix with a scale 4 * 4 matrix created from a vector of 3 components. + /// @see gtc_matrix_transform + /// @see gtx_transform + template + GLM_FUNC_DECL mat<4, 4, T, Q> scale( + vec<3, T, Q> const& v); + + /// @} +}// namespace glm + +#include "transform.inl" diff --git a/includes/glm/gtx/transform.inl b/includes/glm/gtx/transform.inl new file mode 100644 index 0000000..48ee680 --- /dev/null +++ b/includes/glm/gtx/transform.inl @@ -0,0 +1,23 @@ +/// @ref gtx_transform + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> translate(vec<3, T, Q> const& v) + { + return translate(mat<4, 4, T, Q>(static_cast(1)), v); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate(T angle, vec<3, T, Q> const& v) + { + return rotate(mat<4, 4, T, Q>(static_cast(1)), angle, v); + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale(vec<3, T, Q> const& v) + { + return scale(mat<4, 4, T, Q>(static_cast(1)), v); + } + +}//namespace glm diff --git a/includes/glm/gtx/transform2.hpp b/includes/glm/gtx/transform2.hpp new file mode 100644 index 0000000..9604a92 --- /dev/null +++ b/includes/glm/gtx/transform2.hpp @@ -0,0 +1,87 @@ +/// @ref gtx_transform2 +/// @file glm/gtx/transform2.hpp +/// +/// @see core (dependence) +/// @see gtx_transform (dependence) +/// +/// @defgroup gtx_transform2 GLM_GTX_transform2 +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Add extra transformation matrices + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtx/transform.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_transform2 is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_transform2 extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_transform2 + /// @{ + + //! Transforms a matrix with a shearing on X axis. + //! From GLM_GTX_transform2 extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> shearX2D(mat<3, 3, T, Q> const& m, T y); + + //! Transforms a matrix with a shearing on Y axis. + //! From GLM_GTX_transform2 extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> shearY2D(mat<3, 3, T, Q> const& m, T x); + + //! Transforms a matrix with a shearing on X axis + //! From GLM_GTX_transform2 extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> shearX3D(mat<4, 4, T, Q> const& m, T y, T z); + + //! Transforms a matrix with a shearing on Y axis. + //! From GLM_GTX_transform2 extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> shearY3D(mat<4, 4, T, Q> const& m, T x, T z); + + //! Transforms a matrix with a shearing on Z axis. + //! From GLM_GTX_transform2 extension. 
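+	// Editor's note (a hedged illustration of the 2D shear helpers above): with GLM's
+	// column-major storage, shearX2D(m, s) post-multiplies m by a matrix whose [1][0]
+	// entry is s, i.e. it maps (x, y) to (x + s*y, y):
+	//
+	//     #define GLM_ENABLE_EXPERIMENTAL
+	//     #include <glm/gtx/transform2.hpp>
+	//
+	//     glm::mat3 sheared = glm::shearX2D(glm::mat3(1.0f), 0.5f);
+	//     glm::vec3 p = sheared * glm::vec3(1.0f, 2.0f, 1.0f);  // p.x == 2.0f, p.y == 2.0f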
+ template + GLM_FUNC_DECL mat<4, 4, T, Q> shearZ3D(mat<4, 4, T, Q> const& m, T x, T y); + + //template GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shear(const mat<4, 4, T, Q> & m, shearPlane, planePoint, angle) + // Identity + tan(angle) * cross(Normal, OnPlaneVector) 0 + // - dot(PointOnPlane, normal) * OnPlaneVector 1 + + // Reflect functions seem to don't work + //template mat<3, 3, T, Q> reflect2D(const mat<3, 3, T, Q> & m, const vec<3, T, Q>& normal){return reflect2DGTX(m, normal);} //!< \brief Build a reflection matrix (from GLM_GTX_transform2 extension) + //template mat<4, 4, T, Q> reflect3D(const mat<4, 4, T, Q> & m, const vec<3, T, Q>& normal){return reflect3DGTX(m, normal);} //!< \brief Build a reflection matrix (from GLM_GTX_transform2 extension) + + //! Build planar projection matrix along normal axis. + //! From GLM_GTX_transform2 extension. + template + GLM_FUNC_DECL mat<3, 3, T, Q> proj2D(mat<3, 3, T, Q> const& m, vec<3, T, Q> const& normal); + + //! Build planar projection matrix along normal axis. + //! From GLM_GTX_transform2 extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> proj3D(mat<4, 4, T, Q> const & m, vec<3, T, Q> const& normal); + + //! Build a scale bias matrix. + //! From GLM_GTX_transform2 extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> scaleBias(T scale, T bias); + + //! Build a scale bias matrix. + //! From GLM_GTX_transform2 extension. + template + GLM_FUNC_DECL mat<4, 4, T, Q> scaleBias(mat<4, 4, T, Q> const& m, T scale, T bias); + + /// @} +}// namespace glm + +#include "transform2.inl" diff --git a/includes/glm/gtx/transform2.inl b/includes/glm/gtx/transform2.inl new file mode 100644 index 0000000..0118ab0 --- /dev/null +++ b/includes/glm/gtx/transform2.inl @@ -0,0 +1,125 @@ +/// @ref gtx_transform2 + +namespace glm +{ + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX2D(mat<3, 3, T, Q> const& m, T s) + { + mat<3, 3, T, Q> r(1); + r[1][0] = s; + return m * r; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY2D(mat<3, 3, T, Q> const& m, T s) + { + mat<3, 3, T, Q> r(1); + r[0][1] = s; + return m * r; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearX3D(mat<4, 4, T, Q> const& m, T s, T t) + { + mat<4, 4, T, Q> r(1); + r[0][1] = s; + r[0][2] = t; + return m * r; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearY3D(mat<4, 4, T, Q> const& m, T s, T t) + { + mat<4, 4, T, Q> r(1); + r[1][0] = s; + r[1][2] = t; + return m * r; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearZ3D(mat<4, 4, T, Q> const& m, T s, T t) + { + mat<4, 4, T, Q> r(1); + r[2][0] = s; + r[2][1] = t; + return m * r; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> reflect2D(mat<3, 3, T, Q> const& m, vec<3, T, Q> const& normal) + { + mat<3, 3, T, Q> r(static_cast(1)); + r[0][0] = static_cast(1) - static_cast(2) * normal.x * normal.x; + r[0][1] = -static_cast(2) * normal.x * normal.y; + r[1][0] = -static_cast(2) * normal.x * normal.y; + r[1][1] = static_cast(1) - static_cast(2) * normal.y * normal.y; + return m * r; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> reflect3D(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& normal) + { + mat<4, 4, T, Q> r(static_cast(1)); + r[0][0] = static_cast(1) - static_cast(2) * normal.x * normal.x; + r[0][1] = -static_cast(2) * normal.x * normal.y; + r[0][2] = -static_cast(2) * normal.x * normal.z; + + r[1][0] = -static_cast(2) * normal.x * normal.y; + r[1][1] = static_cast(1) - static_cast(2) * normal.y * normal.y; + r[1][2] = -static_cast(2) * normal.y * normal.z; + + r[2][0] = -static_cast(2) * 
normal.x * normal.z; + r[2][1] = -static_cast(2) * normal.y * normal.z; + r[2][2] = static_cast(1) - static_cast(2) * normal.z * normal.z; + return m * r; + } + + template + GLM_FUNC_QUALIFIER mat<3, 3, T, Q> proj2D( + const mat<3, 3, T, Q>& m, + const vec<3, T, Q>& normal) + { + mat<3, 3, T, Q> r(static_cast(1)); + r[0][0] = static_cast(1) - normal.x * normal.x; + r[0][1] = - normal.x * normal.y; + r[1][0] = - normal.x * normal.y; + r[1][1] = static_cast(1) - normal.y * normal.y; + return m * r; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> proj3D( + const mat<4, 4, T, Q>& m, + const vec<3, T, Q>& normal) + { + mat<4, 4, T, Q> r(static_cast(1)); + r[0][0] = static_cast(1) - normal.x * normal.x; + r[0][1] = - normal.x * normal.y; + r[0][2] = - normal.x * normal.z; + r[1][0] = - normal.x * normal.y; + r[1][1] = static_cast(1) - normal.y * normal.y; + r[1][2] = - normal.y * normal.z; + r[2][0] = - normal.x * normal.z; + r[2][1] = - normal.y * normal.z; + r[2][2] = static_cast(1) - normal.z * normal.z; + return m * r; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scaleBias(T scale, T bias) + { + mat<4, 4, T, Q> result; + result[3] = vec<4, T, Q>(vec<3, T, Q>(bias), static_cast(1)); + result[0][0] = scale; + result[1][1] = scale; + result[2][2] = scale; + return result; + } + + template + GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scaleBias(mat<4, 4, T, Q> const& m, T scale, T bias) + { + return m * scaleBias(scale, bias); + } +}//namespace glm + diff --git a/includes/glm/gtx/type_aligned.hpp b/includes/glm/gtx/type_aligned.hpp new file mode 100644 index 0000000..ec40935 --- /dev/null +++ b/includes/glm/gtx/type_aligned.hpp @@ -0,0 +1,980 @@ +/// @ref gtx_type_aligned +/// @file glm/gtx/type_aligned.hpp +/// +/// @see core (dependence) +/// @see gtc_quaternion (dependence) +/// +/// @defgroup gtx_type_aligned GLM_GTX_type_aligned +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Defines aligned types. + +#pragma once + +// Dependency: +#include "../gtc/type_precision.hpp" +#include "../gtc/quaternion.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# pragma message("GLM: GLM_GTX_type_aligned is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_type_aligned extension included") +#endif + +namespace glm +{ + /////////////////////////// + // Signed int vector types + + /// @addtogroup gtx_type_aligned + /// @{ + + /// Low qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_int8, aligned_lowp_int8, 1); + + /// Low qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_int16, aligned_lowp_int16, 2); + + /// Low qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_int32, aligned_lowp_int32, 4); + + /// Low qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_int64, aligned_lowp_int64, 8); + + + /// Low qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_int8_t, aligned_lowp_int8_t, 1); + + /// Low qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_int16_t, aligned_lowp_int16_t, 2); + + /// Low qualifier 32 bit signed integer aligned scalar type. 
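+	// Editor's note (a hedged sketch, not part of the original header): each
+	// GLM_ALIGNED_TYPEDEF(Src, Dst, N) above declares Dst as Src with an N-byte alignment
+	// requirement; on toolchains where GLM can express alignment this can be verified at
+	// compile time, e.g.:
+	//
+	//     #define GLM_ENABLE_EXPERIMENTAL
+	//     #include <glm/gtx/type_aligned.hpp>
+	//
+	//     static_assert(alignof(glm::aligned_lowp_int64) == 8, "expected an 8-byte aligned scalar");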
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_int32_t, aligned_lowp_int32_t, 4); + + /// Low qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_int64_t, aligned_lowp_int64_t, 8); + + + /// Low qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_i8, aligned_lowp_i8, 1); + + /// Low qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_i16, aligned_lowp_i16, 2); + + /// Low qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_i32, aligned_lowp_i32, 4); + + /// Low qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_i64, aligned_lowp_i64, 8); + + + /// Medium qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int8, aligned_mediump_int8, 1); + + /// Medium qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int16, aligned_mediump_int16, 2); + + /// Medium qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int32, aligned_mediump_int32, 4); + + /// Medium qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int64, aligned_mediump_int64, 8); + + + /// Medium qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int8_t, aligned_mediump_int8_t, 1); + + /// Medium qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int16_t, aligned_mediump_int16_t, 2); + + /// Medium qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int32_t, aligned_mediump_int32_t, 4); + + /// Medium qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_int64_t, aligned_mediump_int64_t, 8); + + + /// Medium qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_i8, aligned_mediump_i8, 1); + + /// Medium qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_i16, aligned_mediump_i16, 2); + + /// Medium qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_i32, aligned_mediump_i32, 4); + + /// Medium qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_i64, aligned_mediump_i64, 8); + + + /// High qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int8, aligned_highp_int8, 1); + + /// High qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int16, aligned_highp_int16, 2); + + /// High qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int32, aligned_highp_int32, 4); + + /// High qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int64, aligned_highp_int64, 8); + + + /// High qualifier 8 bit signed integer aligned scalar type. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int8_t, aligned_highp_int8_t, 1); + + /// High qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int16_t, aligned_highp_int16_t, 2); + + /// High qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int32_t, aligned_highp_int32_t, 4); + + /// High qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_int64_t, aligned_highp_int64_t, 8); + + + /// High qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_i8, aligned_highp_i8, 1); + + /// High qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_i16, aligned_highp_i16, 2); + + /// High qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_i32, aligned_highp_i32, 4); + + /// High qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_i64, aligned_highp_i64, 8); + + + /// Default qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int8, aligned_int8, 1); + + /// Default qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int16, aligned_int16, 2); + + /// Default qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int32, aligned_int32, 4); + + /// Default qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int64, aligned_int64, 8); + + + /// Default qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int8_t, aligned_int8_t, 1); + + /// Default qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int16_t, aligned_int16_t, 2); + + /// Default qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int32_t, aligned_int32_t, 4); + + /// Default qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(int64_t, aligned_int64_t, 8); + + + /// Default qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i8, aligned_i8, 1); + + /// Default qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i16, aligned_i16, 2); + + /// Default qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i32, aligned_i32, 4); + + /// Default qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i64, aligned_i64, 8); + + + /// Default qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(ivec1, aligned_ivec1, 4); + + /// Default qualifier 32 bit signed integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(ivec2, aligned_ivec2, 8); + + /// Default qualifier 32 bit signed integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(ivec3, aligned_ivec3, 16); + + /// Default qualifier 32 bit signed integer aligned vector of 4 components type. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(ivec4, aligned_ivec4, 16); + + + /// Default qualifier 8 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i8vec1, aligned_i8vec1, 1); + + /// Default qualifier 8 bit signed integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i8vec2, aligned_i8vec2, 2); + + /// Default qualifier 8 bit signed integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i8vec3, aligned_i8vec3, 4); + + /// Default qualifier 8 bit signed integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i8vec4, aligned_i8vec4, 4); + + + /// Default qualifier 16 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i16vec1, aligned_i16vec1, 2); + + /// Default qualifier 16 bit signed integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i16vec2, aligned_i16vec2, 4); + + /// Default qualifier 16 bit signed integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i16vec3, aligned_i16vec3, 8); + + /// Default qualifier 16 bit signed integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i16vec4, aligned_i16vec4, 8); + + + /// Default qualifier 32 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i32vec1, aligned_i32vec1, 4); + + /// Default qualifier 32 bit signed integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i32vec2, aligned_i32vec2, 8); + + /// Default qualifier 32 bit signed integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i32vec3, aligned_i32vec3, 16); + + /// Default qualifier 32 bit signed integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i32vec4, aligned_i32vec4, 16); + + + /// Default qualifier 64 bit signed integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i64vec1, aligned_i64vec1, 8); + + /// Default qualifier 64 bit signed integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i64vec2, aligned_i64vec2, 16); + + /// Default qualifier 64 bit signed integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i64vec3, aligned_i64vec3, 32); + + /// Default qualifier 64 bit signed integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(i64vec4, aligned_i64vec4, 32); + + + ///////////////////////////// + // Unsigned int vector types + + /// Low qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint8, aligned_lowp_uint8, 1); + + /// Low qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint16, aligned_lowp_uint16, 2); + + /// Low qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint32, aligned_lowp_uint32, 4); + + /// Low qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint64, aligned_lowp_uint64, 8); + + + /// Low qualifier 8 bit unsigned integer aligned scalar type. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint8_t, aligned_lowp_uint8_t, 1); + + /// Low qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint16_t, aligned_lowp_uint16_t, 2); + + /// Low qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint32_t, aligned_lowp_uint32_t, 4); + + /// Low qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_uint64_t, aligned_lowp_uint64_t, 8); + + + /// Low qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_u8, aligned_lowp_u8, 1); + + /// Low qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_u16, aligned_lowp_u16, 2); + + /// Low qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_u32, aligned_lowp_u32, 4); + + /// Low qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(lowp_u64, aligned_lowp_u64, 8); + + + /// Medium qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint8, aligned_mediump_uint8, 1); + + /// Medium qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint16, aligned_mediump_uint16, 2); + + /// Medium qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint32, aligned_mediump_uint32, 4); + + /// Medium qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint64, aligned_mediump_uint64, 8); + + + /// Medium qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint8_t, aligned_mediump_uint8_t, 1); + + /// Medium qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint16_t, aligned_mediump_uint16_t, 2); + + /// Medium qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint32_t, aligned_mediump_uint32_t, 4); + + /// Medium qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_uint64_t, aligned_mediump_uint64_t, 8); + + + /// Medium qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_u8, aligned_mediump_u8, 1); + + /// Medium qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_u16, aligned_mediump_u16, 2); + + /// Medium qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_u32, aligned_mediump_u32, 4); + + /// Medium qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mediump_u64, aligned_mediump_u64, 8); + + + /// High qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint8, aligned_highp_uint8, 1); + + /// High qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint16, aligned_highp_uint16, 2); + + /// High qualifier 32 bit unsigned integer aligned scalar type. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint32, aligned_highp_uint32, 4); + + /// High qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint64, aligned_highp_uint64, 8); + + + /// High qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint8_t, aligned_highp_uint8_t, 1); + + /// High qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint16_t, aligned_highp_uint16_t, 2); + + /// High qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint32_t, aligned_highp_uint32_t, 4); + + /// High qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_uint64_t, aligned_highp_uint64_t, 8); + + + /// High qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_u8, aligned_highp_u8, 1); + + /// High qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_u16, aligned_highp_u16, 2); + + /// High qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_u32, aligned_highp_u32, 4); + + /// High qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(highp_u64, aligned_highp_u64, 8); + + + /// Default qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint8, aligned_uint8, 1); + + /// Default qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint16, aligned_uint16, 2); + + /// Default qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint32, aligned_uint32, 4); + + /// Default qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint64, aligned_uint64, 8); + + + /// Default qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint8_t, aligned_uint8_t, 1); + + /// Default qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint16_t, aligned_uint16_t, 2); + + /// Default qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint32_t, aligned_uint32_t, 4); + + /// Default qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uint64_t, aligned_uint64_t, 8); + + + /// Default qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u8, aligned_u8, 1); + + /// Default qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u16, aligned_u16, 2); + + /// Default qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u32, aligned_u32, 4); + + /// Default qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u64, aligned_u64, 8); + + + /// Default qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uvec1, aligned_uvec1, 4); + + /// Default qualifier 32 bit unsigned integer aligned vector of 2 components type. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uvec2, aligned_uvec2, 8); + + /// Default qualifier 32 bit unsigned integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uvec3, aligned_uvec3, 16); + + /// Default qualifier 32 bit unsigned integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(uvec4, aligned_uvec4, 16); + + + /// Default qualifier 8 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u8vec1, aligned_u8vec1, 1); + + /// Default qualifier 8 bit unsigned integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u8vec2, aligned_u8vec2, 2); + + /// Default qualifier 8 bit unsigned integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u8vec3, aligned_u8vec3, 4); + + /// Default qualifier 8 bit unsigned integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u8vec4, aligned_u8vec4, 4); + + + /// Default qualifier 16 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u16vec1, aligned_u16vec1, 2); + + /// Default qualifier 16 bit unsigned integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u16vec2, aligned_u16vec2, 4); + + /// Default qualifier 16 bit unsigned integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u16vec3, aligned_u16vec3, 8); + + /// Default qualifier 16 bit unsigned integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u16vec4, aligned_u16vec4, 8); + + + /// Default qualifier 32 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u32vec1, aligned_u32vec1, 4); + + /// Default qualifier 32 bit unsigned integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u32vec2, aligned_u32vec2, 8); + + /// Default qualifier 32 bit unsigned integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u32vec3, aligned_u32vec3, 16); + + /// Default qualifier 32 bit unsigned integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u32vec4, aligned_u32vec4, 16); + + + /// Default qualifier 64 bit unsigned integer aligned scalar type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u64vec1, aligned_u64vec1, 8); + + /// Default qualifier 64 bit unsigned integer aligned vector of 2 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u64vec2, aligned_u64vec2, 16); + + /// Default qualifier 64 bit unsigned integer aligned vector of 3 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u64vec3, aligned_u64vec3, 32); + + /// Default qualifier 64 bit unsigned integer aligned vector of 4 components type. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(u64vec4, aligned_u64vec4, 32); + + + ////////////////////// + // Float vector types + + /// 32 bit single-qualifier floating-point aligned scalar. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(float32, aligned_float32, 4); + + /// 32 bit single-qualifier floating-point aligned scalar. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(float32_t, aligned_float32_t, 4); + + /// 32 bit single-qualifier floating-point aligned scalar. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(float32, aligned_f32, 4); + +# ifndef GLM_FORCE_SINGLE_ONLY + + /// 64 bit double-qualifier floating-point aligned scalar. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(float64, aligned_float64, 8); + + /// 64 bit double-qualifier floating-point aligned scalar. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(float64_t, aligned_float64_t, 8); + + /// 64 bit double-qualifier floating-point aligned scalar. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(float64, aligned_f64, 8); + +# endif//GLM_FORCE_SINGLE_ONLY + + + /// Single-qualifier floating-point aligned vector of 1 component. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(vec1, aligned_vec1, 4); + + /// Single-qualifier floating-point aligned vector of 2 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(vec2, aligned_vec2, 8); + + /// Single-qualifier floating-point aligned vector of 3 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(vec3, aligned_vec3, 16); + + /// Single-qualifier floating-point aligned vector of 4 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(vec4, aligned_vec4, 16); + + + /// Single-qualifier floating-point aligned vector of 1 component. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fvec1, aligned_fvec1, 4); + + /// Single-qualifier floating-point aligned vector of 2 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fvec2, aligned_fvec2, 8); + + /// Single-qualifier floating-point aligned vector of 3 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fvec3, aligned_fvec3, 16); + + /// Single-qualifier floating-point aligned vector of 4 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fvec4, aligned_fvec4, 16); + + + /// Single-qualifier floating-point aligned vector of 1 component. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32vec1, aligned_f32vec1, 4); + + /// Single-qualifier floating-point aligned vector of 2 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32vec2, aligned_f32vec2, 8); + + /// Single-qualifier floating-point aligned vector of 3 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32vec3, aligned_f32vec3, 16); + + /// Single-qualifier floating-point aligned vector of 4 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32vec4, aligned_f32vec4, 16); + + + /// Double-qualifier floating-point aligned vector of 1 component. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(dvec1, aligned_dvec1, 8); + + /// Double-qualifier floating-point aligned vector of 2 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(dvec2, aligned_dvec2, 16); + + /// Double-qualifier floating-point aligned vector of 3 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(dvec3, aligned_dvec3, 32); + + /// Double-qualifier floating-point aligned vector of 4 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(dvec4, aligned_dvec4, 32); + + +# ifndef GLM_FORCE_SINGLE_ONLY + + /// Double-qualifier floating-point aligned vector of 1 component. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64vec1, aligned_f64vec1, 8); + + /// Double-qualifier floating-point aligned vector of 2 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64vec2, aligned_f64vec2, 16); + + /// Double-qualifier floating-point aligned vector of 3 components. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64vec3, aligned_f64vec3, 32); + + /// Double-qualifier floating-point aligned vector of 4 components. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64vec4, aligned_f64vec4, 32); + +# endif//GLM_FORCE_SINGLE_ONLY + + ////////////////////// + // Float matrix types + + /// Single-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef detail::tmat1 mat1; + + /// Single-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mat2, aligned_mat2, 16); + + /// Single-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mat3, aligned_mat3, 16); + + /// Single-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mat4, aligned_mat4, 16); + + + /// Single-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef detail::tmat1x1 mat1; + + /// Single-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mat2x2, aligned_mat2x2, 16); + + /// Single-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mat3x3, aligned_mat3x3, 16); + + /// Single-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(mat4x4, aligned_mat4x4, 16); + + + /// Single-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef detail::tmat1x1 fmat1; + + /// Single-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat2x2, aligned_fmat2, 16); + + /// Single-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat3x3, aligned_fmat3, 16); + + /// Single-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat4x4, aligned_fmat4, 16); + + + /// Single-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef f32 fmat1x1; + + /// Single-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat2x2, aligned_fmat2x2, 16); + + /// Single-qualifier floating-point aligned 2x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat2x3, aligned_fmat2x3, 16); + + /// Single-qualifier floating-point aligned 2x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat2x4, aligned_fmat2x4, 16); + + /// Single-qualifier floating-point aligned 3x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat3x2, aligned_fmat3x2, 16); + + /// Single-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat3x3, aligned_fmat3x3, 16); + + /// Single-qualifier floating-point aligned 3x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat3x4, aligned_fmat3x4, 16); + + /// Single-qualifier floating-point aligned 4x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat4x2, aligned_fmat4x2, 16); + + /// Single-qualifier floating-point aligned 4x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat4x3, aligned_fmat4x3, 16); + + /// Single-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(fmat4x4, aligned_fmat4x4, 16); + + + /// Single-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef detail::tmat1x1 f32mat1; + + /// Single-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat2x2, aligned_f32mat2, 16); + + /// Single-qualifier floating-point aligned 3x3 matrix. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat3x3, aligned_f32mat3, 16); + + /// Single-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat4x4, aligned_f32mat4, 16); + + + /// Single-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef f32 f32mat1x1; + + /// Single-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat2x2, aligned_f32mat2x2, 16); + + /// Single-qualifier floating-point aligned 2x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat2x3, aligned_f32mat2x3, 16); + + /// Single-qualifier floating-point aligned 2x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat2x4, aligned_f32mat2x4, 16); + + /// Single-qualifier floating-point aligned 3x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat3x2, aligned_f32mat3x2, 16); + + /// Single-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat3x3, aligned_f32mat3x3, 16); + + /// Single-qualifier floating-point aligned 3x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat3x4, aligned_f32mat3x4, 16); + + /// Single-qualifier floating-point aligned 4x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat4x2, aligned_f32mat4x2, 16); + + /// Single-qualifier floating-point aligned 4x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat4x3, aligned_f32mat4x3, 16); + + /// Single-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32mat4x4, aligned_f32mat4x4, 16); + + +# ifndef GLM_FORCE_SINGLE_ONLY + + /// Double-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef detail::tmat1x1 f64mat1; + + /// Double-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat2x2, aligned_f64mat2, 32); + + /// Double-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat3x3, aligned_f64mat3, 32); + + /// Double-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat4x4, aligned_f64mat4, 32); + + + /// Double-qualifier floating-point aligned 1x1 matrix. + /// @see gtx_type_aligned + //typedef f64 f64mat1x1; + + /// Double-qualifier floating-point aligned 2x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat2x2, aligned_f64mat2x2, 32); + + /// Double-qualifier floating-point aligned 2x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat2x3, aligned_f64mat2x3, 32); + + /// Double-qualifier floating-point aligned 2x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat2x4, aligned_f64mat2x4, 32); + + /// Double-qualifier floating-point aligned 3x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat3x2, aligned_f64mat3x2, 32); + + /// Double-qualifier floating-point aligned 3x3 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat3x3, aligned_f64mat3x3, 32); + + /// Double-qualifier floating-point aligned 3x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat3x4, aligned_f64mat3x4, 32); + + /// Double-qualifier floating-point aligned 4x2 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat4x2, aligned_f64mat4x2, 32); + + /// Double-qualifier floating-point aligned 4x3 matrix. 
+ /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat4x3, aligned_f64mat4x3, 32); + + /// Double-qualifier floating-point aligned 4x4 matrix. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64mat4x4, aligned_f64mat4x4, 32); + +# endif//GLM_FORCE_SINGLE_ONLY + + + ////////////////////////// + // Quaternion types + + /// Single-qualifier floating-point aligned quaternion. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(quat, aligned_quat, 16); + + /// Single-qualifier floating-point aligned quaternion. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(quat, aligned_fquat, 16); + + /// Double-qualifier floating-point aligned quaternion. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(dquat, aligned_dquat, 32); + + /// Single-qualifier floating-point aligned quaternion. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f32quat, aligned_f32quat, 16); + +# ifndef GLM_FORCE_SINGLE_ONLY + + /// Double-qualifier floating-point aligned quaternion. + /// @see gtx_type_aligned + GLM_ALIGNED_TYPEDEF(f64quat, aligned_f64quat, 32); + +# endif//GLM_FORCE_SINGLE_ONLY + + /// @} +}//namespace glm + +#include "type_aligned.inl" diff --git a/includes/glm/gtx/type_aligned.inl b/includes/glm/gtx/type_aligned.inl new file mode 100644 index 0000000..54c1b81 --- /dev/null +++ b/includes/glm/gtx/type_aligned.inl @@ -0,0 +1,6 @@ +/// @ref gtc_type_aligned + +namespace glm +{ + +} diff --git a/includes/glm/gtx/type_trait.hpp b/includes/glm/gtx/type_trait.hpp new file mode 100644 index 0000000..17ddbad --- /dev/null +++ b/includes/glm/gtx/type_trait.hpp @@ -0,0 +1,83 @@ +/// @ref gtx_type_trait +/// @file glm/gtx/type_trait.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_type_trait GLM_GTX_type_trait +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Defines traits for each type. + +#pragma once + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_type_trait is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." 
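A minimal usage sketch for the aligned typedefs above, assuming a C++11 compiler and that GLM_ALIGNED_TYPEDEF applies the alignment passed as its third argument (the type names come straight from the typedefs in this header):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/gtx/type_aligned.hpp>

// aligned_vec4 and aligned_mat4 request 16-byte alignment, aligned_dvec4 requests 32.
static_assert(alignof(glm::aligned_vec4)  == 16, "aligned_vec4 should be 16-byte aligned");
static_assert(alignof(glm::aligned_dvec4) == 32, "aligned_dvec4 should be 32-byte aligned");
static_assert(alignof(glm::aligned_mat4)  == 16, "aligned_mat4 should be 16-byte aligned");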
+#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#	pragma message("GLM: GLM_GTX_type_trait extension included")
+#endif
+
+// Dependency:
+#include "../detail/qualifier.hpp"
+#include "../gtc/quaternion.hpp"
+#include "../gtx/dual_quaternion.hpp"
+
+namespace glm
+{
+	/// @addtogroup gtx_type_trait
+	/// @{
+
+	template<typename T>
+	struct type
+	{
+		static bool const is_vec = false;
+		static bool const is_mat = false;
+		static bool const is_quat = false;
+		static length_t const components = 0;
+		static length_t const cols = 0;
+		static length_t const rows = 0;
+	};
+
+	template<length_t L, typename T, qualifier Q>
+	struct type<vec<L, T, Q> >
+	{
+		static bool const is_vec = true;
+		static bool const is_mat = false;
+		static bool const is_quat = false;
+		static length_t const components = L;
+	};
+
+	template<length_t C, length_t R, typename T, qualifier Q>
+	struct type<mat<C, R, T, Q> >
+	{
+		static bool const is_vec = false;
+		static bool const is_mat = true;
+		static bool const is_quat = false;
+		static length_t const components = C;
+		static length_t const cols = C;
+		static length_t const rows = R;
+	};
+
+	template<typename T, qualifier Q>
+	struct type<qua<T, Q> >
+	{
+		static bool const is_vec = false;
+		static bool const is_mat = false;
+		static bool const is_quat = true;
+		static length_t const components = 4;
+	};
+
+	template<typename T, qualifier Q>
+	struct type<tdualquat<T, Q> >
+	{
+		static bool const is_vec = false;
+		static bool const is_mat = false;
+		static bool const is_quat = true;
+		static length_t const components = 8;
+	};
+
+	/// @}
+}//namespace glm
+
+#include "type_trait.inl"
diff --git a/includes/glm/gtx/type_trait.inl b/includes/glm/gtx/type_trait.inl
new file mode 100644
index 0000000..045de95
--- /dev/null
+++ b/includes/glm/gtx/type_trait.inl
@@ -0,0 +1,61 @@
+/// @ref gtx_type_trait
+
+namespace glm
+{
+	template<typename T>
+	bool const type<T>::is_vec;
+	template<typename T>
+	bool const type<T>::is_mat;
+	template<typename T>
+	bool const type<T>::is_quat;
+	template<typename T>
+	length_t const type<T>::components;
+	template<typename T>
+	length_t const type<T>::cols;
+	template<typename T>
+	length_t const type<T>::rows;
+
+	// vec
+	template<length_t L, typename T, qualifier Q>
+	bool const type<vec<L, T, Q> >::is_vec;
+	template<length_t L, typename T, qualifier Q>
+	bool const type<vec<L, T, Q> >::is_mat;
+	template<length_t L, typename T, qualifier Q>
+	bool const type<vec<L, T, Q> >::is_quat;
+	template<length_t L, typename T, qualifier Q>
+	length_t const type<vec<L, T, Q> >::components;
+
+	// mat
+	template<length_t C, length_t R, typename T, qualifier Q>
+	bool const type<mat<C, R, T, Q> >::is_vec;
+	template<length_t C, length_t R, typename T, qualifier Q>
+	bool const type<mat<C, R, T, Q> >::is_mat;
+	template<length_t C, length_t R, typename T, qualifier Q>
+	bool const type<mat<C, R, T, Q> >::is_quat;
+	template<length_t C, length_t R, typename T, qualifier Q>
+	length_t const type<mat<C, R, T, Q> >::components;
+	template<length_t C, length_t R, typename T, qualifier Q>
+	length_t const type<mat<C, R, T, Q> >::cols;
+	template<length_t C, length_t R, typename T, qualifier Q>
+	length_t const type<mat<C, R, T, Q> >::rows;
+
+	// tquat
+	template<typename T, qualifier Q>
+	bool const type<qua<T, Q> >::is_vec;
+	template<typename T, qualifier Q>
+	bool const type<qua<T, Q> >::is_mat;
+	template<typename T, qualifier Q>
+	bool const type<qua<T, Q> >::is_quat;
+	template<typename T, qualifier Q>
+	length_t const type<qua<T, Q> >::components;
+
+	// tdualquat
+	template<typename T, qualifier Q>
+	bool const type<tdualquat<T, Q> >::is_vec;
+	template<typename T, qualifier Q>
+	bool const type<tdualquat<T, Q> >::is_mat;
+	template<typename T, qualifier Q>
+	bool const type<tdualquat<T, Q> >::is_quat;
+	template<typename T, qualifier Q>
+	length_t const type<tdualquat<T, Q> >::components;
+}//namespace glm
diff --git a/includes/glm/gtx/vec_swizzle.hpp b/includes/glm/gtx/vec_swizzle.hpp
new file mode 100644
index 0000000..bce96e8
--- /dev/null
+++ b/includes/glm/gtx/vec_swizzle.hpp
@@ -0,0 +1,2769 @@
+/// @ref gtx_vec_swizzle
+/// @file glm/gtx/vec_swizzle.hpp
+///
+/// @see core (dependence)
+///
+/// @defgroup gtx_vec_swizzle GLM_GTX_vec_swizzle
+/// @ingroup gtx
+///
+/// Include to use the features of this extension.
+///
+/// Functions to perform swizzle operation.
+
+#pragma once
+
+#include "../glm.hpp"
+
+#ifndef GLM_ENABLE_EXPERIMENTAL
+#	error "GLM: GLM_GTX_vec_swizzle is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it."
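A hedged sketch of how the traits defined in type_trait.hpp above can be queried; it assumes a C++11 compiler, since the trait members are integral constants usable in static_assert:

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/type_trait.hpp>

// The primary template reports no traits for plain scalars.
static_assert(!glm::type<float>::is_vec, "float is not a GLM vector");

// The vec/mat/quat specializations expose component and dimension counts.
static_assert(glm::type<glm::vec3>::is_vec && glm::type<glm::vec3>::components == 3, "vec3 is a 3-component vector");
static_assert(glm::type<glm::mat4>::is_mat && glm::type<glm::mat4>::cols == 4 && glm::type<glm::mat4>::rows == 4, "mat4 is 4x4");
static_assert(glm::type<glm::quat>::is_quat && glm::type<glm::quat>::components == 4, "quat has 4 components");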
+#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_vec_swizzle extension included") +#endif + +namespace glm { + /// @addtogroup gtx_vec_swizzle + /// @{ + + // xx + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xx(const glm::vec<1, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xx(const glm::vec<2, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xx(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xx(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.x); + } + + // xy + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xy(const glm::vec<2, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xy(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xy(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.y); + } + + // xz + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xz(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xz(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.z); + } + + // xw + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> xw(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.x, v.w); + } + + // yx + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yx(const glm::vec<2, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yx(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yx(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.x); + } + + // yy + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yy(const glm::vec<2, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yy(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yy(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.y); + } + + // yz + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yz(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yz(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.z); + } + + // yw + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> yw(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.y, v.w); + } + + // zx + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zx(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zx(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.z, v.x); + } + + // zy + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zy(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zy(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.z, v.y); + } + + // zz + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zz(const glm::vec<3, T, Q> &v) { + return glm::vec<2, T, Q>(v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zz(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.z, v.z); + } + + // zw + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> zw(const glm::vec<4, T, Q> &v) { 
+ return glm::vec<2, T, Q>(v.z, v.w); + } + + // wx + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> wx(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.w, v.x); + } + + // wy + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> wy(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.w, v.y); + } + + // wz + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> wz(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.w, v.z); + } + + // ww + template + GLM_FUNC_QUALIFIER glm::vec<2, T, Q> ww(const glm::vec<4, T, Q> &v) { + return glm::vec<2, T, Q>(v.w, v.w); + } + + // xxx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxx(const glm::vec<1, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxx(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.x); + } + + // xxy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxy(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.y); + } + + // xxz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.z); + } + + // xxw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xxw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.x, v.w); + } + + // xyx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyx(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.x); + } + + // xyy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyy(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.y); + } + + // xyz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.z); + } + + // xyw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xyw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.y, v.w); + } + + // xzx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.z, v.x); + } + + // xzy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzy(const glm::vec<4, T, Q> &v) { + 
return glm::vec<3, T, Q>(v.x, v.z, v.y); + } + + // xzz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.z, v.z); + } + + // xzw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xzw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.z, v.w); + } + + // xwx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xwx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.w, v.x); + } + + // xwy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xwy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.w, v.y); + } + + // xwz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xwz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.w, v.z); + } + + // xww + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> xww(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.x, v.w, v.w); + } + + // yxx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxx(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.x); + } + + // yxy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxy(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.y); + } + + // yxz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.z); + } + + // yxw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yxw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.x, v.w); + } + + // yyx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyx(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.x); + } + + // yyy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyy(const glm::vec<2, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.y); + } + + // yyz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.z); + } + + // yyw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yyw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.y, v.w); + } + + // yzx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, 
Q> yzx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yzx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.x); + } + + // yzy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yzy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yzy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.y); + } + + // yzz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yzz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yzz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.z); + } + + // yzw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yzw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.z, v.w); + } + + // ywx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> ywx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.w, v.x); + } + + // ywy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> ywy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.w, v.y); + } + + // ywz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> ywz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.w, v.z); + } + + // yww + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> yww(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.y, v.w, v.w); + } + + // zxx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.x); + } + + // zxy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.y); + } + + // zxz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.z); + } + + // zxw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zxw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.x, v.w); + } + + // zyx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.x); + } + + // zyy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.y); + } + + // zyz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.z); + } + + // zyw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zyw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.y, v.w); + } + + // zzx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzx(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.x); + } + 
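Since every swizzle in this extension follows the same shape, a free function that builds a new vector from selected components of its argument, one short usage sketch may be clearer than reading the full list (it assumes GLM_ENABLE_EXPERIMENTAL is defined before the include):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/gtx/vec_swizzle.hpp>

void swizzle_demo()
{
	// Each swizzle copies the named components into a fresh vector; the input is not modified.
	glm::vec4 v(1.0f, 2.0f, 3.0f, 4.0f);
	glm::vec2 a = glm::xy(v);    // a == (1, 2)
	glm::vec3 b = glm::zyx(v);   // b == (3, 2, 1)
	glm::vec4 c = glm::xxzz(v);  // c == (1, 1, 3, 3)
	(void)a; (void)b; (void)c;   // silence unused-variable warnings in this sketch
}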
+ template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.x); + } + + // zzy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzy(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.y); + } + + // zzz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzz(const glm::vec<3, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.z); + } + + // zzw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zzw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.z, v.w); + } + + // zwx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zwx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.w, v.x); + } + + // zwy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zwy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.w, v.y); + } + + // zwz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zwz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.w, v.z); + } + + // zww + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> zww(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.z, v.w, v.w); + } + + // wxx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wxx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.x, v.x); + } + + // wxy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wxy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.x, v.y); + } + + // wxz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wxz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.x, v.z); + } + + // wxw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wxw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.x, v.w); + } + + // wyx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wyx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.y, v.x); + } + + // wyy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wyy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.y, v.y); + } + + // wyz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wyz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.y, v.z); + } + + // wyw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wyw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.y, v.w); + } + + // wzx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wzx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.z, v.x); + } + + // wzy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wzy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.z, v.y); + } + + // wzz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wzz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.z, v.z); + } + + // wzw + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wzw(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.z, v.w); + } + + // wwx + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wwx(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.w, v.x); + } + + // wwy + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wwy(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.w, v.y); + } + + // wwz + template + GLM_FUNC_QUALIFIER glm::vec<3, T, Q> wwz(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.w, v.z); + } + + // www + template + 
GLM_FUNC_QUALIFIER glm::vec<3, T, Q> www(const glm::vec<4, T, Q> &v) { + return glm::vec<3, T, Q>(v.w, v.w, v.w); + } + + // xxxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxx(const glm::vec<1, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); + } + + // xxxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.y); + } + + // xxxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.z); + } + + // xxxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.x, v.w); + } + + // xxyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x); + } + + // xxyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y); + } + + // xxyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.z); + } + + // xxyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.y, v.w); + } + + // xxzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, v.x); + } + + // xxzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, v.y); + } + + // xxzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, 
v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, v.z); + } + + // xxzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.z, v.w); + } + + // xxwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.w, v.x); + } + + // xxwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.w, v.y); + } + + // xxwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.w, v.z); + } + + // xxww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xxww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.x, v.w, v.w); + } + + // xyxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.x); + } + + // xyxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y); + } + + // xyxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.z); + } + + // xyxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.x, v.w); + } + + // xyyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x); + } + + // xyyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y); + } + + // xyyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.z); + } + + // xyyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.y, v.w); + } + + // xyzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyzx(const 
glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.z, v.x); + } + + // xyzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.z, v.y); + } + + + // xyzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.z, v.w); + } + + // xywx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xywx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.w, v.x); + } + + // xywy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xywy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.w, v.y); + } + + // xywz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xywz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.w, v.z); + } + + // xyww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xyww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.y, v.w, v.w); + } + + // xzxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.x); + } + + // xzxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.y); + } + + // xzxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.z); + } + + // xzxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.x, v.w); + } + + // xzyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.x); + } + + // xzyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.y); + } + + // xzyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.z); + } + + // xzyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.y, v.w); + } + + // xzzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, 
v.x); + } + + // xzzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, v.y); + } + + // xzzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, v.z); + } + + // xzzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.z, v.w); + } + + // xzwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.w, v.x); + } + + // xzwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.w, v.y); + } + + // xzwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.w, v.z); + } + + // xzww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xzww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.z, v.w, v.w); + } + + // xwxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.x, v.x); + } + + // xwxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.x, v.y); + } + + // xwxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.x, v.z); + } + + // xwxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.x, v.w); + } + + // xwyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.y, v.x); + } + + // xwyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.y, v.y); + } + + // xwyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.y, v.z); + } + + // xwyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.y, v.w); + } + + // xwzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.z, v.x); + } + + // xwzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.z, v.y); + } + + // xwzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.z, v.z); + } + + // xwzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.z, v.w); + } + + // xwwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.w, v.x); + } + + // xwwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.w, v.y); + } + + // xwwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, 
v.w, v.z); + } + + // xwww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> xwww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.x, v.w, v.w, v.w); + } + + // yxxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x); + } + + // yxxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y); + } + + // yxxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.z); + } + + // yxxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.x, v.w); + } + + // yxyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x); + } + + // yxyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y); + } + + // yxyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.z); + } + + // yxyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.y, v.w); + } + + // yxzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.z, v.x); + } + + // yxzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.z, v.y); + } + + // yxzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzz(const glm::vec<4, T, Q> 
&v) { + return glm::vec<4, T, Q>(v.y, v.x, v.z, v.z); + } + + // yxzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.z, v.w); + } + + // yxwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.w, v.x); + } + + // yxwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.w, v.y); + } + + // yxwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.w, v.z); + } + + // yxww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yxww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.x, v.w, v.w); + } + + // yyxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x); + } + + // yyxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y); + } + + // yyxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.z); + } + + // yyxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.x, v.w); + } + + // yyyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyx(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x); + } + + // yyyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyy(const glm::vec<2, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y); + } + + // yyyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.z); + } + + // yyyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.y, v.w); + } + + // yyzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.x); + } + + template + 
GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.x); + } + + // yyzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.y); + } + + // yyzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.z); + } + + // yyzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.z, v.w); + } + + // yywx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yywx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.w, v.x); + } + + // yywy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yywy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.w, v.y); + } + + // yywz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yywz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.w, v.z); + } + + // yyww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yyww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.y, v.w, v.w); + } + + // yzxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.x); + } + + // yzxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.y); + } + + // yzxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.z); + } + + // yzxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.x, v.w); + } + + // yzyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.y, v.x); + } + + // yzyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.y, v.y); + } + + // yzyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.y, v.z); + } + + // yzyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.y, v.w); + } + + // yzzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzx(const 
glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.x); + } + + // yzzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.y); + } + + // yzzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.z); + } + + // yzzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.z, v.w); + } + + // yzwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.w, v.x); + } + + // yzwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.w, v.y); + } + + // yzwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.w, v.z); + } + + // yzww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> yzww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.z, v.w, v.w); + } + + // ywxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.x, v.x); + } + + // ywxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.x, v.y); + } + + // ywxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.x, v.z); + } + + // ywxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.x, v.w); + } + + // ywyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.y, v.x); + } + + // ywyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.y, v.y); + } + + // ywyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.y, v.z); + } + + // ywyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.y, v.w); + } + + // ywzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.z, v.x); + } + + // ywzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.z, v.y); + } + + // ywzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.z, v.z); + } + + // ywzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.z, v.w); + } + + // ywwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.w, v.x); + } + + // ywwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywwy(const 
glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.w, v.y); + } + + // ywwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.w, v.z); + } + + // ywww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> ywww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.y, v.w, v.w, v.w); + } + + // zxxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.x); + } + + // zxxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.y); + } + + // zxxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.z); + } + + // zxxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.x, v.w); + } + + // zxyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.x); + } + + // zxyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.y); + } + + // zxyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.z); + } + + // zxyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.y, v.w); + } + + // zxzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.x); + } + + // zxzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.y); + } + + // zxzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.z); + } + + // zxzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.z, v.w); + } + + // zxwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.w, 
v.x); + } + + // zxwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.w, v.y); + } + + // zxwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.w, v.z); + } + + // zxww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zxww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.x, v.w, v.w); + } + + // zyxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.x); + } + + // zyxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.y); + } + + // zyxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.z); + } + + // zyxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.x, v.w); + } + + // zyyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.x); + } + + // zyyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.y); + } + + // zyyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.z); + } + + // zyyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.y, v.w); + } + + // zyzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.x); + } + + // zyzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.y); + } + + // zyzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.z); + } + + // zyzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.z, v.w); + } + + // zywx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, 
Q> zywx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.w, v.x); + } + + // zywy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zywy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.w, v.y); + } + + // zywz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zywz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.w, v.z); + } + + // zyww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zyww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.y, v.w, v.w); + } + + // zzxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.x); + } + + // zzxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.y); + } + + // zzxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.z); + } + + // zzxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.x, v.w); + } + + // zzyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.x); + } + + // zzyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.y); + } + + // zzyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.z); + } + + // zzyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.y, v.w); + } + + // zzzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzx(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.z, v.x); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.z, v.x); + } + + // zzzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzy(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.z, v.y); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.z, v.y); + } + + // zzzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzz(const glm::vec<3, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.z, v.z); + } + + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.z, v.z); + } + + // zzzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, 
Q>(v.z, v.z, v.z, v.w); + } + + // zzwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.w, v.x); + } + + // zzwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.w, v.y); + } + + // zzwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.w, v.z); + } + + // zzww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zzww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.z, v.w, v.w); + } + + // zwxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.x, v.x); + } + + // zwxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.x, v.y); + } + + // zwxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.x, v.z); + } + + // zwxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.x, v.w); + } + + // zwyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.y, v.x); + } + + // zwyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.y, v.y); + } + + // zwyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.y, v.z); + } + + // zwyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.y, v.w); + } + + // zwzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.z, v.x); + } + + // zwzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.z, v.y); + } + + // zwzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.z, v.z); + } + + // zwzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.z, v.w); + } + + // zwwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.w, v.x); + } + + // zwwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.w, v.y); + } + + // zwwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.w, v.z); + } + + // zwww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> zwww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.z, v.w, v.w, v.w); + } + + // wxxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.x, v.x); + } + + // wxxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.x, v.y); + } + + // wxxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.x, v.z); + } + + // wxxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxxw(const glm::vec<4, T, Q> &v) { + 
return glm::vec<4, T, Q>(v.w, v.x, v.x, v.w); + } + + // wxyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.y, v.x); + } + + // wxyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.y, v.y); + } + + // wxyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.y, v.z); + } + + // wxyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.y, v.w); + } + + // wxzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.z, v.x); + } + + // wxzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.z, v.y); + } + + // wxzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.z, v.z); + } + + // wxzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.z, v.w); + } + + // wxwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.w, v.x); + } + + // wxwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.w, v.y); + } + + // wxwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.w, v.z); + } + + // wxww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wxww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.x, v.w, v.w); + } + + // wyxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.x, v.x); + } + + // wyxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.x, v.y); + } + + // wyxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.x, v.z); + } + + // wyxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.x, v.w); + } + + // wyyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.y, v.x); + } + + // wyyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.y, v.y); + } + + // wyyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.y, v.z); + } + + // wyyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.y, v.w); + } + + // wyzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.z, v.x); + } + + // wyzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.z, v.y); + } + + // wyzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.z, v.z); + } + + // wyzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyzw(const 
glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.z, v.w); + } + + // wywx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wywx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.w, v.x); + } + + // wywy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wywy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.w, v.y); + } + + // wywz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wywz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.w, v.z); + } + + // wyww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wyww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.y, v.w, v.w); + } + + // wzxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.x, v.x); + } + + // wzxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.x, v.y); + } + + // wzxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.x, v.z); + } + + // wzxw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.x, v.w); + } + + // wzyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.y, v.x); + } + + // wzyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.y, v.y); + } + + // wzyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.y, v.z); + } + + // wzyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.y, v.w); + } + + // wzzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.z, v.x); + } + + // wzzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.z, v.y); + } + + // wzzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.z, v.z); + } + + // wzzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.z, v.w); + } + + // wzwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.w, v.x); + } + + // wzwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.w, v.y); + } + + // wzwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.w, v.z); + } + + // wzww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wzww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.z, v.w, v.w); + } + + // wwxx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwxx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.x, v.x); + } + + // wwxy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwxy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.x, v.y); + } + + // wwxz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwxz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.x, v.z); + } + + // wwxw + template + GLM_FUNC_QUALIFIER 
glm::vec<4, T, Q> wwxw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.x, v.w); + } + + // wwyx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwyx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.y, v.x); + } + + // wwyy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwyy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.y, v.y); + } + + // wwyz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwyz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.y, v.z); + } + + // wwyw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwyw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.y, v.w); + } + + // wwzx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwzx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.z, v.x); + } + + // wwzy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwzy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.z, v.y); + } + + // wwzz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwzz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.z, v.z); + } + + // wwzw + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwzw(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.z, v.w); + } + + // wwwx + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwwx(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.w, v.x); + } + + // wwwy + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwwy(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.w, v.y); + } + + // wwwz + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwwz(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.w, v.z); + } + + // wwww + template + GLM_FUNC_QUALIFIER glm::vec<4, T, Q> wwww(const glm::vec<4, T, Q> &v) { + return glm::vec<4, T, Q>(v.w, v.w, v.w, v.w); + } + + /// @} +}//namespace glm diff --git a/includes/glm/gtx/vector_angle.hpp b/includes/glm/gtx/vector_angle.hpp new file mode 100644 index 0000000..9ff4127 --- /dev/null +++ b/includes/glm/gtx/vector_angle.hpp @@ -0,0 +1,55 @@ +/// @ref gtx_vector_angle +/// @file glm/gtx/vector_angle.hpp +/// +/// @see core (dependence) +/// @see gtx_quaternion (dependence) +/// @see gtx_epsilon (dependence) +/// +/// @defgroup gtx_vector_angle GLM_GTX_vector_angle +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Compute angle between vectors + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../gtc/epsilon.hpp" +#include "../gtx/quaternion.hpp" +#include "../gtx/rotate_vector.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_vector_angle is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_vector_angle extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_vector_angle + /// @{ + + //! Returns the absolute angle between two vectors. + //! Parameters need to be normalized. + /// @see gtx_vector_angle extension. + template + GLM_FUNC_DECL T angle(vec const& x, vec const& y); + + //! Returns the oriented angle between two 2d vectors. + //! Parameters need to be normalized. + /// @see gtx_vector_angle extension. + template + GLM_FUNC_DECL T orientedAngle(vec<2, T, Q> const& x, vec<2, T, Q> const& y); + + //! Returns the oriented angle between two 3d vectors based from a reference axis. 
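The free-function swizzles above each return a new vector by value, so they can be chained or applied to temporaries where GLM's member swizzles are unavailable. A minimal usage sketch, assuming the header is installed at <glm/gtx/vec_swizzle.hpp> (its upstream GLM location) and that GLM_ENABLE_EXPERIMENTAL is defined as for the other GTX extensions in this patch; the include root and the main()/printf scaffolding are illustrative assumptions only:

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/vec_swizzle.hpp> // assumed upstream location of the swizzle helpers
#include <cstdio>

int main()
{
    glm::vec4 v(1.0f, 2.0f, 3.0f, 4.0f);

    glm::vec4 a = glm::wxyz(v);  // (4, 1, 2, 3): rotate components right
    glm::vec4 b = glm::zwxy(v);  // (3, 4, 1, 2): swap the xy and zw pairs
    glm::vec4 c = glm::wwww(v);  // (4, 4, 4, 4): broadcast the last component

    // Three-component inputs are widened to vec4 by the vec<3> overloads.
    glm::vec3 n(0.0f, 1.0f, 0.0f);
    glm::vec4 d = glm::xzzy(n); // (0, 0, 0, 1)

    std::printf("%g %g %g %g\n", a.x, a.y, a.z, a.w);
    (void)b; (void)c; (void)d;
    return 0;
}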
+ //! Parameters need to be normalized. + /// @see gtx_vector_angle extension. + template + GLM_FUNC_DECL T orientedAngle(vec<3, T, Q> const& x, vec<3, T, Q> const& y, vec<3, T, Q> const& ref); + + /// @} +}// namespace glm + +#include "vector_angle.inl" diff --git a/includes/glm/gtx/vector_angle.inl b/includes/glm/gtx/vector_angle.inl new file mode 100644 index 0000000..11e1a21 --- /dev/null +++ b/includes/glm/gtx/vector_angle.inl @@ -0,0 +1,45 @@ +/// @ref gtx_vector_angle + +namespace glm +{ + template + GLM_FUNC_QUALIFIER genType angle + ( + genType const& x, + genType const& y + ) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'angle' only accept floating-point inputs"); + return acos(clamp(dot(x, y), genType(-1), genType(1))); + } + + template + GLM_FUNC_QUALIFIER T angle(vec const& x, vec const& y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'angle' only accept floating-point inputs"); + return acos(clamp(dot(x, y), T(-1), T(1))); + } + + template + GLM_FUNC_QUALIFIER T orientedAngle(vec<2, T, Q> const& x, vec<2, T, Q> const& y) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'orientedAngle' only accept floating-point inputs"); + T const Angle(acos(clamp(dot(x, y), T(-1), T(1)))); + + T const partialCross = x.x * y.y - y.x * x.y; + + if (partialCross > T(0)) + return Angle; + else + return -Angle; + } + + template + GLM_FUNC_QUALIFIER T orientedAngle(vec<3, T, Q> const& x, vec<3, T, Q> const& y, vec<3, T, Q> const& ref) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_FLOAT, "'orientedAngle' only accept floating-point inputs"); + + T const Angle(acos(clamp(dot(x, y), T(-1), T(1)))); + return mix(Angle, -Angle, dot(ref, cross(x, y)) < T(0)); + } +}//namespace glm diff --git a/includes/glm/gtx/vector_query.hpp b/includes/glm/gtx/vector_query.hpp new file mode 100644 index 0000000..ab52df0 --- /dev/null +++ b/includes/glm/gtx/vector_query.hpp @@ -0,0 +1,64 @@ +/// @ref gtx_vector_query +/// @file glm/gtx/vector_query.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_vector_query GLM_GTX_vector_query +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Query information of vector types + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include +#include + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_vector_query is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_vector_query extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_vector_query + /// @{ + + //! Check whether two vectors are collinears. + /// @see gtx_vector_query extensions. + template + GLM_FUNC_DECL bool areCollinear(vec const& v0, vec const& v1, T const& epsilon); + + //! Check whether two vectors are orthogonals. + /// @see gtx_vector_query extensions. + template + GLM_FUNC_DECL bool areOrthogonal(vec const& v0, vec const& v1, T const& epsilon); + + //! Check whether a vector is normalized. + /// @see gtx_vector_query extensions. + template + GLM_FUNC_DECL bool isNormalized(vec const& v, T const& epsilon); + + //! Check whether a vector is null. + /// @see gtx_vector_query extensions. + template + GLM_FUNC_DECL bool isNull(vec const& v, T const& epsilon); + + //! Check whether a each component of a vector is null. 
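A short usage sketch for the angle helpers declared above. Both functions expect normalized inputs; orientedAngle signs the result by the 2D cross product in the two-component overload and by dot(ref, cross(x, y)) in the three-component overload, matching the implementations in vector_angle.inl. The include root and the main() wrapper are illustrative assumptions:

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/vector_angle.hpp>
#include <cstdio>

int main()
{
    // Inputs must be normalized, as the declarations above require.
    glm::vec2 a = glm::normalize(glm::vec2(1.0f, 0.0f));
    glm::vec2 b = glm::normalize(glm::vec2(0.0f, 1.0f));

    float unsigned_angle = glm::angle(a, b);         // ~pi/2, always non-negative
    float signed_angle   = glm::orientedAngle(a, b); // +pi/2 (counter-clockwise)
    float reversed       = glm::orientedAngle(b, a); // -pi/2 (clockwise)

    // 3D variant: the sign is taken relative to a reference axis.
    glm::vec3 x(1.0f, 0.0f, 0.0f), y(0.0f, 1.0f, 0.0f), up(0.0f, 0.0f, 1.0f);
    float about_up = glm::orientedAngle(x, y, up);   // +pi/2 around +Z

    std::printf("%f %f %f %f\n", unsigned_angle, signed_angle, reversed, about_up);
    return 0;
}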
+ /// @see gtx_vector_query extensions. + template + GLM_FUNC_DECL vec isCompNull(vec const& v, T const& epsilon); + + //! Check whether two vectors are orthonormal. + /// @see gtx_vector_query extensions. + template + GLM_FUNC_DECL bool areOrthonormal(vec const& v0, vec const& v1, T const& epsilon); + + /// @} +}// namespace glm + +#include "vector_query.inl" diff --git a/includes/glm/gtx/vector_query.inl b/includes/glm/gtx/vector_query.inl new file mode 100644 index 0000000..d1a5c9b --- /dev/null +++ b/includes/glm/gtx/vector_query.inl @@ -0,0 +1,154 @@ +/// @ref gtx_vector_query + +#include + +namespace glm{ +namespace detail +{ + template + struct compute_areCollinear{}; + + template + struct compute_areCollinear<2, T, Q> + { + GLM_FUNC_QUALIFIER static bool call(vec<2, T, Q> const& v0, vec<2, T, Q> const& v1, T const& epsilon) + { + return length(cross(vec<3, T, Q>(v0, static_cast(0)), vec<3, T, Q>(v1, static_cast(0)))) < epsilon; + } + }; + + template + struct compute_areCollinear<3, T, Q> + { + GLM_FUNC_QUALIFIER static bool call(vec<3, T, Q> const& v0, vec<3, T, Q> const& v1, T const& epsilon) + { + return length(cross(v0, v1)) < epsilon; + } + }; + + template + struct compute_areCollinear<4, T, Q> + { + GLM_FUNC_QUALIFIER static bool call(vec<4, T, Q> const& v0, vec<4, T, Q> const& v1, T const& epsilon) + { + return length(cross(vec<3, T, Q>(v0), vec<3, T, Q>(v1))) < epsilon; + } + }; + + template + struct compute_isCompNull{}; + + template + struct compute_isCompNull<2, T, Q> + { + GLM_FUNC_QUALIFIER static vec<2, bool, Q> call(vec<2, T, Q> const& v, T const& epsilon) + { + return vec<2, bool, Q>( + (abs(v.x) < epsilon), + (abs(v.y) < epsilon)); + } + }; + + template + struct compute_isCompNull<3, T, Q> + { + GLM_FUNC_QUALIFIER static vec<3, bool, Q> call(vec<3, T, Q> const& v, T const& epsilon) + { + return vec<3, bool, Q>( + (abs(v.x) < epsilon), + (abs(v.y) < epsilon), + (abs(v.z) < epsilon)); + } + }; + + template + struct compute_isCompNull<4, T, Q> + { + GLM_FUNC_QUALIFIER static vec<4, bool, Q> call(vec<4, T, Q> const& v, T const& epsilon) + { + return vec<4, bool, Q>( + (abs(v.x) < epsilon), + (abs(v.y) < epsilon), + (abs(v.z) < epsilon), + (abs(v.w) < epsilon)); + } + }; + +}//namespace detail + + template + GLM_FUNC_QUALIFIER bool areCollinear(vec const& v0, vec const& v1, T const& epsilon) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'areCollinear' only accept floating-point inputs"); + + return detail::compute_areCollinear::call(v0, v1, epsilon); + } + + template + GLM_FUNC_QUALIFIER bool areOrthogonal(vec const& v0, vec const& v1, T const& epsilon) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'areOrthogonal' only accept floating-point inputs"); + + return abs(dot(v0, v1)) <= max( + static_cast(1), + length(v0)) * max(static_cast(1), length(v1)) * epsilon; + } + + template + GLM_FUNC_QUALIFIER bool isNormalized(vec const& v, T const& epsilon) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isNormalized' only accept floating-point inputs"); + + return abs(length(v) - static_cast(1)) <= static_cast(2) * epsilon; + } + + template + GLM_FUNC_QUALIFIER bool isNull(vec const& v, T const& epsilon) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isNull' only accept floating-point inputs"); + + return length(v) <= epsilon; + } + + template + GLM_FUNC_QUALIFIER vec isCompNull(vec const& v, T const& epsilon) + { + GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isCompNull' only accept floating-point inputs"); + + return 
detail::compute_isCompNull::call(v, epsilon); + } + + template + GLM_FUNC_QUALIFIER vec<2, bool, Q> isCompNull(vec<2, T, Q> const& v, T const& epsilon) + { + return vec<2, bool, Q>( + abs(v.x) < epsilon, + abs(v.y) < epsilon); + } + + template + GLM_FUNC_QUALIFIER vec<3, bool, Q> isCompNull(vec<3, T, Q> const& v, T const& epsilon) + { + return vec<3, bool, Q>( + abs(v.x) < epsilon, + abs(v.y) < epsilon, + abs(v.z) < epsilon); + } + + template + GLM_FUNC_QUALIFIER vec<4, bool, Q> isCompNull(vec<4, T, Q> const& v, T const& epsilon) + { + return vec<4, bool, Q>( + abs(v.x) < epsilon, + abs(v.y) < epsilon, + abs(v.z) < epsilon, + abs(v.w) < epsilon); + } + + template + GLM_FUNC_QUALIFIER bool areOrthonormal(vec const& v0, vec const& v1, T const& epsilon) + { + return isNormalized(v0, epsilon) && isNormalized(v1, epsilon) && (abs(dot(v0, v1)) <= epsilon); + } + +}//namespace glm diff --git a/includes/glm/gtx/wrap.hpp b/includes/glm/gtx/wrap.hpp new file mode 100644 index 0000000..b7ac5af --- /dev/null +++ b/includes/glm/gtx/wrap.hpp @@ -0,0 +1,35 @@ +/// @ref gtx_wrap +/// @file glm/gtx/wrap.hpp +/// +/// @see core (dependence) +/// +/// @defgroup gtx_wrap GLM_GTX_wrap +/// @ingroup gtx +/// +/// Include to use the features of this extension. +/// +/// Wrapping mode of texture coordinates. + +#pragma once + +// Dependency: +#include "../glm.hpp" +#include "../ext/scalar_common.hpp" +#include "../ext/vector_common.hpp" +#include "../gtc/vec1.hpp" + +#ifndef GLM_ENABLE_EXPERIMENTAL +# error "GLM: GLM_GTX_wrap is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it." +#elif GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) +# pragma message("GLM: GLM_GTX_wrap extension included") +#endif + +namespace glm +{ + /// @addtogroup gtx_wrap + /// @{ + + /// @} +}// namespace glm + +#include "wrap.inl" diff --git a/includes/glm/gtx/wrap.inl b/includes/glm/gtx/wrap.inl new file mode 100644 index 0000000..4be3b4c --- /dev/null +++ b/includes/glm/gtx/wrap.inl @@ -0,0 +1,6 @@ +/// @ref gtx_wrap + +namespace glm +{ + +}//namespace glm diff --git a/includes/glm/integer.hpp b/includes/glm/integer.hpp new file mode 100644 index 0000000..36c67be --- /dev/null +++ b/includes/glm/integer.hpp @@ -0,0 +1,212 @@ +/// @ref core +/// @file glm/integer.hpp +/// +/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions +/// +/// @defgroup core_func_integer Integer functions +/// @ingroup core +/// +/// Provides GLSL functions on integer types +/// +/// These all operate component-wise. The description is per component. +/// The notation [a, b] means the set of bits from bit-number a through bit-number +/// b, inclusive. The lowest-order bit is bit 0. +/// +/// Include to use these core features. + +#pragma once + +#include "detail/qualifier.hpp" +#include "common.hpp" +#include "vector_relational.hpp" + +namespace glm +{ + /// @addtogroup core_func_integer + /// @{ + + /// Adds 32-bit unsigned integer x and y, returning the sum + /// modulo pow(2, 32). The value carry is set to 0 if the sum was + /// less than pow(2, 32), or to 1 otherwise. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. 
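A usage sketch for the vector query predicates declared above; the tolerance epsilon is supplied by the caller, and the expected results follow directly from the compute_* specializations in vector_query.inl. Only the include root and the main() wrapper are assumptions:

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/vector_query.hpp>
#include <cstdio>

int main()
{
    const float eps = 1e-5f;

    glm::vec3 a(1.0f, 0.0f, 0.0f);
    glm::vec3 b(2.0f, 0.0f, 0.0f);   // same direction, different length
    glm::vec3 c(0.0f, 1.0f, 0.0f);

    bool collinear  = glm::areCollinear(a, b, eps);      // true: length(cross(a, b)) ~ 0
    bool orthogonal = glm::areOrthogonal(a, c, eps);     // true: dot(a, c) ~ 0
    bool unit       = glm::isNormalized(b, eps);         // false: length(b) == 2
    bool zero       = glm::isNull(glm::vec3(0.0f), eps); // true: length ~ 0
    bool frame      = glm::areOrthonormal(a, c, eps);    // true: both unit and orthogonal

    std::printf("%d %d %d %d %d\n", collinear, orthogonal, unit, zero, frame);
    return 0;
}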
+ /// + /// @see GLSL uaddCarry man page + /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions + template + GLM_FUNC_DECL vec uaddCarry( + vec const& x, + vec const& y, + vec & carry); + + /// Subtracts the 32-bit unsigned integer y from x, returning + /// the difference if non-negative, or pow(2, 32) plus the difference + /// otherwise. The value borrow is set to 0 if x >= y, or to 1 otherwise. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// + /// @see GLSL usubBorrow man page + /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions + template + GLM_FUNC_DECL vec usubBorrow( + vec const& x, + vec const& y, + vec & borrow); + + /// Multiplies 32-bit integers x and y, producing a 64-bit + /// result. The 32 least-significant bits are returned in lsb. + /// The 32 most-significant bits are returned in msb. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// + /// @see GLSL umulExtended man page + /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions + template + GLM_FUNC_DISCARD_DECL void umulExtended( + vec const& x, + vec const& y, + vec & msb, + vec & lsb); + + /// Multiplies 32-bit integers x and y, producing a 64-bit + /// result. The 32 least-significant bits are returned in lsb. + /// The 32 most-significant bits are returned in msb. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// + /// @see GLSL imulExtended man page + /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions + template + GLM_FUNC_DISCARD_DECL void imulExtended( + vec const& x, + vec const& y, + vec & msb, + vec & lsb); + + /// Extracts bits [offset, offset + bits - 1] from value, + /// returning them in the least significant bits of the result. + /// For unsigned data types, the most significant bits of the + /// result will be set to zero. For signed data types, the + /// most significant bits will be set to the value of bit offset + base - 1. + /// + /// If bits is zero, the result will be zero. The result will be + /// undefined if offset or bits is negative, or if the sum of + /// offset and bits is greater than the number of bits used + /// to store the operand. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Signed or unsigned integer scalar types. + /// + /// @see GLSL bitfieldExtract man page + /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions + template + GLM_FUNC_DECL vec bitfieldExtract( + vec const& Value, + int Offset, + int Bits); + + /// Returns the insertion the bits least-significant bits of insert into base. + /// + /// The result will have bits [offset, offset + bits - 1] taken + /// from bits [0, bits - 1] of insert, and all other bits taken + /// directly from the corresponding bits of base. If bits is + /// zero, the result will simply be base. The result will be + /// undefined if offset or bits is negative, or if the sum of + /// offset and bits is greater than the number of bits used to + /// store the operand. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Signed or unsigned integer scalar or vector types. 
+ /// + /// @see GLSL bitfieldInsert man page + /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions + template + GLM_FUNC_DECL vec bitfieldInsert( + vec const& Base, + vec const& Insert, + int Offset, + int Bits); + + /// Returns the reversal of the bits of value. + /// The bit numbered n of the result will be taken from bit (bits - 1) - n of value, + /// where bits is the total number of bits used to represent value. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Signed or unsigned integer scalar or vector types. + /// + /// @see GLSL bitfieldReverse man page + /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions + template + GLM_FUNC_DECL vec bitfieldReverse(vec const& v); + + /// Returns the number of bits set to 1 in the binary representation of value. + /// + /// @tparam genType Signed or unsigned integer scalar or vector types. + /// + /// @see GLSL bitCount man page + /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions + template + GLM_FUNC_DECL int bitCount(genType v); + + /// Returns the number of bits set to 1 in the binary representation of value. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Signed or unsigned integer scalar or vector types. + /// + /// @see GLSL bitCount man page + /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions + template + GLM_FUNC_DECL vec bitCount(vec const& v); + + /// Returns the bit number of the least significant bit set to + /// 1 in the binary representation of value. + /// If value is zero, -1 will be returned. + /// + /// @tparam genIUType Signed or unsigned integer scalar types. + /// + /// @see GLSL findLSB man page + /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions + template + GLM_FUNC_DECL int findLSB(genIUType x); + + /// Returns the bit number of the least significant bit set to + /// 1 in the binary representation of value. + /// If value is zero, -1 will be returned. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Signed or unsigned integer scalar types. + /// + /// @see GLSL findLSB man page + /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions + template + GLM_FUNC_DECL vec findLSB(vec const& v); + + /// Returns the bit number of the most significant bit in the binary representation of value. + /// For positive integers, the result will be the bit number of the most significant bit set to 1. + /// For negative integers, the result will be the bit number of the most significant + /// bit set to 0. For a value of zero or negative one, -1 will be returned. + /// + /// @tparam genIUType Signed or unsigned integer scalar types. + /// + /// @see GLSL findMSB man page + /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions + template + GLM_FUNC_DECL int findMSB(genIUType x); + + /// Returns the bit number of the most significant bit in the binary representation of value. + /// For positive integers, the result will be the bit number of the most significant bit set to 1. + /// For negative integers, the result will be the bit number of the most significant + /// bit set to 0. For a value of zero or negative one, -1 will be returned. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T Signed or unsigned integer scalar types. 
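A usage sketch for the core integer functions declared above (bitCount, findLSB, findMSB and the bitfield helpers). These live in the core glm/integer.hpp header, so no experimental define is needed; the chosen constants and the main() wrapper are only illustrative:

#include <glm/glm.hpp>
#include <glm/integer.hpp> // core GLSL integer functions, also pulled in by glm.hpp
#include <cstdio>

int main()
{
    glm::uint v = 0xB0u;         // 1011 0000 in binary

    int ones = glm::bitCount(v); // 3: bits 4, 5 and 7 are set
    int lsb  = glm::findLSB(v);  // 4: lowest set bit
    int msb  = glm::findMSB(v);  // 7: highest set bit

    // Extract bits [4, 7] into the low bits of the result: 1011 binary == 11.
    glm::uvec2 nibble = glm::bitfieldExtract(glm::uvec2(v), 4, 4);

    // Insert the two low bits of 0x3 at offset 0, leaving the other bits of v intact: 0xB3.
    glm::uvec2 patched = glm::bitfieldInsert(glm::uvec2(v), glm::uvec2(0x3u), 0, 2);

    std::printf("%d %d %d %u %u\n", ones, lsb, msb, nibble.x, patched.x);
    return 0;
}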
+ /// + /// @see GLSL findMSB man page + /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions + template + GLM_FUNC_DECL vec findMSB(vec const& v); + + /// @} +}//namespace glm + +#include "detail/func_integer.inl" diff --git a/includes/glm/mat2x2.hpp b/includes/glm/mat2x2.hpp new file mode 100644 index 0000000..96bec96 --- /dev/null +++ b/includes/glm/mat2x2.hpp @@ -0,0 +1,9 @@ +/// @ref core +/// @file glm/mat2x2.hpp + +#pragma once +#include "./ext/matrix_double2x2.hpp" +#include "./ext/matrix_double2x2_precision.hpp" +#include "./ext/matrix_float2x2.hpp" +#include "./ext/matrix_float2x2_precision.hpp" + diff --git a/includes/glm/mat2x3.hpp b/includes/glm/mat2x3.hpp new file mode 100644 index 0000000..d68dc25 --- /dev/null +++ b/includes/glm/mat2x3.hpp @@ -0,0 +1,9 @@ +/// @ref core +/// @file glm/mat2x3.hpp + +#pragma once +#include "./ext/matrix_double2x3.hpp" +#include "./ext/matrix_double2x3_precision.hpp" +#include "./ext/matrix_float2x3.hpp" +#include "./ext/matrix_float2x3_precision.hpp" + diff --git a/includes/glm/mat2x4.hpp b/includes/glm/mat2x4.hpp new file mode 100644 index 0000000..b04b738 --- /dev/null +++ b/includes/glm/mat2x4.hpp @@ -0,0 +1,9 @@ +/// @ref core +/// @file glm/mat2x4.hpp + +#pragma once +#include "./ext/matrix_double2x4.hpp" +#include "./ext/matrix_double2x4_precision.hpp" +#include "./ext/matrix_float2x4.hpp" +#include "./ext/matrix_float2x4_precision.hpp" + diff --git a/includes/glm/mat3x2.hpp b/includes/glm/mat3x2.hpp new file mode 100644 index 0000000..c853153 --- /dev/null +++ b/includes/glm/mat3x2.hpp @@ -0,0 +1,9 @@ +/// @ref core +/// @file glm/mat3x2.hpp + +#pragma once +#include "./ext/matrix_double3x2.hpp" +#include "./ext/matrix_double3x2_precision.hpp" +#include "./ext/matrix_float3x2.hpp" +#include "./ext/matrix_float3x2_precision.hpp" + diff --git a/includes/glm/mat3x3.hpp b/includes/glm/mat3x3.hpp new file mode 100644 index 0000000..fd4fa31 --- /dev/null +++ b/includes/glm/mat3x3.hpp @@ -0,0 +1,8 @@ +/// @ref core +/// @file glm/mat3x3.hpp + +#pragma once +#include "./ext/matrix_double3x3.hpp" +#include "./ext/matrix_double3x3_precision.hpp" +#include "./ext/matrix_float3x3.hpp" +#include "./ext/matrix_float3x3_precision.hpp" diff --git a/includes/glm/mat3x4.hpp b/includes/glm/mat3x4.hpp new file mode 100644 index 0000000..6342bf5 --- /dev/null +++ b/includes/glm/mat3x4.hpp @@ -0,0 +1,8 @@ +/// @ref core +/// @file glm/mat3x4.hpp + +#pragma once +#include "./ext/matrix_double3x4.hpp" +#include "./ext/matrix_double3x4_precision.hpp" +#include "./ext/matrix_float3x4.hpp" +#include "./ext/matrix_float3x4_precision.hpp" diff --git a/includes/glm/mat4x2.hpp b/includes/glm/mat4x2.hpp new file mode 100644 index 0000000..e013e46 --- /dev/null +++ b/includes/glm/mat4x2.hpp @@ -0,0 +1,9 @@ +/// @ref core +/// @file glm/mat4x2.hpp + +#pragma once +#include "./ext/matrix_double4x2.hpp" +#include "./ext/matrix_double4x2_precision.hpp" +#include "./ext/matrix_float4x2.hpp" +#include "./ext/matrix_float4x2_precision.hpp" + diff --git a/includes/glm/mat4x3.hpp b/includes/glm/mat4x3.hpp new file mode 100644 index 0000000..205725a --- /dev/null +++ b/includes/glm/mat4x3.hpp @@ -0,0 +1,8 @@ +/// @ref core +/// @file glm/mat4x3.hpp + +#pragma once +#include "./ext/matrix_double4x3.hpp" +#include "./ext/matrix_double4x3_precision.hpp" +#include "./ext/matrix_float4x3.hpp" +#include "./ext/matrix_float4x3_precision.hpp" diff --git a/includes/glm/mat4x4.hpp b/includes/glm/mat4x4.hpp new file mode 100644 index 0000000..3515f7f --- /dev/null +++ 
b/includes/glm/mat4x4.hpp @@ -0,0 +1,9 @@ +/// @ref core +/// @file glm/mat4x4.hpp + +#pragma once +#include "./ext/matrix_double4x4.hpp" +#include "./ext/matrix_double4x4_precision.hpp" +#include "./ext/matrix_float4x4.hpp" +#include "./ext/matrix_float4x4_precision.hpp" + diff --git a/includes/glm/matrix.hpp b/includes/glm/matrix.hpp new file mode 100644 index 0000000..4584c92 --- /dev/null +++ b/includes/glm/matrix.hpp @@ -0,0 +1,161 @@ +/// @ref core +/// @file glm/matrix.hpp +/// +/// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions +/// +/// @defgroup core_func_matrix Matrix functions +/// @ingroup core +/// +/// Provides GLSL matrix functions. +/// +/// Include to use these core features. + +#pragma once + +// Dependencies +#include "detail/qualifier.hpp" +#include "detail/setup.hpp" +#include "vec2.hpp" +#include "vec3.hpp" +#include "vec4.hpp" +#include "mat2x2.hpp" +#include "mat2x3.hpp" +#include "mat2x4.hpp" +#include "mat3x2.hpp" +#include "mat3x3.hpp" +#include "mat3x4.hpp" +#include "mat4x2.hpp" +#include "mat4x3.hpp" +#include "mat4x4.hpp" + +namespace glm { +namespace detail +{ + template + struct outerProduct_trait{}; + + template + struct outerProduct_trait<2, 2, T, Q> + { + typedef mat<2, 2, T, Q> type; + }; + + template + struct outerProduct_trait<2, 3, T, Q> + { + typedef mat<3, 2, T, Q> type; + }; + + template + struct outerProduct_trait<2, 4, T, Q> + { + typedef mat<4, 2, T, Q> type; + }; + + template + struct outerProduct_trait<3, 2, T, Q> + { + typedef mat<2, 3, T, Q> type; + }; + + template + struct outerProduct_trait<3, 3, T, Q> + { + typedef mat<3, 3, T, Q> type; + }; + + template + struct outerProduct_trait<3, 4, T, Q> + { + typedef mat<4, 3, T, Q> type; + }; + + template + struct outerProduct_trait<4, 2, T, Q> + { + typedef mat<2, 4, T, Q> type; + }; + + template + struct outerProduct_trait<4, 3, T, Q> + { + typedef mat<3, 4, T, Q> type; + }; + + template + struct outerProduct_trait<4, 4, T, Q> + { + typedef mat<4, 4, T, Q> type; + }; +}//namespace detail + + /// @addtogroup core_func_matrix + /// @{ + + /// Multiply matrix x by matrix y component-wise, i.e., + /// result[i][j] is the scalar product of x[i][j] and y[i][j]. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL matrixCompMult man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL mat matrixCompMult(mat const& x, mat const& y); + + /// Treats the first parameter c as a column vector + /// and the second parameter r as a row vector + /// and does a linear algebraic matrix multiply c * r. 
+ /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL outerProduct man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL typename detail::outerProduct_trait::type outerProduct(vec const& c, vec const& r); + + /// Returns the transposed matrix of x + /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL transpose man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL typename mat::transpose_type transpose(mat const& x); + + /// Return the determinant of a squared matrix. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL determinant man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL T determinant(mat const& m); + + /// Return the inverse of a squared matrix. + /// + /// @tparam C Integer between 1 and 4 included that qualify the number a column + /// @tparam R Integer between 1 and 4 included that qualify the number a row + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL inverse man page + /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions + template + GLM_FUNC_DECL mat inverse(mat const& m); + + /// @} +}//namespace glm + +#include "detail/func_matrix.inl" diff --git a/includes/glm/packing.hpp b/includes/glm/packing.hpp new file mode 100644 index 0000000..ca83ac1 --- /dev/null +++ b/includes/glm/packing.hpp @@ -0,0 +1,173 @@ +/// @ref core +/// @file glm/packing.hpp +/// +/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions +/// @see gtc_packing +/// +/// @defgroup core_func_packing Floating-Point Pack and Unpack Functions +/// @ingroup core +/// +/// Provides GLSL functions to pack and unpack half, single and double-precision floating point values into more compact integer types. +/// +/// These functions do not operate component-wise, rather as described in each case. +/// +/// Include to use these core features. + +#pragma once + +#include "./ext/vector_uint2.hpp" +#include "./ext/vector_float2.hpp" +#include "./ext/vector_float4.hpp" + +namespace glm +{ + /// @addtogroup core_func_packing + /// @{ + + /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values. + /// Then, the results are packed into the returned 32-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packUnorm2x16: round(clamp(c, 0, +1) * 65535.0) + /// + /// The first component of the vector will be written to the least significant bits of the output; + /// the last component will be written to the most significant bits. 
+ /// + /// @see GLSL packUnorm2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint packUnorm2x16(vec2 const& v); + + /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values. + /// Then, the results are packed into the returned 32-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packSnorm2x16: round(clamp(v, -1, +1) * 32767.0) + /// + /// The first component of the vector will be written to the least significant bits of the output; + /// the last component will be written to the most significant bits. + /// + /// @see GLSL packSnorm2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint packSnorm2x16(vec2 const& v); + + /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values. + /// Then, the results are packed into the returned 32-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packUnorm4x8: round(clamp(c, 0, +1) * 255.0) + /// + /// The first component of the vector will be written to the least significant bits of the output; + /// the last component will be written to the most significant bits. + /// + /// @see GLSL packUnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint packUnorm4x8(vec4 const& v); + + /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values. + /// Then, the results are packed into the returned 32-bit unsigned integer. + /// + /// The conversion for component c of v to fixed point is done as follows: + /// packSnorm4x8: round(clamp(c, -1, +1) * 127.0) + /// + /// The first component of the vector will be written to the least significant bits of the output; + /// the last component will be written to the most significant bits. + /// + /// @see GLSL packSnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint packSnorm4x8(vec4 const& v); + + /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackUnorm2x16: f / 65535.0 + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see GLSL unpackUnorm2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec2 unpackUnorm2x16(uint p); + + /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector. 
+ /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackSnorm2x16: clamp(f / 32767.0, -1, +1) + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see GLSL unpackSnorm2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec2 unpackSnorm2x16(uint p); + + /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackUnorm4x8: f / 255.0 + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see GLSL unpackUnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec4 unpackUnorm4x8(uint p); + + /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers. + /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector. + /// + /// The conversion for unpacked fixed-point value f to floating point is done as follows: + /// unpackSnorm4x8: clamp(f / 127.0, -1, +1) + /// + /// The first component of the returned vector will be extracted from the least significant bits of the input; + /// the last component will be extracted from the most significant bits. + /// + /// @see GLSL unpackSnorm4x8 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec4 unpackSnorm4x8(uint p); + + /// Returns a double-qualifier value obtained by packing the components of v into a 64-bit value. + /// If an IEEE 754 Inf or NaN is created, it will not signal, and the resulting floating point value is unspecified. + /// Otherwise, the bit- level representation of v is preserved. + /// The first vector component specifies the 32 least significant bits; + /// the second component specifies the 32 most significant bits. + /// + /// @see GLSL packDouble2x32 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL double packDouble2x32(uvec2 const& v); + + /// Returns a two-component unsigned integer vector representation of v. + /// The bit-level representation of v is preserved. + /// The first component of the vector contains the 32 least significant bits of the double; + /// the second component consists the 32 most significant bits. + /// + /// @see GLSL unpackDouble2x32 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uvec2 unpackDouble2x32(double v); + + /// Returns an unsigned integer obtained by converting the components of a two-component floating-point vector + /// to the 16-bit floating-point representation found in the OpenGL Specification, + /// and then packing these two 16- bit integers into a 32-bit unsigned integer. 
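Before the half-float packers, a bit-level sketch of packDouble2x32/unpackDouble2x32 as documented above (the helper name double_bits_round_trip is made up for illustration):

#include <glm/glm.hpp>

// IEEE 754 double 1.0 is 0x3FF0000000000000; the low 32-bit word goes first.
glm::uvec2 double_bits_round_trip()
{
    glm::uvec2 words(0x00000000u, 0x3FF00000u);
    double d = glm::packDouble2x32(words);   // exactly 1.0
    return glm::unpackDouble2x32(d);         // (0x00000000, 0x3FF00000) again
}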
+ /// The first vector component specifies the 16 least-significant bits of the result; + /// the second component specifies the 16 most-significant bits. + /// + /// @see GLSL packHalf2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL uint packHalf2x16(vec2 const& v); + + /// Returns a two-component floating-point vector with components obtained by unpacking a 32-bit unsigned integer into a pair of 16-bit values, + /// interpreting those values as 16-bit floating-point numbers according to the OpenGL Specification, + /// and converting them to 32-bit floating-point values. + /// The first component of the vector is obtained from the 16 least-significant bits of v; + /// the second component is obtained from the 16 most-significant bits of v. + /// + /// @see GLSL unpackHalf2x16 man page + /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions + GLM_FUNC_DECL vec2 unpackHalf2x16(uint v); + + /// @} +}//namespace glm + +#include "detail/func_packing.inl" diff --git a/includes/glm/simd/common.h b/includes/glm/simd/common.h new file mode 100644 index 0000000..c11338a --- /dev/null +++ b/includes/glm/simd/common.h @@ -0,0 +1,249 @@ +/// @ref simd +/// @file glm/simd/common.h + +#pragma once + +#include "platform.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_add(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_add_ps(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_add(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_add_ss(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_sub(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_sub_ps(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_sub(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_sub_ss(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_mul(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_mul_ps(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_mul(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_mul_ss(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_div_ps(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_div(glm_f32vec4 a, glm_f32vec4 b) +{ + return _mm_div_ss(a, b); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div_lowp(glm_f32vec4 a, glm_f32vec4 b) +{ + return glm_vec4_mul(a, _mm_rcp_ps(b)); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_swizzle_xyzw(glm_f32vec4 a) +{ +# if GLM_ARCH & GLM_ARCH_AVX2_BIT + return _mm_permute_ps(a, _MM_SHUFFLE(3, 2, 1, 0)); +# else + return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 2, 1, 0)); +# endif +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c) +{ +# ifdef GLM_FORCE_FMA + return _mm_fmadd_ss(a, b, c); +# else + return _mm_add_ss(_mm_mul_ss(a, b), c); +# endif +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c) +{ +# ifdef GLM_FORCE_FMA + return _mm_fmadd_ps(a, b, c); +# else + return glm_vec4_add(glm_vec4_mul(a, b), c); +# endif +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4d_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c) +{ +# ifdef GLM_FORCE_FMA + return _mm_fmadd_ps(a, b, c); +# else + return glm_vec4_add(glm_vec4_mul(a, b), c); +# endif +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_abs(glm_f32vec4 x) +{ + return _mm_and_ps(x, _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF))); +} + +GLM_FUNC_QUALIFIER glm_ivec4 glm_ivec4_abs(glm_ivec4 x) +{ +# if GLM_ARCH & GLM_ARCH_SSSE3_BIT + return _mm_sign_epi32(x, x); +# else + glm_ivec4 
const sgn0 = _mm_srai_epi32(x, 31); + glm_ivec4 const inv0 = _mm_xor_si128(x, sgn0); + glm_ivec4 const sub0 = _mm_sub_epi32(inv0, sgn0); + return sub0; +# endif +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_sign(glm_vec4 x) +{ + glm_vec4 const zro0 = _mm_setzero_ps(); + glm_vec4 const cmp0 = _mm_cmplt_ps(x, zro0); + glm_vec4 const cmp1 = _mm_cmpgt_ps(x, zro0); + glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(-1.0f)); + glm_vec4 const and1 = _mm_and_ps(cmp1, _mm_set1_ps(1.0f)); + glm_vec4 const or0 = _mm_or_ps(and0, and1); + return or0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_round(glm_vec4 x) +{ +# if GLM_ARCH & GLM_ARCH_SSE41_BIT + return _mm_round_ps(x, _MM_FROUND_TO_NEAREST_INT); +# else + glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000))); + glm_vec4 const and0 = _mm_and_ps(sgn0, x); + glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f)); + glm_vec4 const add0 = glm_vec4_add(x, or0); + glm_vec4 const sub0 = glm_vec4_sub(add0, or0); + return sub0; +# endif +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_floor(glm_vec4 x) +{ +# if GLM_ARCH & GLM_ARCH_SSE41_BIT + return _mm_floor_ps(x); +# else + glm_vec4 const rnd0 = glm_vec4_round(x); + glm_vec4 const cmp0 = _mm_cmplt_ps(x, rnd0); + glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f)); + glm_vec4 const sub0 = glm_vec4_sub(rnd0, and0); + return sub0; +# endif +} + +/* trunc TODO +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_trunc(glm_vec4 x) +{ + return glm_vec4(); +} +*/ + +//roundEven +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_roundEven(glm_vec4 x) +{ + glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000))); + glm_vec4 const and0 = _mm_and_ps(sgn0, x); + glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f)); + glm_vec4 const add0 = glm_vec4_add(x, or0); + glm_vec4 const sub0 = glm_vec4_sub(add0, or0); + return sub0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_ceil(glm_vec4 x) +{ +# if GLM_ARCH & GLM_ARCH_SSE41_BIT + return _mm_ceil_ps(x); +# else + glm_vec4 const rnd0 = glm_vec4_round(x); + glm_vec4 const cmp0 = _mm_cmpgt_ps(x, rnd0); + glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f)); + glm_vec4 const add0 = glm_vec4_add(rnd0, and0); + return add0; +# endif +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_fract(glm_vec4 x) +{ + glm_vec4 const flr0 = glm_vec4_floor(x); + glm_vec4 const sub0 = glm_vec4_sub(x, flr0); + return sub0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mod(glm_vec4 x, glm_vec4 y) +{ + glm_vec4 const div0 = glm_vec4_div(x, y); + glm_vec4 const flr0 = glm_vec4_floor(div0); + glm_vec4 const mul0 = glm_vec4_mul(y, flr0); + glm_vec4 const sub0 = glm_vec4_sub(x, mul0); + return sub0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_clamp(glm_vec4 v, glm_vec4 minVal, glm_vec4 maxVal) +{ + glm_vec4 const min0 = _mm_min_ps(v, maxVal); + glm_vec4 const max0 = _mm_max_ps(min0, minVal); + return max0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mix(glm_vec4 v1, glm_vec4 v2, glm_vec4 a) +{ + glm_vec4 const sub0 = glm_vec4_sub(_mm_set1_ps(1.0f), a); + glm_vec4 const mul0 = glm_vec4_mul(v1, sub0); + glm_vec4 const mad0 = glm_vec4_fma(v2, a, mul0); + return mad0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_step(glm_vec4 edge, glm_vec4 x) +{ + glm_vec4 const cmp = _mm_cmple_ps(x, edge); + return _mm_movemask_ps(cmp) == 0 ? 
_mm_set1_ps(1.0f) : _mm_setzero_ps(); +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_smoothstep(glm_vec4 edge0, glm_vec4 edge1, glm_vec4 x) +{ + glm_vec4 const sub0 = glm_vec4_sub(x, edge0); + glm_vec4 const sub1 = glm_vec4_sub(edge1, edge0); + glm_vec4 const div0 = glm_vec4_div(sub0, sub1); + glm_vec4 const clp0 = glm_vec4_clamp(div0, _mm_setzero_ps(), _mm_set1_ps(1.0f)); + glm_vec4 const mul0 = glm_vec4_mul(_mm_set1_ps(2.0f), clp0); + glm_vec4 const sub2 = glm_vec4_sub(_mm_set1_ps(3.0f), mul0); + glm_vec4 const mul1 = glm_vec4_mul(clp0, clp0); + glm_vec4 const mul2 = glm_vec4_mul(mul1, sub2); + return mul2; +} + +// Agner Fog method +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_nan(glm_vec4 x) +{ + glm_ivec4 const t1 = _mm_castps_si128(x); // reinterpret as 32-bit integer + glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1)); // shift out sign bit + glm_ivec4 const t3 = _mm_set1_epi32(int(0xFF000000)); // exponent mask + glm_ivec4 const t4 = _mm_and_si128(t2, t3); // exponent + glm_ivec4 const t5 = _mm_andnot_si128(t3, t2); // fraction + glm_ivec4 const Equal = _mm_cmpeq_epi32(t3, t4); + glm_ivec4 const Nequal = _mm_cmpeq_epi32(t5, _mm_setzero_si128()); + glm_ivec4 const And = _mm_and_si128(Equal, Nequal); + return _mm_castsi128_ps(And); // exponent = all 1s and fraction != 0 +} + +// Agner Fog method +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_inf(glm_vec4 x) +{ + glm_ivec4 const t1 = _mm_castps_si128(x); // reinterpret as 32-bit integer + glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1)); // shift out sign bit + return _mm_castsi128_ps(_mm_cmpeq_epi32(t2, _mm_set1_epi32(int(0xFF000000)))); // exponent is all 1s, fraction is 0 +} + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/includes/glm/simd/exponential.h b/includes/glm/simd/exponential.h new file mode 100644 index 0000000..bc351d0 --- /dev/null +++ b/includes/glm/simd/exponential.h @@ -0,0 +1,20 @@ +/// @ref simd +/// @file glm/simd/experimental.h + +#pragma once + +#include "platform.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_sqrt_lowp(glm_f32vec4 x) +{ + return _mm_mul_ss(_mm_rsqrt_ss(x), x); +} + +GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_sqrt_lowp(glm_f32vec4 x) +{ + return _mm_mul_ps(_mm_rsqrt_ps(x), x); +} + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/includes/glm/simd/geometric.h b/includes/glm/simd/geometric.h new file mode 100644 index 0000000..afbe590 --- /dev/null +++ b/includes/glm/simd/geometric.h @@ -0,0 +1,130 @@ +/// @ref simd +/// @file glm/simd/geometric.h + +#pragma once + +#include "common.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +GLM_FUNC_DECL glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2); +GLM_FUNC_DECL glm_vec4 glm_vec1_dot(glm_vec4 v1, glm_vec4 v2); + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_length(glm_vec4 x) +{ + glm_vec4 const dot0 = glm_vec4_dot(x, x); + glm_vec4 const sqt0 = _mm_sqrt_ps(dot0); + return sqt0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_distance(glm_vec4 p0, glm_vec4 p1) +{ + glm_vec4 const sub0 = _mm_sub_ps(p0, p1); + glm_vec4 const len0 = glm_vec4_length(sub0); + return len0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2) +{ +# if GLM_ARCH & GLM_ARCH_AVX_BIT + return _mm_dp_ps(v1, v2, 0xff); +# elif GLM_ARCH & GLM_ARCH_SSE3_BIT + glm_vec4 const mul0 = _mm_mul_ps(v1, v2); + glm_vec4 const hadd0 = _mm_hadd_ps(mul0, mul0); + glm_vec4 const hadd1 = _mm_hadd_ps(hadd0, hadd0); + return hadd1; +# else + glm_vec4 const mul0 = _mm_mul_ps(v1, v2); + glm_vec4 const swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1)); 
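	// Added note: the pre-SSE3 path finishes the dot product with two
	// shuffle+add passes. Swapping neighbouring lanes and adding leaves
	// (x*x' + y*y') and (z*z' + w*w') in each pair of lanes; reversing the
	// vector and adding once more broadcasts the full sum
	// x*x' + y*y' + z*z' + w*w' to all four lanes.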
+ glm_vec4 const add0 = _mm_add_ps(mul0, swp0); + glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3)); + glm_vec4 const add1 = _mm_add_ps(add0, swp1); + return add1; +# endif +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_dot(glm_vec4 v1, glm_vec4 v2) +{ +# if GLM_ARCH & GLM_ARCH_AVX_BIT + return _mm_dp_ps(v1, v2, 0xff); +# elif GLM_ARCH & GLM_ARCH_SSE3_BIT + glm_vec4 const mul0 = _mm_mul_ps(v1, v2); + glm_vec4 const had0 = _mm_hadd_ps(mul0, mul0); + glm_vec4 const had1 = _mm_hadd_ps(had0, had0); + return had1; +# else + glm_vec4 const mul0 = _mm_mul_ps(v1, v2); + glm_vec4 const mov0 = _mm_movehl_ps(mul0, mul0); + glm_vec4 const add0 = _mm_add_ps(mov0, mul0); + glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, 1); + glm_vec4 const add1 = _mm_add_ss(add0, swp1); + return add1; +# endif +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_cross(glm_vec4 v1, glm_vec4 v2) +{ + glm_vec4 const swp0 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 0, 2, 1)); + glm_vec4 const swp1 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 1, 0, 2)); + glm_vec4 const swp2 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 0, 2, 1)); + glm_vec4 const swp3 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 1, 0, 2)); + glm_vec4 const mul0 = _mm_mul_ps(swp0, swp3); + glm_vec4 const mul1 = _mm_mul_ps(swp1, swp2); + glm_vec4 const sub0 = _mm_sub_ps(mul0, mul1); + return sub0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_normalize(glm_vec4 v) +{ + glm_vec4 const dot0 = glm_vec4_dot(v, v); + glm_vec4 const isr0 = _mm_rsqrt_ps(dot0); + glm_vec4 const mul0 = _mm_mul_ps(v, isr0); + return mul0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_faceforward(glm_vec4 N, glm_vec4 I, glm_vec4 Nref) +{ + glm_vec4 const dot0 = glm_vec4_dot(Nref, I); + glm_vec4 const sgn0 = glm_vec4_sign(dot0); + glm_vec4 const mul0 = _mm_mul_ps(sgn0, _mm_set1_ps(-1.0f)); + glm_vec4 const mul1 = _mm_mul_ps(N, mul0); + return mul1; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_reflect(glm_vec4 I, glm_vec4 N) +{ + glm_vec4 const dot0 = glm_vec4_dot(N, I); + glm_vec4 const mul0 = _mm_mul_ps(N, dot0); + glm_vec4 const mul1 = _mm_mul_ps(mul0, _mm_set1_ps(2.0f)); + glm_vec4 const sub0 = _mm_sub_ps(I, mul1); + return sub0; +} + +GLM_FUNC_QUALIFIER __m128 glm_vec4_refract(glm_vec4 I, glm_vec4 N, glm_vec4 eta) +{ + // k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)); + // if (k < 0.0) + // R = genType(0.0); // or genDType(0.0) + // else + // R = eta * I - (eta * dot(N, I) + sqrt(k)) * N; + + glm_vec4 const dot0 = glm_vec4_dot(N, I); // dot(N, I) + glm_vec4 const mul0 = _mm_mul_ps(eta, eta); // eta * eta + glm_vec4 const mul1 = _mm_mul_ps(dot0, dot0); // dot(N, I) * dot(N, I) + glm_vec4 const sub1 = _mm_sub_ps(_mm_set1_ps(1.0f), mul1); // (1.0 - dot(N, I) * dot(N, I)) + glm_vec4 const mul2 = _mm_mul_ps(mul0, sub1); // eta * eta * (1.0 - dot(N, I) * dot(N, I)) + glm_vec4 const sub0 = _mm_sub_ps(_mm_set1_ps(1.0f), mul2); // 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) + + if(_mm_movemask_ps(_mm_cmplt_ss(sub0, _mm_set1_ps(0.0f))) == 0) + return _mm_set1_ps(0.0f); + + glm_vec4 const sqt0 = _mm_sqrt_ps(sub0); + glm_vec4 const mad0 = glm_vec4_fma(eta, dot0, sqt0); + glm_vec4 const mul4 = _mm_mul_ps(mad0, N); + glm_vec4 const mul5 = _mm_mul_ps(eta, I); + glm_vec4 const sub2 = _mm_sub_ps(mul5, mul4); + + return sub2; +} + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/includes/glm/simd/integer.h b/includes/glm/simd/integer.h new file mode 100644 index 0000000..9381418 --- /dev/null +++ b/includes/glm/simd/integer.h @@ -0,0 +1,115 @@ +/// @ref simd +/// @file glm/simd/integer.h + +#pragma once + 
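The interleave helpers in this header follow the classic mask-and-shift bit-spreading sequence spelled out in the inline comments (shifts of 16, 8, 4, 2, 1 against the 0x0000FFFF..., 0x00FF00FF..., 0x0F0F..., 0x3333..., 0x5555... masks). A scalar sketch of the same operation, purely for reference and not part of GLM's API (the name interleave_bits_scalar is invented here):

#include <cstdint>

// Spread the bits of x and y into the even and odd bit positions of a
// 64-bit result (Morton order), mirroring the SIMD code below.
static std::uint64_t interleave_bits_scalar(std::uint32_t x, std::uint32_t y)
{
    auto spread = [](std::uint64_t v)
    {
        v = (v | (v << 16)) & 0x0000FFFF0000FFFFull;
        v = (v | (v << 8))  & 0x00FF00FF00FF00FFull;
        v = (v | (v << 4))  & 0x0F0F0F0F0F0F0F0Full;
        v = (v | (v << 2))  & 0x3333333333333333ull;
        v = (v | (v << 1))  & 0x5555555555555555ull;
        return v;
    };
    return spread(x) | (spread(y) << 1);
}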
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave(glm_uvec4 x) +{ + glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF); + glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF); + glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F); + glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333); + glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555); + + glm_uvec4 Reg1; + glm_uvec4 Reg2; + + // REG1 = x; + // REG2 = y; + //Reg1 = _mm_unpacklo_epi64(x, y); + Reg1 = x; + + //REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF); + //REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF); + Reg2 = _mm_slli_si128(Reg1, 2); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask4); + + //REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF); + //REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF); + Reg2 = _mm_slli_si128(Reg1, 1); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask3); + + //REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F); + //REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F); + Reg2 = _mm_slli_epi32(Reg1, 4); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask2); + + //REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333); + //REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333); + Reg2 = _mm_slli_epi32(Reg1, 2); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask1); + + //REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555); + //REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555); + Reg2 = _mm_slli_epi32(Reg1, 1); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask0); + + //return REG1 | (REG2 << 1); + Reg2 = _mm_slli_epi32(Reg1, 1); + Reg2 = _mm_srli_si128(Reg2, 8); + Reg1 = _mm_or_si128(Reg1, Reg2); + + return Reg1; +} + +GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave2(glm_uvec4 x, glm_uvec4 y) +{ + glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF); + glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF); + glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F); + glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333); + glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555); + + glm_uvec4 Reg1; + glm_uvec4 Reg2; + + // REG1 = x; + // REG2 = y; + Reg1 = _mm_unpacklo_epi64(x, y); + + //REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF); + //REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF); + Reg2 = _mm_slli_si128(Reg1, 2); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask4); + + //REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF); + //REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF); + Reg2 = _mm_slli_si128(Reg1, 1); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask3); + + //REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F); + //REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F); + Reg2 = _mm_slli_epi32(Reg1, 4); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask2); + + //REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333); + //REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333); + Reg2 = _mm_slli_epi32(Reg1, 2); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask1); + + //REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555); + //REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555); + Reg2 = _mm_slli_epi32(Reg1, 1); + Reg1 = _mm_or_si128(Reg2, Reg1); + Reg1 = _mm_and_si128(Reg1, Mask0); + + //return REG1 | (REG2 
<< 1); + Reg2 = _mm_slli_epi32(Reg1, 1); + Reg2 = _mm_srli_si128(Reg2, 8); + Reg1 = _mm_or_si128(Reg1, Reg2); + + return Reg1; +} + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/includes/glm/simd/matrix.h b/includes/glm/simd/matrix.h new file mode 100644 index 0000000..8f9461c --- /dev/null +++ b/includes/glm/simd/matrix.h @@ -0,0 +1,1040 @@ +/// @ref simd +/// @file glm/simd/matrix.h + +#pragma once + +#include "geometric.h" + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +GLM_FUNC_QUALIFIER void glm_mat4_matrixCompMult(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) +{ + out[0] = _mm_mul_ps(in1[0], in2[0]); + out[1] = _mm_mul_ps(in1[1], in2[1]); + out[2] = _mm_mul_ps(in1[2], in2[2]); + out[3] = _mm_mul_ps(in1[3], in2[3]); +} + +GLM_FUNC_QUALIFIER void glm_mat4_add(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) +{ + out[0] = _mm_add_ps(in1[0], in2[0]); + out[1] = _mm_add_ps(in1[1], in2[1]); + out[2] = _mm_add_ps(in1[2], in2[2]); + out[3] = _mm_add_ps(in1[3], in2[3]); +} + +GLM_FUNC_QUALIFIER void glm_mat4_sub(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) +{ + out[0] = _mm_sub_ps(in1[0], in2[0]); + out[1] = _mm_sub_ps(in1[1], in2[1]); + out[2] = _mm_sub_ps(in1[2], in2[2]); + out[3] = _mm_sub_ps(in1[3], in2[3]); +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_mul_vec4(glm_vec4 const m[4], glm_vec4 v) +{ + __m128 v0 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 v1 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1)); + __m128 v2 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 v3 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(m[0], v0); + __m128 m1 = _mm_mul_ps(m[1], v1); + __m128 m2 = _mm_mul_ps(m[2], v2); + __m128 m3 = _mm_mul_ps(m[3], v3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + return a2; +} + +GLM_FUNC_QUALIFIER __m128 glm_vec4_mul_mat4(glm_vec4 v, glm_vec4 const m[4]) +{ + __m128 i0 = m[0]; + __m128 i1 = m[1]; + __m128 i2 = m[2]; + __m128 i3 = m[3]; + + __m128 m0 = _mm_mul_ps(v, i0); + __m128 m1 = _mm_mul_ps(v, i1); + __m128 m2 = _mm_mul_ps(v, i2); + __m128 m3 = _mm_mul_ps(v, i3); + + __m128 u0 = _mm_unpacklo_ps(m0, m1); + __m128 u1 = _mm_unpackhi_ps(m0, m1); + __m128 a0 = _mm_add_ps(u0, u1); + + __m128 u2 = _mm_unpacklo_ps(m2, m3); + __m128 u3 = _mm_unpackhi_ps(m2, m3); + __m128 a1 = _mm_add_ps(u2, u3); + + __m128 f0 = _mm_movelh_ps(a0, a1); + __m128 f1 = _mm_movehl_ps(a1, a0); + __m128 f2 = _mm_add_ps(f0, f1); + + return f2; +} + +GLM_FUNC_QUALIFIER void glm_mat4_mul(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) +{ + { + __m128 e0 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 e1 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 e2 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 e3 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(in1[0], e0); + __m128 m1 = _mm_mul_ps(in1[1], e1); + __m128 m2 = _mm_mul_ps(in1[2], e2); + __m128 m3 = _mm_mul_ps(in1[3], e3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + out[0] = a2; + } + + { + __m128 e0 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 e1 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 e2 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 e3 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(in1[0], e0); + __m128 m1 = 
_mm_mul_ps(in1[1], e1); + __m128 m2 = _mm_mul_ps(in1[2], e2); + __m128 m3 = _mm_mul_ps(in1[3], e3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + out[1] = a2; + } + + { + __m128 e0 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 e1 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 e2 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 e3 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(in1[0], e0); + __m128 m1 = _mm_mul_ps(in1[1], e1); + __m128 m2 = _mm_mul_ps(in1[2], e2); + __m128 m3 = _mm_mul_ps(in1[3], e3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + out[2] = a2; + } + + { + //(__m128&)_mm_shuffle_epi32(__m128i&)in2[0], _MM_SHUFFLE(3, 3, 3, 3)) + __m128 e0 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 e1 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 e2 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 e3 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 m0 = _mm_mul_ps(in1[0], e0); + __m128 m1 = _mm_mul_ps(in1[1], e1); + __m128 m2 = _mm_mul_ps(in1[2], e2); + __m128 m3 = _mm_mul_ps(in1[3], e3); + + __m128 a0 = _mm_add_ps(m0, m1); + __m128 a1 = _mm_add_ps(m2, m3); + __m128 a2 = _mm_add_ps(a0, a1); + + out[3] = a2; + } +} + +GLM_FUNC_QUALIFIER void glm_mat4_transpose(glm_vec4 const in[4], glm_vec4 out[4]) +{ + __m128 tmp0 = _mm_shuffle_ps(in[0], in[1], 0x44); + __m128 tmp2 = _mm_shuffle_ps(in[0], in[1], 0xEE); + __m128 tmp1 = _mm_shuffle_ps(in[2], in[3], 0x44); + __m128 tmp3 = _mm_shuffle_ps(in[2], in[3], 0xEE); + + out[0] = _mm_shuffle_ps(tmp0, tmp1, 0x88); + out[1] = _mm_shuffle_ps(tmp0, tmp1, 0xDD); + out[2] = _mm_shuffle_ps(tmp2, tmp3, 0x88); + out[3] = _mm_shuffle_ps(tmp2, tmp3, 0xDD); +} + +GLM_FUNC_QUALIFIER void glm_mat3_transpose(glm_vec4 const in[3], glm_vec4 out[3]) +{ + __m128 tmp0 = _mm_shuffle_ps(in[0], in[1], 0x44); + __m128 tmp2 = _mm_shuffle_ps(in[0], in[1], 0xEE); + __m128 tmp1 = _mm_shuffle_ps(in[2], in[2], 0x44); + __m128 tmp3 = _mm_shuffle_ps(in[2], in[2], 0xEE); + + out[0] = _mm_shuffle_ps(tmp0, tmp1, 0x88); + out[1] = _mm_shuffle_ps(tmp0, tmp1, 0xDD); + out[2] = _mm_shuffle_ps(tmp2, tmp3, 0x88); +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_highp(glm_vec4 const in[4]) +{ + __m128 Fac0; + { + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; + // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac0 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac1; + { + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; + // valType SubFactor14 = m[1][1] * m[2][3] - 
m[2][1] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac1 = _mm_sub_ps(Mul00, Mul01); + } + + + __m128 Fac2; + { + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; + // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac2 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac3; + { + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; + // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac3 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac4; + { + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; + // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac4 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac5; + { + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; + // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 
0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac5 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); + __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); + + // m[1][0] + // m[0][0] + // m[0][0] + // m[0][0] + __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][1] + // m[0][1] + // m[0][1] + // m[0][1] + __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][2] + // m[0][2] + // m[0][2] + // m[0][2] + __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][3] + // m[0][3] + // m[0][3] + // m[0][3] + __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); + + // col0 + // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), + // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), + // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), + // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), + __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); + __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); + __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); + __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); + __m128 Add00 = _mm_add_ps(Sub00, Mul02); + __m128 Inv0 = _mm_mul_ps(SignB, Add00); + + // col1 + // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), + // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), + // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), + // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), + __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); + __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); + __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); + __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); + __m128 Add01 = _mm_add_ps(Sub01, Mul05); + __m128 Inv1 = _mm_mul_ps(SignA, Add01); + + // col2 + // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), + // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), + // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), + // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), + __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); + __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); + __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); + __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); + __m128 Add02 = _mm_add_ps(Sub02, Mul08); + __m128 Inv2 = _mm_mul_ps(SignB, Add02); + + // col3 + // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), + // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), + // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), + // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); + __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); + __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); + __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); + __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); + __m128 Add03 = _mm_add_ps(Sub03, Mul11); + __m128 Inv3 = _mm_mul_ps(SignA, Add03); + + __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row1 = 
_mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); + + // valType Determinant = m[0][0] * Inverse[0][0] + // + m[0][1] * Inverse[1][0] + // + m[0][2] * Inverse[2][0] + // + m[0][3] * Inverse[3][0]; + __m128 Det0 = glm_vec4_dot(in[0], Row2); + return Det0; +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_lowp(glm_vec4 const m[4]) +{ + // _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128( + + //T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + //T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + //T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + //T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + //T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + //T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + + // First 2 columns + __m128 Swp2A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 1, 1, 2))); + __m128 Swp3A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(3, 2, 3, 3))); + __m128 MulA = _mm_mul_ps(Swp2A, Swp3A); + + // Second 2 columns + __m128 Swp2B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(3, 2, 3, 3))); + __m128 Swp3B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(0, 1, 1, 2))); + __m128 MulB = _mm_mul_ps(Swp2B, Swp3B); + + // Columns subtraction + __m128 SubE = _mm_sub_ps(MulA, MulB); + + // Last 2 rows + __m128 Swp2C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 0, 1, 2))); + __m128 Swp3C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(1, 2, 0, 0))); + __m128 MulC = _mm_mul_ps(Swp2C, Swp3C); + __m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC); + + //vec<4, T, Q> DetCof( + // + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), + // - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), + // + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), + // - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); + + __m128 SubFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubE), _MM_SHUFFLE(2, 1, 0, 0))); + __m128 SwpFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(0, 0, 0, 1))); + __m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA); + + __m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1)); + __m128 SubFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpB), _MM_SHUFFLE(3, 1, 1, 0)));//SubF[0], SubE[3], SubE[3], SubE[1]; + __m128 SwpFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(1, 1, 2, 2))); + __m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB); + + __m128 SubRes = _mm_sub_ps(MulFacA, MulFacB); + + __m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2)); + __m128 SubFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpC), _MM_SHUFFLE(3, 3, 2, 0))); + __m128 SwpFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(2, 3, 3, 3))); + __m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC); + + __m128 AddRes = _mm_add_ps(SubRes, MulFacC); + __m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f)); + + //return m[0][0] * DetCof[0] + // + m[0][1] * DetCof[1] + // + m[0][2] * DetCof[2] + // + m[0][3] * DetCof[3]; + + return glm_vec4_dot(m[0], DetCof); +} + +GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant(glm_vec4 const m[4]) +{ + // _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(add) + + 
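	// Added note: as in the _highp and _lowp variants above, the steps below
	// build the six 2x2 sub-determinants from columns m[2] and m[3], combine
	// them with the elements of m[1] into the four signed cofactors paired
	// with m[0] (DetCof), and finish with glm_vec4_dot(m[0], DetCof), i.e. a
	// Laplace expansion of the 4x4 determinant along the first column.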
//T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + //T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + //T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + //T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + //T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + //T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + + // First 2 columns + __m128 Swp2A = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 1, 1, 2)); + __m128 Swp3A = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(3, 2, 3, 3)); + __m128 MulA = _mm_mul_ps(Swp2A, Swp3A); + + // Second 2 columns + __m128 Swp2B = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(3, 2, 3, 3)); + __m128 Swp3B = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(0, 1, 1, 2)); + __m128 MulB = _mm_mul_ps(Swp2B, Swp3B); + + // Columns subtraction + __m128 SubE = _mm_sub_ps(MulA, MulB); + + // Last 2 rows + __m128 Swp2C = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 0, 1, 2)); + __m128 Swp3C = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(1, 2, 0, 0)); + __m128 MulC = _mm_mul_ps(Swp2C, Swp3C); + __m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC); + + //vec<4, T, Q> DetCof( + // + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), + // - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), + // + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), + // - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); + + __m128 SubFacA = _mm_shuffle_ps(SubE, SubE, _MM_SHUFFLE(2, 1, 0, 0)); + __m128 SwpFacA = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(0, 0, 0, 1)); + __m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA); + + __m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1)); + __m128 SubFacB = _mm_shuffle_ps(SubTmpB, SubTmpB, _MM_SHUFFLE(3, 1, 1, 0));//SubF[0], SubE[3], SubE[3], SubE[1]; + __m128 SwpFacB = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(1, 1, 2, 2)); + __m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB); + + __m128 SubRes = _mm_sub_ps(MulFacA, MulFacB); + + __m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2)); + __m128 SubFacC = _mm_shuffle_ps(SubTmpC, SubTmpC, _MM_SHUFFLE(3, 3, 2, 0)); + __m128 SwpFacC = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(2, 3, 3, 3)); + __m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC); + + __m128 AddRes = _mm_add_ps(SubRes, MulFacC); + __m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f)); + + //return m[0][0] * DetCof[0] + // + m[0][1] * DetCof[1] + // + m[0][2] * DetCof[2] + // + m[0][3] * DetCof[3]; + + return glm_vec4_dot(m[0], DetCof); +} + +GLM_FUNC_QUALIFIER void glm_mat4_inverse(glm_vec4 const in[4], glm_vec4 out[4]) +{ + __m128 Fac0; + { + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; + // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac0 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac1; + { + // valType SubFactor01 = m[2][1] * m[3][3] - 
m[3][1] * m[2][3]; + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; + // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac1 = _mm_sub_ps(Mul00, Mul01); + } + + + __m128 Fac2; + { + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; + // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac2 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac3; + { + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; + // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac3 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac4; + { + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; + // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac4 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac5; + { + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; + // 
valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac5 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); + __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); + + // m[1][0] + // m[0][0] + // m[0][0] + // m[0][0] + __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][1] + // m[0][1] + // m[0][1] + // m[0][1] + __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][2] + // m[0][2] + // m[0][2] + // m[0][2] + __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][3] + // m[0][3] + // m[0][3] + // m[0][3] + __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); + + // col0 + // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), + // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), + // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), + // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), + __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); + __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); + __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); + __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); + __m128 Add00 = _mm_add_ps(Sub00, Mul02); + __m128 Inv0 = _mm_mul_ps(SignB, Add00); + + // col1 + // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), + // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), + // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), + // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), + __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); + __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); + __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); + __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); + __m128 Add01 = _mm_add_ps(Sub01, Mul05); + __m128 Inv1 = _mm_mul_ps(SignA, Add01); + + // col2 + // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), + // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), + // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), + // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), + __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); + __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); + __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); + __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); + __m128 Add02 = _mm_add_ps(Sub02, Mul08); + __m128 Inv2 = _mm_mul_ps(SignB, Add02); + + // col3 + // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), + // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), + // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), + // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); + __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); + __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); + __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); + __m128 Sub03 = 
_mm_sub_ps(Mul09, Mul10); + __m128 Add03 = _mm_add_ps(Sub03, Mul11); + __m128 Inv3 = _mm_mul_ps(SignA, Add03); + + __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); + + // valType Determinant = m[0][0] * Inverse[0][0] + // + m[0][1] * Inverse[1][0] + // + m[0][2] * Inverse[2][0] + // + m[0][3] * Inverse[3][0]; + __m128 Det0 = glm_vec4_dot(in[0], Row2); + __m128 Rcp0 = _mm_div_ps(_mm_set1_ps(1.0f), Det0); + //__m128 Rcp0 = _mm_rcp_ps(Det0); + + // Inverse /= Determinant; + out[0] = _mm_mul_ps(Inv0, Rcp0); + out[1] = _mm_mul_ps(Inv1, Rcp0); + out[2] = _mm_mul_ps(Inv2, Rcp0); + out[3] = _mm_mul_ps(Inv3, Rcp0); +} + +GLM_FUNC_QUALIFIER void glm_mat4_inverse_lowp(glm_vec4 const in[4], glm_vec4 out[4]) +{ + __m128 Fac0; + { + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; + // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; + // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac0 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac1; + { + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; + // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; + // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac1 = _mm_sub_ps(Mul00, Mul01); + } + + + __m128 Fac2; + { + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; + // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; + // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac2 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac3; + { + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; + // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * 
m[2][3]; + // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; + // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac3 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac4; + { + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; + // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; + // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac4 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 Fac5; + { + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; + // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; + // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; + + __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); + + __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); + __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); + + __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); + __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); + Fac5 = _mm_sub_ps(Mul00, Mul01); + } + + __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); + __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); + + // m[1][0] + // m[0][0] + // m[0][0] + // m[0][0] + __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][1] + // m[0][1] + // m[0][1] + // m[0][1] + __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); + __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][2] + // m[0][2] + // m[0][2] + // m[0][2] + __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); + __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); + + // m[1][3] + // m[0][3] + // m[0][3] + // m[0][3] + __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); + __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); + + // col0 + // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), + // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), + // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), + // - (Vec1[3] * Fac0[3] - Vec2[3] * 
Fac1[3] + Vec3[3] * Fac2[3]), + __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); + __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); + __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); + __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); + __m128 Add00 = _mm_add_ps(Sub00, Mul02); + __m128 Inv0 = _mm_mul_ps(SignB, Add00); + + // col1 + // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), + // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), + // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), + // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), + __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); + __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); + __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); + __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); + __m128 Add01 = _mm_add_ps(Sub01, Mul05); + __m128 Inv1 = _mm_mul_ps(SignA, Add01); + + // col2 + // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), + // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), + // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), + // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), + __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); + __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); + __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); + __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); + __m128 Add02 = _mm_add_ps(Sub02, Mul08); + __m128 Inv2 = _mm_mul_ps(SignB, Add02); + + // col3 + // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), + // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), + // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), + // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); + __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); + __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); + __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); + __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); + __m128 Add03 = _mm_add_ps(Sub03, Mul11); + __m128 Inv3 = _mm_mul_ps(SignA, Add03); + + __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); + + // valType Determinant = m[0][0] * Inverse[0][0] + // + m[0][1] * Inverse[1][0] + // + m[0][2] * Inverse[2][0] + // + m[0][3] * Inverse[3][0]; + __m128 Det0 = glm_vec4_dot(in[0], Row2); + __m128 Rcp0 = _mm_rcp_ps(Det0); + //__m128 Rcp0 = _mm_div_ps(one, Det0); + // Inverse /= Determinant; + out[0] = _mm_mul_ps(Inv0, Rcp0); + out[1] = _mm_mul_ps(Inv1, Rcp0); + out[2] = _mm_mul_ps(Inv2, Rcp0); + out[3] = _mm_mul_ps(Inv3, Rcp0); +} +/* +GLM_FUNC_QUALIFIER void glm_mat4_rotate(__m128 const in[4], float Angle, float const v[3], __m128 out[4]) +{ + float a = glm::radians(Angle); + float c = cos(a); + float s = sin(a); + + glm::vec4 AxisA(v[0], v[1], v[2], float(0)); + __m128 AxisB = _mm_set_ps(AxisA.w, AxisA.z, AxisA.y, AxisA.x); + __m128 AxisC = detail::sse_nrm_ps(AxisB); + + __m128 Cos0 = _mm_set_ss(c); + __m128 CosA = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 Sin0 = _mm_set_ss(s); + __m128 SinA = _mm_shuffle_ps(Sin0, Sin0, _MM_SHUFFLE(0, 0, 0, 0)); + + // vec<3, T, Q> temp = (valType(1) - c) * axis; + __m128 Temp0 = _mm_sub_ps(one, CosA); + __m128 Temp1 = _mm_mul_ps(Temp0, AxisC); + + //Rotate[0][0] = c + temp[0] * axis[0]; + //Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2]; + //Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1]; + __m128 Axis0 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 TmpA0 = _mm_mul_ps(Axis0, AxisC); + __m128 CosA0 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 1, 
0)); + __m128 TmpA1 = _mm_add_ps(CosA0, TmpA0); + __m128 SinA0 = SinA;//_mm_set_ps(0.0f, s, -s, 0.0f); + __m128 TmpA2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 1, 2, 3)); + __m128 TmpA3 = _mm_mul_ps(SinA0, TmpA2); + __m128 TmpA4 = _mm_add_ps(TmpA1, TmpA3); + + //Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2]; + //Rotate[1][1] = c + temp[1] * axis[1]; + //Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0]; + __m128 Axis1 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(1, 1, 1, 1)); + __m128 TmpB0 = _mm_mul_ps(Axis1, AxisC); + __m128 CosA1 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 0, 1)); + __m128 TmpB1 = _mm_add_ps(CosA1, TmpB0); + __m128 SinB0 = SinA;//_mm_set_ps(-s, 0.0f, s, 0.0f); + __m128 TmpB2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 0, 3, 2)); + __m128 TmpB3 = _mm_mul_ps(SinA0, TmpB2); + __m128 TmpB4 = _mm_add_ps(TmpB1, TmpB3); + + //Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1]; + //Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0]; + //Rotate[2][2] = c + temp[2] * axis[2]; + __m128 Axis2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 TmpC0 = _mm_mul_ps(Axis2, AxisC); + __m128 CosA2 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 0, 1, 1)); + __m128 TmpC1 = _mm_add_ps(CosA2, TmpC0); + __m128 SinC0 = SinA;//_mm_set_ps(s, -s, 0.0f, 0.0f); + __m128 TmpC2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 3, 0, 1)); + __m128 TmpC3 = _mm_mul_ps(SinA0, TmpC2); + __m128 TmpC4 = _mm_add_ps(TmpC1, TmpC3); + + __m128 Result[4]; + Result[0] = TmpA4; + Result[1] = TmpB4; + Result[2] = TmpC4; + Result[3] = _mm_set_ps(1, 0, 0, 0); + + //mat<4, 4, valType> Result; + //Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2]; + //Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2]; + //Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2]; + //Result[3] = m[3]; + //return Result; + sse_mul_ps(in, Result, out); +} +*/ +GLM_FUNC_QUALIFIER void glm_mat4_outerProduct(__m128 const& c, __m128 const& r, __m128 out[4]) +{ + out[0] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(0, 0, 0, 0))); + out[1] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(1, 1, 1, 1))); + out[2] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(2, 2, 2, 2))); + out[3] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(3, 3, 3, 3))); +} + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/includes/glm/simd/neon.h b/includes/glm/simd/neon.h new file mode 100644 index 0000000..4bb7c2e --- /dev/null +++ b/includes/glm/simd/neon.h @@ -0,0 +1,159 @@ +/// @ref simd_neon +/// @file glm/simd/neon.h + +#pragma once + +#if GLM_ARCH & GLM_ARCH_NEON_BIT +#include + +namespace glm { + namespace neon { + static inline float32x4_t dupq_lane(float32x4_t vsrc, int lane) { + switch(lane) { +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + case 0: return vdupq_laneq_f32(vsrc, 0); + case 1: return vdupq_laneq_f32(vsrc, 1); + case 2: return vdupq_laneq_f32(vsrc, 2); + case 3: return vdupq_laneq_f32(vsrc, 3); +#else + case 0: return vdupq_n_f32(vgetq_lane_f32(vsrc, 0)); + case 1: return vdupq_n_f32(vgetq_lane_f32(vsrc, 1)); + case 2: return vdupq_n_f32(vgetq_lane_f32(vsrc, 2)); + case 3: return vdupq_n_f32(vgetq_lane_f32(vsrc, 3)); +#endif + } + assert(false); //Unreachable code executed! 
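A note on the two SSE2 inverse paths above: glm_mat4_inverse divides the signed cofactor columns by the exact determinant via _mm_div_ps, while glm_mat4_inverse_lowp trades accuracy for speed by using the roughly 12-bit _mm_rcp_ps approximation. A minimal sanity-check sketch, assuming the vendored GLM headers above are on the include path and that glm::inverse is routed to one of these kernels (as it should be when the SSE2 specializations are selected); the helper name is made up for illustration:

#include <cmath>
#include <glm/glm.hpp>

// Hypothetical check: M * inverse(M) should be close to the identity.
// With the highp (divide) path the error is at round-off level; with the
// lowp (_mm_rcp_ps) path expect only about three correct decimal digits.
static bool roughly_identity(const glm::mat4 &m, float eps = 1e-4f)
{
    glm::mat4 p = m * glm::inverse(m);
    for (int c = 0; c < 4; ++c)
        for (int r = 0; r < 4; ++r)
            if (std::fabs(p[c][r] - (c == r ? 1.0f : 0.0f)) > eps)
                return (false);
    return (true);
}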
+ return vdupq_n_f32(0.0f); + } + + static inline float32x2_t dup_lane(float32x4_t vsrc, int lane) { + switch(lane) { +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + case 0: return vdup_laneq_f32(vsrc, 0); + case 1: return vdup_laneq_f32(vsrc, 1); + case 2: return vdup_laneq_f32(vsrc, 2); + case 3: return vdup_laneq_f32(vsrc, 3); +#else + case 0: return vdup_n_f32(vgetq_lane_f32(vsrc, 0)); + case 1: return vdup_n_f32(vgetq_lane_f32(vsrc, 1)); + case 2: return vdup_n_f32(vgetq_lane_f32(vsrc, 2)); + case 3: return vdup_n_f32(vgetq_lane_f32(vsrc, 3)); +#endif + } + assert(false); //Unreachable code executed! + return vdup_n_f32(0.0f); + } + + static inline float32x4_t copy_lane(float32x4_t vdst, int dlane, float32x4_t vsrc, int slane) { +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + switch(dlane) { + case 0: + switch(slane) { + case 0: return vcopyq_laneq_f32(vdst, 0, vsrc, 0); + case 1: return vcopyq_laneq_f32(vdst, 0, vsrc, 1); + case 2: return vcopyq_laneq_f32(vdst, 0, vsrc, 2); + case 3: return vcopyq_laneq_f32(vdst, 0, vsrc, 3); + } + assert(false); //Unreachable code executed! + break; + case 1: + switch(slane) { + case 0: return vcopyq_laneq_f32(vdst, 1, vsrc, 0); + case 1: return vcopyq_laneq_f32(vdst, 1, vsrc, 1); + case 2: return vcopyq_laneq_f32(vdst, 1, vsrc, 2); + case 3: return vcopyq_laneq_f32(vdst, 1, vsrc, 3); + } + assert(false); //Unreachable code executed! + break; + case 2: + switch(slane) { + case 0: return vcopyq_laneq_f32(vdst, 2, vsrc, 0); + case 1: return vcopyq_laneq_f32(vdst, 2, vsrc, 1); + case 2: return vcopyq_laneq_f32(vdst, 2, vsrc, 2); + case 3: return vcopyq_laneq_f32(vdst, 2, vsrc, 3); + } + assert(false); //Unreachable code executed! + break; + case 3: + switch(slane) { + case 0: return vcopyq_laneq_f32(vdst, 3, vsrc, 0); + case 1: return vcopyq_laneq_f32(vdst, 3, vsrc, 1); + case 2: return vcopyq_laneq_f32(vdst, 3, vsrc, 2); + case 3: return vcopyq_laneq_f32(vdst, 3, vsrc, 3); + } + assert(false); //Unreachable code executed! + break; + } +#else + + float l; + switch(slane) { + case 0: l = vgetq_lane_f32(vsrc, 0); break; + case 1: l = vgetq_lane_f32(vsrc, 1); break; + case 2: l = vgetq_lane_f32(vsrc, 2); break; + case 3: l = vgetq_lane_f32(vsrc, 3); break; + default: + assert(false); //Unreachable code executed! + } + switch(dlane) { + case 0: return vsetq_lane_f32(l, vdst, 0); + case 1: return vsetq_lane_f32(l, vdst, 1); + case 2: return vsetq_lane_f32(l, vdst, 2); + case 3: return vsetq_lane_f32(l, vdst, 3); + } +#endif + assert(false); //Unreachable code executed! + return vdupq_n_f32(0.0f); + } + + static inline float32x4_t mul_lane(float32x4_t v, float32x4_t vlane, int lane) { +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT + switch(lane) { + case 0: return vmulq_laneq_f32(v, vlane, 0); break; + case 1: return vmulq_laneq_f32(v, vlane, 1); break; + case 2: return vmulq_laneq_f32(v, vlane, 2); break; + case 3: return vmulq_laneq_f32(v, vlane, 3); break; + default: + assert(false); //Unreachable code executed! + } + assert(false); //Unreachable code executed! 
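The mul_lane and madd_lane wrappers let a caller scale a whole vector by a single lane of another vector without spelling out the per-architecture intrinsics (single fused instruction on ARMv8, dupq_lane plus multiply/add on older NEON). Purely as an illustration of how such helpers are consumed (the function name is made up; it assumes a NEON target with this header already included), a column-major mat4 * vec4 product accumulated one lane-scaled column at a time:

// Illustrative only: r = m[0]*v.x + m[1]*v.y + m[2]*v.z + m[3]*v.w
static float32x4_t example_mat4_mul_vec4(float32x4_t const m[4], float32x4_t v)
{
    float32x4_t r = glm::neon::mul_lane(m[0], v, 0);
    r = glm::neon::madd_lane(r, m[1], v, 1);
    r = glm::neon::madd_lane(r, m[2], v, 2);
    r = glm::neon::madd_lane(r, m[3], v, 3);
    return (r);
}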
+ return vdupq_n_f32(0.0f); +#else + return vmulq_f32(v, dupq_lane(vlane, lane)); +#endif + } + + static inline float32x4_t madd_lane(float32x4_t acc, float32x4_t v, float32x4_t vlane, int lane) { +#if GLM_ARCH & GLM_ARCH_ARMV8_BIT +#ifdef GLM_CONFIG_FORCE_FMA +# define FMADD_LANE(acc, x, y, L) do { asm volatile ("fmla %0.4s, %1.4s, %2.4s" : "+w"(acc) : "w"(x), "w"(dup_lane(y, L))); } while(0) +#else +# define FMADD_LANE(acc, x, y, L) do { acc = vmlaq_laneq_f32(acc, x, y, L); } while(0) +#endif + + switch(lane) { + case 0: + FMADD_LANE(acc, v, vlane, 0); + return acc; + case 1: + FMADD_LANE(acc, v, vlane, 1); + return acc; + case 2: + FMADD_LANE(acc, v, vlane, 2); + return acc; + case 3: + FMADD_LANE(acc, v, vlane, 3); + return acc; + default: + assert(false); //Unreachable code executed! + } + assert(false); //Unreachable code executed! + return vdupq_n_f32(0.0f); +# undef FMADD_LANE +#else + return vaddq_f32(acc, vmulq_f32(v, dupq_lane(vlane, lane))); +#endif + } + } //namespace neon +} // namespace glm +#endif // GLM_ARCH & GLM_ARCH_NEON_BIT diff --git a/includes/glm/simd/packing.h b/includes/glm/simd/packing.h new file mode 100644 index 0000000..609163e --- /dev/null +++ b/includes/glm/simd/packing.h @@ -0,0 +1,8 @@ +/// @ref simd +/// @file glm/simd/packing.h + +#pragma once + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/includes/glm/simd/platform.h b/includes/glm/simd/platform.h new file mode 100644 index 0000000..a318b09 --- /dev/null +++ b/includes/glm/simd/platform.h @@ -0,0 +1,469 @@ +#pragma once + +/////////////////////////////////////////////////////////////////////////////////// +// Platform + +#define GLM_PLATFORM_UNKNOWN 0x00000000 +#define GLM_PLATFORM_WINDOWS 0x00010000 +#define GLM_PLATFORM_LINUX 0x00020000 +#define GLM_PLATFORM_APPLE 0x00040000 +//#define GLM_PLATFORM_IOS 0x00080000 +#define GLM_PLATFORM_ANDROID 0x00100000 +#define GLM_PLATFORM_CHROME_NACL 0x00200000 +#define GLM_PLATFORM_UNIX 0x00400000 +#define GLM_PLATFORM_QNXNTO 0x00800000 +#define GLM_PLATFORM_WINCE 0x01000000 +#define GLM_PLATFORM_CYGWIN 0x02000000 + +#ifdef GLM_FORCE_PLATFORM_UNKNOWN +# define GLM_PLATFORM GLM_PLATFORM_UNKNOWN +#elif defined(__CYGWIN__) +# define GLM_PLATFORM GLM_PLATFORM_CYGWIN +#elif defined(__QNXNTO__) +# define GLM_PLATFORM GLM_PLATFORM_QNXNTO +#elif defined(__APPLE__) +# define GLM_PLATFORM GLM_PLATFORM_APPLE +#elif defined(WINCE) +# define GLM_PLATFORM GLM_PLATFORM_WINCE +#elif defined(_WIN32) +# define GLM_PLATFORM GLM_PLATFORM_WINDOWS +#elif defined(__native_client__) +# define GLM_PLATFORM GLM_PLATFORM_CHROME_NACL +#elif defined(__ANDROID__) +# define GLM_PLATFORM GLM_PLATFORM_ANDROID +#elif defined(__linux) +# define GLM_PLATFORM GLM_PLATFORM_LINUX +#elif defined(__unix) +# define GLM_PLATFORM GLM_PLATFORM_UNIX +#else +# define GLM_PLATFORM GLM_PLATFORM_UNKNOWN +#endif// + +/////////////////////////////////////////////////////////////////////////////////// +// Compiler + +#define GLM_COMPILER_UNKNOWN 0x00000000 + +// Intel +#define GLM_COMPILER_INTEL 0x00100000 +#define GLM_COMPILER_INTEL14 0x00100040 +#define GLM_COMPILER_INTEL15 0x00100050 +#define GLM_COMPILER_INTEL16 0x00100060 +#define GLM_COMPILER_INTEL17 0x00100070 +#define GLM_COMPILER_INTEL18 0x00100080 +#define GLM_COMPILER_INTEL19 0x00100090 +#define GLM_COMPILER_INTEL21 0x001000A0 + +// Visual C++ defines +#define GLM_COMPILER_VC 0x01000000 +#define GLM_COMPILER_VC12 0x01000001 // Visual Studio 2013 +#define GLM_COMPILER_VC14 0x01000002 // Visual Studio 2015 +#define 
GLM_COMPILER_VC15 0x01000003 // Visual Studio 2017 +#define GLM_COMPILER_VC15_3 0x01000004 +#define GLM_COMPILER_VC15_5 0x01000005 +#define GLM_COMPILER_VC15_6 0x01000006 +#define GLM_COMPILER_VC15_7 0x01000007 +#define GLM_COMPILER_VC15_8 0x01000008 +#define GLM_COMPILER_VC15_9 0x01000009 +#define GLM_COMPILER_VC16 0x0100000A // Visual Studio 2019 +#define GLM_COMPILER_VC17 0x0100000B // Visual Studio 2022 + +// GCC defines +#define GLM_COMPILER_GCC 0x02000000 +#define GLM_COMPILER_GCC46 0x020000D0 +#define GLM_COMPILER_GCC47 0x020000E0 +#define GLM_COMPILER_GCC48 0x020000F0 +#define GLM_COMPILER_GCC49 0x02000100 +#define GLM_COMPILER_GCC5 0x02000200 +#define GLM_COMPILER_GCC6 0x02000300 +#define GLM_COMPILER_GCC61 0x02000800 +#define GLM_COMPILER_GCC7 0x02000400 +#define GLM_COMPILER_GCC8 0x02000500 +#define GLM_COMPILER_GCC9 0x02000600 +#define GLM_COMPILER_GCC10 0x02000700 +#define GLM_COMPILER_GCC11 0x02000800 +#define GLM_COMPILER_GCC12 0x02000900 +#define GLM_COMPILER_GCC13 0x02000A00 +#define GLM_COMPILER_GCC14 0x02000B00 + +// CUDA +#define GLM_COMPILER_CUDA 0x10000000 +#define GLM_COMPILER_CUDA75 0x10000001 +#define GLM_COMPILER_CUDA80 0x10000002 +#define GLM_COMPILER_CUDA90 0x10000004 +#define GLM_COMPILER_CUDA_RTC 0x10000100 + +// Clang +#define GLM_COMPILER_CLANG 0x20000000 +#define GLM_COMPILER_CLANG34 0x20000050 +#define GLM_COMPILER_CLANG35 0x20000060 +#define GLM_COMPILER_CLANG36 0x20000070 +#define GLM_COMPILER_CLANG37 0x20000080 +#define GLM_COMPILER_CLANG38 0x20000090 +#define GLM_COMPILER_CLANG39 0x200000A0 +#define GLM_COMPILER_CLANG4 0x200000B0 +#define GLM_COMPILER_CLANG5 0x200000C0 +#define GLM_COMPILER_CLANG6 0x200000D0 +#define GLM_COMPILER_CLANG7 0x200000E0 +#define GLM_COMPILER_CLANG8 0x200000F0 +#define GLM_COMPILER_CLANG9 0x20000100 +#define GLM_COMPILER_CLANG10 0x20000200 +#define GLM_COMPILER_CLANG11 0x20000300 +#define GLM_COMPILER_CLANG12 0x20000400 +#define GLM_COMPILER_CLANG13 0x20000500 +#define GLM_COMPILER_CLANG14 0x20000600 +#define GLM_COMPILER_CLANG15 0x20000700 +#define GLM_COMPILER_CLANG16 0x20000800 +#define GLM_COMPILER_CLANG17 0x20000900 +#define GLM_COMPILER_CLANG18 0x20000A00 +#define GLM_COMPILER_CLANG19 0x20000B00 + +// HIP +#define GLM_COMPILER_HIP 0x40000000 + +// Build model +#define GLM_MODEL_32 0x00000010 +#define GLM_MODEL_64 0x00000020 + +// Force generic C++ compiler +#ifdef GLM_FORCE_COMPILER_UNKNOWN +# define GLM_COMPILER GLM_COMPILER_UNKNOWN + +#elif defined(__INTEL_COMPILER) +# if __INTEL_COMPILER >= 2021 +# define GLM_COMPILER GLM_COMPILER_INTEL21 +# elif __INTEL_COMPILER >= 1900 +# define GLM_COMPILER GLM_COMPILER_INTEL19 +# elif __INTEL_COMPILER >= 1800 +# define GLM_COMPILER GLM_COMPILER_INTEL18 +# elif __INTEL_COMPILER >= 1700 +# define GLM_COMPILER GLM_COMPILER_INTEL17 +# elif __INTEL_COMPILER >= 1600 +# define GLM_COMPILER GLM_COMPILER_INTEL16 +# elif __INTEL_COMPILER >= 1500 +# define GLM_COMPILER GLM_COMPILER_INTEL15 +# elif __INTEL_COMPILER >= 1400 +# define GLM_COMPILER GLM_COMPILER_INTEL14 +# elif __INTEL_COMPILER < 1400 +# error "GLM requires ICC 2013 SP1 or newer" +# endif + +// CUDA +#elif defined(__CUDACC__) +# if !defined(CUDA_VERSION) && !defined(GLM_FORCE_CUDA) +# include // make sure version is defined since nvcc does not define it itself! 
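Because each GLM_COMPILER_* value encodes the vendor family in its high bits and the version in its low bits, downstream code tests the family with a bitwise AND rather than equality, and compares full values only within one family (the SSE header selection further down in this file uses the same pattern). A small sketch of that convention:

// Family check via mask; within a family, the low bits order the versions,
// e.g. GLM_COMPILER >= GLM_COMPILER_VC16 inside the MSVC branch.
#if GLM_COMPILER & GLM_COMPILER_VC
    // MSVC-family-only path
#elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG)
    // GCC- or Clang-family path
#endif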
+# endif +# if defined(__CUDACC_RTC__) +# define GLM_COMPILER GLM_COMPILER_CUDA_RTC +# elif CUDA_VERSION >= 8000 +# define GLM_COMPILER GLM_COMPILER_CUDA80 +# elif CUDA_VERSION >= 7500 +# define GLM_COMPILER GLM_COMPILER_CUDA75 +# elif CUDA_VERSION >= 7000 +# define GLM_COMPILER GLM_COMPILER_CUDA70 +# elif CUDA_VERSION < 7000 +# error "GLM requires CUDA 7.0 or higher" +# endif + +// HIP +#elif defined(__HIP__) +# define GLM_COMPILER GLM_COMPILER_HIP + +// Clang +#elif defined(__clang__) +# if defined(__apple_build_version__) +# if (__clang_major__ < 6) +# error "GLM requires Clang 3.4 / Apple Clang 6.0 or higher" +# elif __clang_major__ == 6 && __clang_minor__ == 0 +# define GLM_COMPILER GLM_COMPILER_CLANG35 +# elif __clang_major__ == 6 && __clang_minor__ >= 1 +# define GLM_COMPILER GLM_COMPILER_CLANG36 +# elif __clang_major__ >= 7 +# define GLM_COMPILER GLM_COMPILER_CLANG37 +# endif +# else +# if ((__clang_major__ == 3) && (__clang_minor__ < 4)) || (__clang_major__ < 3) +# error "GLM requires Clang 3.4 or higher" +# elif __clang_major__ == 3 && __clang_minor__ == 4 +# define GLM_COMPILER GLM_COMPILER_CLANG34 +# elif __clang_major__ == 3 && __clang_minor__ == 5 +# define GLM_COMPILER GLM_COMPILER_CLANG35 +# elif __clang_major__ == 3 && __clang_minor__ == 6 +# define GLM_COMPILER GLM_COMPILER_CLANG36 +# elif __clang_major__ == 3 && __clang_minor__ == 7 +# define GLM_COMPILER GLM_COMPILER_CLANG37 +# elif __clang_major__ == 3 && __clang_minor__ == 8 +# define GLM_COMPILER GLM_COMPILER_CLANG38 +# elif __clang_major__ == 3 && __clang_minor__ >= 9 +# define GLM_COMPILER GLM_COMPILER_CLANG39 +# elif __clang_major__ == 4 && __clang_minor__ == 0 +# define GLM_COMPILER GLM_COMPILER_CLANG4 +# elif __clang_major__ == 5 +# define GLM_COMPILER GLM_COMPILER_CLANG5 +# elif __clang_major__ == 6 +# define GLM_COMPILER GLM_COMPILER_CLANG6 +# elif __clang_major__ == 7 +# define GLM_COMPILER GLM_COMPILER_CLANG7 +# elif __clang_major__ == 8 +# define GLM_COMPILER GLM_COMPILER_CLANG8 +# elif __clang_major__ == 9 +# define GLM_COMPILER GLM_COMPILER_CLANG9 +# elif __clang_major__ == 10 +# define GLM_COMPILER GLM_COMPILER_CLANG10 +# elif __clang_major__ == 11 +# define GLM_COMPILER GLM_COMPILER_CLANG11 +# elif __clang_major__ == 12 +# define GLM_COMPILER GLM_COMPILER_CLANG12 +# elif __clang_major__ == 13 +# define GLM_COMPILER GLM_COMPILER_CLANG13 +# elif __clang_major__ == 14 +# define GLM_COMPILER GLM_COMPILER_CLANG14 +# elif __clang_major__ == 15 +# define GLM_COMPILER GLM_COMPILER_CLANG15 +# elif __clang_major__ == 16 +# define GLM_COMPILER GLM_COMPILER_CLANG16 +# elif __clang_major__ == 17 +# define GLM_COMPILER GLM_COMPILER_CLANG17 +# elif __clang_major__ == 18 +# define GLM_COMPILER GLM_COMPILER_CLANG18 +# elif __clang_major__ >= 19 +# define GLM_COMPILER GLM_COMPILER_CLANG19 +# endif +# endif + +// Visual C++ +#elif defined(_MSC_VER) +# if _MSC_VER >= 1930 +# define GLM_COMPILER GLM_COMPILER_VC17 +# elif _MSC_VER >= 1920 +# define GLM_COMPILER GLM_COMPILER_VC16 +# elif _MSC_VER >= 1916 +# define GLM_COMPILER GLM_COMPILER_VC15_9 +# elif _MSC_VER >= 1915 +# define GLM_COMPILER GLM_COMPILER_VC15_8 +# elif _MSC_VER >= 1914 +# define GLM_COMPILER GLM_COMPILER_VC15_7 +# elif _MSC_VER >= 1913 +# define GLM_COMPILER GLM_COMPILER_VC15_6 +# elif _MSC_VER >= 1912 +# define GLM_COMPILER GLM_COMPILER_VC15_5 +# elif _MSC_VER >= 1911 +# define GLM_COMPILER GLM_COMPILER_VC15_3 +# elif _MSC_VER >= 1910 +# define GLM_COMPILER GLM_COMPILER_VC15 +# elif _MSC_VER >= 1900 +# define GLM_COMPILER GLM_COMPILER_VC14 +# elif _MSC_VER 
>= 1800 +# define GLM_COMPILER GLM_COMPILER_VC12 +# elif _MSC_VER < 1800 +# error "GLM requires Visual C++ 12 - 2013 or higher" +# endif//_MSC_VER + +// G++ +#elif defined(__GNUC__) || defined(__MINGW32__) +# if __GNUC__ >= 14 +# define GLM_COMPILER GLM_COMPILER_GCC14 +# elif __GNUC__ >= 13 +# define GLM_COMPILER GLM_COMPILER_GCC13 +# elif __GNUC__ >= 12 +# define GLM_COMPILER GLM_COMPILER_GCC12 +# elif __GNUC__ >= 11 +# define GLM_COMPILER GLM_COMPILER_GCC11 +# elif __GNUC__ >= 10 +# define GLM_COMPILER GLM_COMPILER_GCC10 +# elif __GNUC__ >= 9 +# define GLM_COMPILER GLM_COMPILER_GCC9 +# elif __GNUC__ >= 8 +# define GLM_COMPILER GLM_COMPILER_GCC8 +# elif __GNUC__ >= 7 +# define GLM_COMPILER GLM_COMPILER_GCC7 +# elif __GNUC__ >= 6 +# define GLM_COMPILER GLM_COMPILER_GCC6 +# elif __GNUC__ >= 5 +# define GLM_COMPILER GLM_COMPILER_GCC5 +# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9 +# define GLM_COMPILER GLM_COMPILER_GCC49 +# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 8 +# define GLM_COMPILER GLM_COMPILER_GCC48 +# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 7 +# define GLM_COMPILER GLM_COMPILER_GCC47 +# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 6 +# define GLM_COMPILER GLM_COMPILER_GCC46 +# elif ((__GNUC__ == 4) && (__GNUC_MINOR__ < 6)) || (__GNUC__ < 4) +# error "GLM requires GCC 4.6 or higher" +# endif + +#else +# define GLM_COMPILER GLM_COMPILER_UNKNOWN +#endif + +#ifndef GLM_COMPILER +# error "GLM_COMPILER undefined, your compiler may not be supported by GLM. Add #define GLM_COMPILER 0 to ignore this message." +#endif//GLM_COMPILER + +/////////////////////////////////////////////////////////////////////////////////// +// Instruction sets + +// User defines: GLM_FORCE_PURE GLM_FORCE_INTRINSICS GLM_FORCE_SSE2 GLM_FORCE_SSE3 GLM_FORCE_AVX GLM_FORCE_AVX2 GLM_FORCE_AVX2 + +#define GLM_ARCH_MIPS_BIT (0x10000000) +#define GLM_ARCH_PPC_BIT (0x20000000) +#define GLM_ARCH_ARM_BIT (0x40000000) +#define GLM_ARCH_ARMV8_BIT (0x01000000) +#define GLM_ARCH_X86_BIT (0x80000000) + +#define GLM_ARCH_SIMD_BIT (0x00001000) + +#define GLM_ARCH_NEON_BIT (0x00000001) +#define GLM_ARCH_SSE_BIT (0x00000002) +#define GLM_ARCH_SSE2_BIT (0x00000004) +#define GLM_ARCH_SSE3_BIT (0x00000008) +#define GLM_ARCH_SSSE3_BIT (0x00000010) +#define GLM_ARCH_SSE41_BIT (0x00000020) +#define GLM_ARCH_SSE42_BIT (0x00000040) +#define GLM_ARCH_AVX_BIT (0x00000080) +#define GLM_ARCH_AVX2_BIT (0x00000100) + +#define GLM_ARCH_UNKNOWN (0) +#define GLM_ARCH_X86 (GLM_ARCH_X86_BIT) +#define GLM_ARCH_SSE (GLM_ARCH_SSE_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_X86) +#define GLM_ARCH_SSE2 (GLM_ARCH_SSE2_BIT | GLM_ARCH_SSE) +#define GLM_ARCH_SSE3 (GLM_ARCH_SSE3_BIT | GLM_ARCH_SSE2) +#define GLM_ARCH_SSSE3 (GLM_ARCH_SSSE3_BIT | GLM_ARCH_SSE3) +#define GLM_ARCH_SSE41 (GLM_ARCH_SSE41_BIT | GLM_ARCH_SSSE3) +#define GLM_ARCH_SSE42 (GLM_ARCH_SSE42_BIT | GLM_ARCH_SSE41) +#define GLM_ARCH_AVX (GLM_ARCH_AVX_BIT | GLM_ARCH_SSE42) +#define GLM_ARCH_AVX2 (GLM_ARCH_AVX2_BIT | GLM_ARCH_AVX) +#define GLM_ARCH_ARM (GLM_ARCH_ARM_BIT) +#define GLM_ARCH_ARMV8 (GLM_ARCH_NEON_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_ARM | GLM_ARCH_ARMV8_BIT) +#define GLM_ARCH_NEON (GLM_ARCH_NEON_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_ARM) +#define GLM_ARCH_MIPS (GLM_ARCH_MIPS_BIT) +#define GLM_ARCH_PPC (GLM_ARCH_PPC_BIT) + +#if defined(GLM_FORCE_ARCH_UNKNOWN) || defined(GLM_FORCE_PURE) +# define GLM_ARCH GLM_ARCH_UNKNOWN +#elif defined(GLM_FORCE_NEON) +# if __ARM_ARCH >= 8 +# define GLM_ARCH (GLM_ARCH_ARMV8) +# else +# define GLM_ARCH (GLM_ARCH_NEON) +# endif +# define GLM_FORCE_INTRINSICS +#elif 
defined(GLM_FORCE_AVX2) +# define GLM_ARCH (GLM_ARCH_AVX2) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_AVX) +# define GLM_ARCH (GLM_ARCH_AVX) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_SSE42) +# define GLM_ARCH (GLM_ARCH_SSE42) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_SSE41) +# define GLM_ARCH (GLM_ARCH_SSE41) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_SSSE3) +# define GLM_ARCH (GLM_ARCH_SSSE3) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_SSE3) +# define GLM_ARCH (GLM_ARCH_SSE3) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_SSE2) +# define GLM_ARCH (GLM_ARCH_SSE2) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_SSE) +# define GLM_ARCH (GLM_ARCH_SSE) +# define GLM_FORCE_INTRINSICS +#elif defined(GLM_FORCE_INTRINSICS) && !defined(GLM_FORCE_XYZW_ONLY) +# if defined(__AVX2__) +# define GLM_ARCH (GLM_ARCH_AVX2) +# elif defined(__AVX__) +# define GLM_ARCH (GLM_ARCH_AVX) +# elif defined(__SSE4_2__) +# define GLM_ARCH (GLM_ARCH_SSE42) +# elif defined(__SSE4_1__) +# define GLM_ARCH (GLM_ARCH_SSE41) +# elif defined(__SSSE3__) +# define GLM_ARCH (GLM_ARCH_SSSE3) +# elif defined(__SSE3__) +# define GLM_ARCH (GLM_ARCH_SSE3) +# elif defined(__SSE2__) || defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86_FP) +# define GLM_ARCH (GLM_ARCH_SSE2) +# elif defined(__i386__) +# define GLM_ARCH (GLM_ARCH_X86) +# elif defined(__ARM_ARCH) && (__ARM_ARCH >= 8) +# define GLM_ARCH (GLM_ARCH_ARMV8) +# elif defined(__ARM_NEON) +# define GLM_ARCH (GLM_ARCH_ARM | GLM_ARCH_NEON) +# elif defined(__arm__ ) || defined(_M_ARM) +# define GLM_ARCH (GLM_ARCH_ARM) +# elif defined(__mips__ ) +# define GLM_ARCH (GLM_ARCH_MIPS) +# elif defined(__powerpc__ ) || defined(_M_PPC) +# define GLM_ARCH (GLM_ARCH_PPC) +# else +# define GLM_ARCH (GLM_ARCH_UNKNOWN) +# endif +#else +# if defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86) || defined(__i386__) +# define GLM_ARCH (GLM_ARCH_X86) +# elif defined(__arm__) || defined(_M_ARM) +# define GLM_ARCH (GLM_ARCH_ARM) +# elif defined(__powerpc__) || defined(_M_PPC) +# define GLM_ARCH (GLM_ARCH_PPC) +# elif defined(__mips__) +# define GLM_ARCH (GLM_ARCH_MIPS) +# else +# define GLM_ARCH (GLM_ARCH_UNKNOWN) +# endif +#endif + +#if GLM_ARCH & GLM_ARCH_AVX2_BIT +# include +#elif GLM_ARCH & GLM_ARCH_AVX_BIT +# include +#elif GLM_ARCH & GLM_ARCH_SSE42_BIT +# if GLM_COMPILER & GLM_COMPILER_CLANG +# include +# endif +# include +#elif GLM_ARCH & GLM_ARCH_SSE41_BIT +# include +#elif GLM_ARCH & GLM_ARCH_SSSE3_BIT +# include +#elif GLM_ARCH & GLM_ARCH_SSE3_BIT +# include +#elif GLM_ARCH & GLM_ARCH_SSE2_BIT +# include +#elif GLM_ARCH & GLM_ARCH_NEON_BIT +# include "neon.h" +#endif//GLM_ARCH + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + typedef __m128 glm_f32vec4; + typedef __m128i glm_i32vec4; + typedef __m128i glm_u32vec4; + typedef __m128d glm_f64vec2; + typedef __m128i glm_i64vec2; + typedef __m128i glm_u64vec2; + + typedef glm_f32vec4 glm_vec4; + typedef glm_i32vec4 glm_ivec4; + typedef glm_u32vec4 glm_uvec4; + typedef glm_f64vec2 glm_dvec2; +#endif + +#if GLM_ARCH & GLM_ARCH_AVX_BIT + typedef __m256d glm_f64vec4; + typedef glm_f64vec4 glm_dvec4; +#endif + +#if GLM_ARCH & GLM_ARCH_AVX2_BIT + typedef __m256i glm_i64vec4; + typedef __m256i glm_u64vec4; +#endif + +#if GLM_ARCH & GLM_ARCH_NEON_BIT + typedef float32x4_t glm_f32vec4; + typedef int32x4_t glm_i32vec4; + typedef uint32x4_t glm_u32vec4; +#endif diff --git a/includes/glm/simd/trigonometric.h b/includes/glm/simd/trigonometric.h new file mode 100644 index 
0000000..739b796 --- /dev/null +++ b/includes/glm/simd/trigonometric.h @@ -0,0 +1,9 @@ +/// @ref simd +/// @file glm/simd/trigonometric.h + +#pragma once + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT + diff --git a/includes/glm/simd/vector_relational.h b/includes/glm/simd/vector_relational.h new file mode 100644 index 0000000..f7385e9 --- /dev/null +++ b/includes/glm/simd/vector_relational.h @@ -0,0 +1,8 @@ +/// @ref simd +/// @file glm/simd/vector_relational.h + +#pragma once + +#if GLM_ARCH & GLM_ARCH_SSE2_BIT + +#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/includes/glm/trigonometric.hpp b/includes/glm/trigonometric.hpp new file mode 100644 index 0000000..51d49c1 --- /dev/null +++ b/includes/glm/trigonometric.hpp @@ -0,0 +1,210 @@ +/// @ref core +/// @file glm/trigonometric.hpp +/// +/// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions +/// +/// @defgroup core_func_trigonometric Angle and Trigonometry Functions +/// @ingroup core +/// +/// Function parameters specified as angle are assumed to be in units of radians. +/// In no case will any of these functions result in a divide by zero error. If +/// the divisor of a ratio is 0, then results will be undefined. +/// +/// These all operate component-wise. The description is per component. +/// +/// Include to use these core features. +/// +/// @see ext_vector_trigonometric + +#pragma once + +#include "detail/setup.hpp" +#include "detail/qualifier.hpp" + +namespace glm +{ + /// @addtogroup core_func_trigonometric + /// @{ + + /// Converts degrees to radians and returns the result. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL radians man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec radians(vec const& degrees); + + /// Converts radians to degrees and returns the result. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL degrees man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec degrees(vec const& radians); + + /// The standard trigonometric sine function. + /// The values returned by this function will range from [-1, 1]. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL sin man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL vec sin(vec const& angle); + + /// The standard trigonometric cosine function. + /// The values returned by this function will range from [-1, 1]. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL cos man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL vec cos(vec const& angle); + + /// The standard trigonometric tangent function. 
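The conversion functions declared above are simple per-component scalings; for reference, the scalar math applied to each element is sketched below (helper names are illustrative, assuming the usual value of pi):

// Per-element equivalents of glm::radians / glm::degrees.
constexpr float k_pi = 3.14159265358979323846f;
constexpr float to_radians(float deg) { return (deg * (k_pi / 180.0f)); }
constexpr float to_degrees(float rad) { return (rad * (180.0f / k_pi)); }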
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL tan man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL vec tan(vec const& angle); + + /// Arc sine. Returns an angle whose sine is x. + /// The range of values returned by this function is [-PI/2, PI/2]. + /// Results are undefined if |x| > 1. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL asin man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL vec asin(vec const& x); + + /// Arc cosine. Returns an angle whose cosine is x. + /// The range of values returned by this function is [0, PI]. + /// Results are undefined if |x| > 1. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL acos man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL vec acos(vec const& x); + + /// Arc tangent. Returns an angle whose tangent is y/x. + /// The signs of x and y are used to determine what + /// quadrant the angle is in. The range of values returned + /// by this function is [-PI, PI]. Results are undefined + /// if x and y are both 0. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL atan man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL vec atan(vec const& y, vec const& x); + + /// Arc tangent. Returns an angle whose tangent is y_over_x. + /// The range of values returned by this function is [-PI/2, PI/2]. 
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL atan man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL vec atan(vec const& y_over_x); + + /// Returns the hyperbolic sine function, (exp(x) - exp(-x)) / 2 + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL sinh man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL vec sinh(vec const& angle); + + /// Returns the hyperbolic cosine function, (exp(x) + exp(-x)) / 2 + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL cosh man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL vec cosh(vec const& angle); + + /// Returns the hyperbolic tangent function, sinh(angle) / cosh(angle) + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL tanh man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL vec tanh(vec const& angle); + + /// Arc hyperbolic sine; returns the inverse of sinh. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL asinh man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL vec asinh(vec const& x); + + /// Arc hyperbolic cosine; returns the non-negative inverse + /// of cosh. Results are undefined if x < 1. + /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL acosh man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL vec acosh(vec const& x); + + /// Arc hyperbolic tangent; returns the inverse of tanh. + /// Results are undefined if abs(x) >= 1. 
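Taken together, these declarations mirror the GLSL angle and trigonometry built-ins, applied component-wise to each vector element. A minimal usage sketch (the function name is illustrative; it assumes glm/glm.hpp, which pulls in this header):

#include <glm/glm.hpp>

// Each call operates per component of its vector arguments.
static glm::vec3 example_trig(void)
{
    glm::vec3 deg(0.0f, 90.0f, 180.0f);
    glm::vec3 rad = glm::radians(deg);          // (0, pi/2, pi)
    glm::vec2 a   = glm::atan(glm::vec2(1.0f),  // two-argument form: y then x,
                              glm::vec2(1.0f)); // each component ~= pi/4
    (void)a;
    return (glm::sin(rad));                     // ~(0, 1, 0)
}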
+ /// + /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector + /// @tparam T Floating-point scalar types + /// @tparam Q Value from qualifier enum + /// + /// @see GLSL atanh man page + /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions + template + GLM_FUNC_DECL vec atanh(vec const& x); + + /// @} +}//namespace glm + +#include "detail/func_trigonometric.inl" diff --git a/includes/glm/vec2.hpp b/includes/glm/vec2.hpp new file mode 100644 index 0000000..cd4e070 --- /dev/null +++ b/includes/glm/vec2.hpp @@ -0,0 +1,14 @@ +/// @ref core +/// @file glm/vec2.hpp + +#pragma once +#include "./ext/vector_bool2.hpp" +#include "./ext/vector_bool2_precision.hpp" +#include "./ext/vector_float2.hpp" +#include "./ext/vector_float2_precision.hpp" +#include "./ext/vector_double2.hpp" +#include "./ext/vector_double2_precision.hpp" +#include "./ext/vector_int2.hpp" +#include "./ext/vector_int2_sized.hpp" +#include "./ext/vector_uint2.hpp" +#include "./ext/vector_uint2_sized.hpp" diff --git a/includes/glm/vec3.hpp b/includes/glm/vec3.hpp new file mode 100644 index 0000000..f5a927d --- /dev/null +++ b/includes/glm/vec3.hpp @@ -0,0 +1,14 @@ +/// @ref core +/// @file glm/vec3.hpp + +#pragma once +#include "./ext/vector_bool3.hpp" +#include "./ext/vector_bool3_precision.hpp" +#include "./ext/vector_float3.hpp" +#include "./ext/vector_float3_precision.hpp" +#include "./ext/vector_double3.hpp" +#include "./ext/vector_double3_precision.hpp" +#include "./ext/vector_int3.hpp" +#include "./ext/vector_int3_sized.hpp" +#include "./ext/vector_uint3.hpp" +#include "./ext/vector_uint3_sized.hpp" diff --git a/includes/glm/vec4.hpp b/includes/glm/vec4.hpp new file mode 100644 index 0000000..c6ea9f1 --- /dev/null +++ b/includes/glm/vec4.hpp @@ -0,0 +1,15 @@ +/// @ref core +/// @file glm/vec4.hpp + +#pragma once +#include "./ext/vector_bool4.hpp" +#include "./ext/vector_bool4_precision.hpp" +#include "./ext/vector_float4.hpp" +#include "./ext/vector_float4_precision.hpp" +#include "./ext/vector_double4.hpp" +#include "./ext/vector_double4_precision.hpp" +#include "./ext/vector_int4.hpp" +#include "./ext/vector_int4_sized.hpp" +#include "./ext/vector_uint4.hpp" +#include "./ext/vector_uint4_sized.hpp" + diff --git a/includes/glm/vector_relational.hpp b/includes/glm/vector_relational.hpp new file mode 100644 index 0000000..a0fe17e --- /dev/null +++ b/includes/glm/vector_relational.hpp @@ -0,0 +1,121 @@ +/// @ref core +/// @file glm/vector_relational.hpp +/// +/// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions +/// +/// @defgroup core_func_vector_relational Vector Relational Functions +/// @ingroup core +/// +/// Relational and equality operators (<, <=, >, >=, ==, !=) are defined to +/// operate on scalars and produce scalar Boolean results. For vector results, +/// use the following built-in functions. +/// +/// In all cases, the sizes of all the input and return vectors for any particular +/// call must match. +/// +/// Include to use these core features. +/// +/// @see ext_vector_relational + +#pragma once + +#include "detail/qualifier.hpp" +#include "detail/setup.hpp" + +namespace glm +{ + /// @addtogroup core_func_vector_relational + /// @{ + + /// Returns the component-wise comparison result of x < y. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T A floating-point or integer scalar type. 
+ /// + /// @see GLSL lessThan man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec lessThan(vec const& x, vec const& y); + + /// Returns the component-wise comparison of result x <= y. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T A floating-point or integer scalar type. + /// + /// @see GLSL lessThanEqual man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec lessThanEqual(vec const& x, vec const& y); + + /// Returns the component-wise comparison of result x > y. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T A floating-point or integer scalar type. + /// + /// @see GLSL greaterThan man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec greaterThan(vec const& x, vec const& y); + + /// Returns the component-wise comparison of result x >= y. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T A floating-point or integer scalar type. + /// + /// @see GLSL greaterThanEqual man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec greaterThanEqual(vec const& x, vec const& y); + + /// Returns the component-wise comparison of result x == y. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T A floating-point, integer or bool scalar type. + /// + /// @see GLSL equal man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y); + + /// Returns the component-wise comparison of result x != y. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// @tparam T A floating-point, integer or bool scalar type. + /// + /// @see GLSL notEqual man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y); + + /// Returns true if any component of x is true. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// + /// @see GLSL any man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR bool any(vec const& v); + + /// Returns true if all components of x are true. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. + /// + /// @see GLSL all man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR bool all(vec const& v); + + /// Returns the component-wise logical complement of x. + /// /!\ Because of language incompatibilities between C++ and GLSL, GLM defines the function not but not_ instead. + /// + /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. 
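As the header comment notes, the scalar relational operators do not produce per-component results, so these functions return a boolean vector that is then reduced with any()/all() or complemented with not_() ("not" being a C++ keyword). A small illustrative sketch, assuming glm/glm.hpp; the function name is made up:

#include <glm/glm.hpp>

// lessThan() yields a bvec; any()/all() reduce it, not_() complements it.
static bool example_relational(void)
{
    glm::vec3  a(1.0f, 2.0f, 3.0f);
    glm::vec3  b(2.0f, 2.0f, 2.0f);
    glm::bvec3 lt = glm::lessThan(a, b);        // (true, false, false)
    return (glm::any(lt) && !glm::all(glm::not_(lt)));  // evaluates to true
}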
+ /// + /// @see GLSL not man page + /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions + template + GLM_FUNC_DECL GLM_CONSTEXPR vec not_(vec const& v); + + /// @} +}//namespace glm + +#include "detail/func_vector_relational.inl" diff --git a/srcs/Camera.cpp b/srcs/Camera.cpp index 79b7795..fc13cb2 100644 --- a/srcs/Camera.cpp +++ b/srcs/Camera.cpp @@ -12,23 +12,46 @@ #include "Camera.hpp" -Camera::Camera(void) +Camera::Camera(glm::vec3 start_pos, glm::vec3 start_up, float start_yaw, float start_pitch) + : _forward(glm::vec3(0.0f, 0.0f, -1.0f)), _yaw(start_yaw), _pitch(start_pitch), _position(start_pos), _up(start_up) { -} - -Camera::Camera(Camera const &src) -{ - *this = src; + update_camera_vectors(); } Camera::~Camera(void) { } -Camera &Camera::operator=(Camera const &rhs) +void Camera::update_camera_vectors() { - if (this != &rhs) + glm::vec3 frontTemp; + + frontTemp.x = cos(glm::radians(_yaw)) * cos(glm::radians(_pitch)); + frontTemp.y = sin(glm::radians(_pitch)); + frontTemp.z = sin(glm::radians(_yaw)) * cos(glm::radians(_pitch)); + _forward = glm::normalize(frontTemp); + + _right = glm::normalize(glm::cross(_forward, _up)); + _up = glm::normalize(glm::cross(_right, _forward)); +} + +glm::mat4 Camera::get_view_matrix() +{ + return (glm::lookAt(_position, _position + _forward, _up)); +} + +void Camera::process_movement(float xoffset, float yoffset, bool constraint_pitch = true) +{ + _yaw += xoffset; + _pitch += yoffset; + + if (constraint_pitch) { + if (_pitch > 89.0f) _pitch = 89.0f; + if (_pitch < -89.0f) _pitch = -89.0f; } - return (*this); + + update_camera_vectors(); + + std::cout << _yaw << " " << _pitch << std::endl; } \ No newline at end of file diff --git a/srcs/Window.cpp b/srcs/Window.cpp index e8c9dde..d018b92 100644 --- a/srcs/Window.cpp +++ b/srcs/Window.cpp @@ -14,6 +14,8 @@ Window::Window(int width, int height, const char *title, int sleep) { + camera = new Camera(glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f), -90.0f, 0.0f); + if (!glfwInit()) { fprintf( stderr, "Failed to initialize GLFW\n" ); @@ -42,18 +44,11 @@ Window::Window(int width, int height, const char *title, int sleep) gladLoadGL(glfwGetProcAddress); glfwSwapInterval(sleep); } -Window::Window(Window const &src) -{ - *this = src; -} -Window &Window::operator=(Window const &rhs) -{ - if (this != &rhs) - _window = rhs._window; - return (*this); -} + Window::~Window(void) { + delete camera; + glfwTerminate(); } @@ -69,13 +64,32 @@ void Window::keyCallback(GLFWwindow* window, int key, int scancode, int action, } void Window::mouseMoveCallback(GLFWwindow* window, double xpos, double ypos) { - Window* win = static_cast(glfwGetWindowUserPointer(window)); - (void) win; (void) xpos; (void) ypos; + Window* win = static_cast(glfwGetWindowUserPointer(window)); + (void) win; (void) xpos; (void) ypos; - win->_prevMousePos = win->_mousePos; - win->_mousePos = RT::Vec2i(xpos, ypos); + static double lastX = 0; + static double lastY = 0; - win->_mouseDelta = win->_mousePos - win->_prevMousePos; + if (lastX == 0 && lastY == 0) + { + lastX = xpos; + lastY = ypos; + } + + double xoffset = xpos - lastX; + double yoffset = lastY - ypos; + + if (glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_RIGHT) == GLFW_PRESS) + { + win->camera->process_movement(xoffset, yoffset, true); + glfwSetCursorPos(window, WIDTH / 2, HEIGHT / 2); + + // scene.frameCount = 0; + } + + + lastX = xpos; + lastY = ypos; } void Window::mouseButtonCallback(GLFWwindow* window, int button, int action, int mods) { @@ -112,10 +126,6 
@@ GLFWwindow *Window::getWindow(void) const { return (_window); } -RT::Vec2i Window::getMousePos(void) const -{ - return (_mousePos); -} float Window::getFps(void) const {