
Merge branch 'master' into mobile_hacks

mobile_hacks
Andrey Akhmichin 1 year ago
parent
commit
6a0b914122
  1. cl_dll/neon_mathfun.h (356 lines changed)
  2. cl_dll/saytext.cpp (7 lines changed)
  3. cl_dll/studio_util.cpp (235 lines changed)
  4. dlls/bigmomma.cpp (2 lines changed)

cl_dll/neon_mathfun.h (356 lines changed)

@@ -1,356 +0,0 @@
/* NEON implementation of sin, cos, exp and log
Inspired by Intel Approximate Math library, and based on the
corresponding algorithms of the cephes math library
*/
/* Copyright (C) 2011 Julien Pommier
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
(this is the zlib license)
*/
#include <arm_neon.h>
typedef float32x4_t v4sf; // vector of 4 float
typedef uint32x4_t v4su; // vector of 4 uint32
typedef int32x4_t v4si; // vector of 4 int32
#define s4f_x(s4f) vgetq_lane_f32(s4f, 0)
#define s4f_y(s4f) vgetq_lane_f32(s4f, 1)
#define s4f_z(s4f) vgetq_lane_f32(s4f, 2)
#define s4f_w(s4f) vgetq_lane_f32(s4f, 3)
#define c_inv_mant_mask ~0x7f800000u
#define c_cephes_SQRTHF 0.707106781186547524
#define c_cephes_log_p0 7.0376836292E-2
#define c_cephes_log_p1 - 1.1514610310E-1
#define c_cephes_log_p2 1.1676998740E-1
#define c_cephes_log_p3 - 1.2420140846E-1
#define c_cephes_log_p4 + 1.4249322787E-1
#define c_cephes_log_p5 - 1.6668057665E-1
#define c_cephes_log_p6 + 2.0000714765E-1
#define c_cephes_log_p7 - 2.4999993993E-1
#define c_cephes_log_p8 + 3.3333331174E-1
#define c_cephes_log_q1 -2.12194440e-4
#define c_cephes_log_q2 0.693359375
/* natural logarithm computed for 4 simultaneous float
return NaN for x <= 0
*/
inline v4sf log_ps(v4sf x) {
v4sf one = vdupq_n_f32(1);
x = vmaxq_f32(x, vdupq_n_f32(0)); /* force flush to zero on denormal values */
v4su invalid_mask = vcleq_f32(x, vdupq_n_f32(0));
v4si ux = vreinterpretq_s32_f32(x);
v4si emm0 = vshrq_n_s32(ux, 23);
/* keep only the fractional part */
ux = vandq_s32(ux, vdupq_n_s32(c_inv_mant_mask));
ux = vorrq_s32(ux, vreinterpretq_s32_f32(vdupq_n_f32(0.5f)));
x = vreinterpretq_f32_s32(ux);
emm0 = vsubq_s32(emm0, vdupq_n_s32(0x7f));
v4sf e = vcvtq_f32_s32(emm0);
e = vaddq_f32(e, one);
/* part2:
if( x < SQRTHF ) {
e -= 1;
x = x + x - 1.0;
} else { x = x - 1.0; }
*/
v4su mask = vcltq_f32(x, vdupq_n_f32(c_cephes_SQRTHF));
v4sf tmp = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(x), mask));
x = vsubq_f32(x, one);
e = vsubq_f32(e, vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(one), mask)));
x = vaddq_f32(x, tmp);
v4sf z = vmulq_f32(x,x);
v4sf y = vdupq_n_f32(c_cephes_log_p0);
y = vmulq_f32(y, x);
y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p1));
y = vmulq_f32(y, x);
y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p2));
y = vmulq_f32(y, x);
y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p3));
y = vmulq_f32(y, x);
y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p4));
y = vmulq_f32(y, x);
y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p5));
y = vmulq_f32(y, x);
y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p6));
y = vmulq_f32(y, x);
y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p7));
y = vmulq_f32(y, x);
y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p8));
y = vmulq_f32(y, x);
y = vmulq_f32(y, z);
tmp = vmulq_f32(e, vdupq_n_f32(c_cephes_log_q1));
y = vaddq_f32(y, tmp);
tmp = vmulq_f32(z, vdupq_n_f32(0.5f));
y = vsubq_f32(y, tmp);
tmp = vmulq_f32(e, vdupq_n_f32(c_cephes_log_q2));
x = vaddq_f32(x, y);
x = vaddq_f32(x, tmp);
x = vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(x), invalid_mask)); // negative arg will be NAN
return x;
}
#define c_exp_hi 88.3762626647949f
#define c_exp_lo -88.3762626647949f
#define c_cephes_LOG2EF 1.44269504088896341
#define c_cephes_exp_C1 0.693359375
#define c_cephes_exp_C2 -2.12194440e-4
#define c_cephes_exp_p0 1.9875691500E-4
#define c_cephes_exp_p1 1.3981999507E-3
#define c_cephes_exp_p2 8.3334519073E-3
#define c_cephes_exp_p3 4.1665795894E-2
#define c_cephes_exp_p4 1.6666665459E-1
#define c_cephes_exp_p5 5.0000001201E-1
/* exp() computed for 4 float at once */
inline v4sf exp_ps(v4sf x) {
v4sf tmp, fx;
v4sf one = vdupq_n_f32(1);
x = vminq_f32(x, vdupq_n_f32(c_exp_hi));
x = vmaxq_f32(x, vdupq_n_f32(c_exp_lo));
/* express exp(x) as exp(g + n*log(2)) */
fx = vmlaq_f32(vdupq_n_f32(0.5f), x, vdupq_n_f32(c_cephes_LOG2EF));
/* perform a floorf */
tmp = vcvtq_f32_s32(vcvtq_s32_f32(fx));
/* if greater, subtract 1 */
v4su mask = vcgtq_f32(tmp, fx);
mask = vandq_u32(mask, vreinterpretq_u32_f32(one));
fx = vsubq_f32(tmp, vreinterpretq_f32_u32(mask));
tmp = vmulq_f32(fx, vdupq_n_f32(c_cephes_exp_C1));
v4sf z = vmulq_f32(fx, vdupq_n_f32(c_cephes_exp_C2));
x = vsubq_f32(x, tmp);
x = vsubq_f32(x, z);
static const float cephes_exp_p[6] = { c_cephes_exp_p0, c_cephes_exp_p1, c_cephes_exp_p2, c_cephes_exp_p3, c_cephes_exp_p4, c_cephes_exp_p5 };
v4sf y = vld1q_dup_f32(cephes_exp_p+0);
v4sf c1 = vld1q_dup_f32(cephes_exp_p+1);
v4sf c2 = vld1q_dup_f32(cephes_exp_p+2);
v4sf c3 = vld1q_dup_f32(cephes_exp_p+3);
v4sf c4 = vld1q_dup_f32(cephes_exp_p+4);
v4sf c5 = vld1q_dup_f32(cephes_exp_p+5);
y = vmulq_f32(y, x);
z = vmulq_f32(x,x);
y = vaddq_f32(y, c1);
y = vmulq_f32(y, x);
y = vaddq_f32(y, c2);
y = vmulq_f32(y, x);
y = vaddq_f32(y, c3);
y = vmulq_f32(y, x);
y = vaddq_f32(y, c4);
y = vmulq_f32(y, x);
y = vaddq_f32(y, c5);
y = vmulq_f32(y, z);
y = vaddq_f32(y, x);
y = vaddq_f32(y, one);
/* build 2^n */
int32x4_t mm;
mm = vcvtq_s32_f32(fx);
mm = vaddq_s32(mm, vdupq_n_s32(0x7f));
mm = vshlq_n_s32(mm, 23);
v4sf pow2n = vreinterpretq_f32_s32(mm);
y = vmulq_f32(y, pow2n);
return y;
}
#define c_minus_cephes_DP1 -0.78515625
#define c_minus_cephes_DP2 -2.4187564849853515625e-4
#define c_minus_cephes_DP3 -3.77489497744594108e-8
#define c_sincof_p0 -1.9515295891E-4
#define c_sincof_p1 8.3321608736E-3
#define c_sincof_p2 -1.6666654611E-1
#define c_coscof_p0 2.443315711809948E-005
#define c_coscof_p1 -1.388731625493765E-003
#define c_coscof_p2 4.166664568298827E-002
#define c_cephes_FOPI 1.27323954473516 // 4 / M_PI
/* evaluation of 4 sines & cosines at once.
The code is the exact rewriting of the cephes sinf function.
Precision is excellent as long as x < 8192 (I did not bother to
take into account the special handling they have for greater values
-- it does not return garbage for arguments over 8192, though, but
the extra precision is missing).
Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
surprising but correct result.
Note also that when you compute sin(x), cos(x) is available at
almost no extra price so both sin_ps and cos_ps make use of
sincos_ps.
*/
inline void sincos_ps(v4sf x, v4sf *ysin, v4sf *ycos) { // any x
v4sf y;
v4su emm2;
v4su sign_mask_sin, sign_mask_cos;
sign_mask_sin = vcltq_f32(x, vdupq_n_f32(0));
x = vabsq_f32(x);
/* scale by 4/Pi */
y = vmulq_n_f32(x, c_cephes_FOPI);
/* store the integer part of y in mm0 */
emm2 = vcvtq_u32_f32(y);
/* j=(j+1) & (~1) (see the cephes sources) */
emm2 = vaddq_u32(emm2, vdupq_n_u32(1));
emm2 = vandq_u32(emm2, vdupq_n_u32(~1));
y = vcvtq_f32_u32(emm2);
/* get the polynom selection mask
there is one polynom for 0 <= x <= Pi/4
and another one for Pi/4<x<=Pi/2
Both branches will be computed.
*/
v4su poly_mask = vtstq_u32(emm2, vdupq_n_u32(2));
/* The magic pass: "Extended precision modular arithmetic"
x = ((x - y * DP1) - y * DP2) - y * DP3; */
x = vfmaq_n_f32(x, y, c_minus_cephes_DP1);
x = vfmaq_n_f32(x, y, c_minus_cephes_DP2);
x = vfmaq_n_f32(x, y, c_minus_cephes_DP3);
sign_mask_sin = veorq_u32(sign_mask_sin, vtstq_u32(emm2, vdupq_n_u32(4)));
sign_mask_cos = vtstq_u32(vsubq_u32(emm2, vdupq_n_u32(2)), vdupq_n_u32(4));
/* Evaluate the first polynom (0 <= x <= Pi/4) in y1,
and the second polynom (Pi/4 <= x <= Pi/2) in y2 */
v4sf z = vmulq_f32(x,x);
v4sf y1, y2;
y1 = vfmaq_n_f32(vdupq_n_f32(c_coscof_p1), z, c_coscof_p0);
y2 = vfmaq_n_f32(vdupq_n_f32(c_sincof_p1), z, c_sincof_p0);
y1 = vfmaq_f32(vdupq_n_f32(c_coscof_p2), y1, z);
y2 = vfmaq_f32(vdupq_n_f32(c_sincof_p2), y2, z);
y1 = vmulq_f32(y1, z);
y2 = vmulq_f32(y2, z);
y1 = vmulq_f32(y1, z);
y1 = vfmsq_n_f32(y1, z, 0.5f);
y2 = vfmaq_f32(x, y2, x);
y1 = vaddq_f32(y1, vdupq_n_f32(1));
/* select the correct result from the two polynoms */
v4sf ys = vbslq_f32(poly_mask, y1, y2);
v4sf yc = vbslq_f32(poly_mask, y2, y1);
*ysin = vbslq_f32(sign_mask_sin, vnegq_f32(ys), ys);
*ycos = vbslq_f32(sign_mask_cos, yc, vnegq_f32(yc));
}
inline v4sf sin_ps(v4sf x) {
v4sf ysin, ycos;
sincos_ps(x, &ysin, &ycos);
return ysin;
}
inline v4sf cos_ps(v4sf x) {
v4sf ysin, ycos;
sincos_ps(x, &ysin, &ycos);
return ycos;
}
static const float asinf_lut[7] = {
1.5707961728,
-0.2145852647,
0.0887556286,
-0.0488025043,
0.0268999482,
-0.0111462294,
0.0022959648
};
inline void asincos_ps(float32x4_t x, float32x4_t* yasin, float32x4_t* yacos)
{
float32x4_t one = vdupq_n_f32(1);
float32x4_t negone = vdupq_n_f32(-1);
float32x4_t lut[7];
float32x4_t xv[5];
float32x4_t sat = vdupq_n_f32(0.9999999f);
float32x4_t m_pi_2 = vdupq_n_f32(1.570796326);
for (int i = 0; i <= 6; i++)
lut[i] = vdupq_n_f32(asinf_lut[i]);
uint32x4_t sign_mask_asin = vcltq_f32(x, vdupq_n_f32(0));
x = vabsq_f32(x);
uint32x4_t saturate = vcgeq_f32(x, one);
x = vbslq_f32(saturate, sat, x);
float32x4_t y = vsubq_f32(one, x);
y = vsqrtq_f32(y);
xv[0] = vmulq_f32(x, x);
for (int i = 1; i < 5; i++)
xv[i] = vmulq_f32(xv[i - 1], x);
float32x4_t a0 = vaddq_f32(lut[0], vmulq_f32(lut[1], x));
float32x4_t a1 = vaddq_f32(vmulq_f32(lut[2], xv[0]), vmulq_f32(lut[3], xv[1]));
float32x4_t a2 = vaddq_f32(vmulq_f32(lut[4], xv[2]), vmulq_f32(lut[5], xv[3]));
float32x4_t a3 = vmulq_f32(lut[6], xv[4]);
float32x4_t phx = vaddq_f32(vaddq_f32(a0, vaddq_f32(a1, a2)), a3);
float32x4_t arcsinx = vmulq_f32(y, phx);
arcsinx = vsubq_f32(m_pi_2, arcsinx);
float32x4_t arcnsinx = vmulq_f32(negone, arcsinx);
arcsinx = vbslq_f32(sign_mask_asin, arcnsinx, arcsinx);
*yasin = arcsinx;
*yacos = vsubq_f32(m_pi_2, arcsinx);
}
inline float32x4_t asin_ps(float32x4_t x)
{
float32x4_t yasin, yacos;
asincos_ps(x, &yasin, &yacos);
return yasin;
}
inline float32x4_t acos_ps(float32x4_t x)
{
float32x4_t yasin, yacos;
asincos_ps(x, &yasin, &yacos);
return yacos;
}
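For context, a minimal sketch (not part of this commit) of how the removed helpers could be exercised from a standalone C++ file on an AArch64/NEON build, comparing sin_ps/cos_ps against scalar sinf/cosf; the test values and the main() harness are illustrative only:

#include <math.h>
#include <stdio.h>
#include <arm_neon.h>
#include "neon_mathfun.h"

int main(void)
{
	float in[4] = { 0.0f, 0.5f, 1.0f, 3.14159265f };
	v4sf x = vld1q_f32(in);
	v4sf s, c;
	sincos_ps(x, &s, &c); // one call yields both sine and cosine
	float sv[4], cv[4];
	vst1q_f32(sv, s);
	vst1q_f32(cv, c);
	for (int i = 0; i < 4; i++)
		printf("x=%g sin err=%g cos err=%g\n", in[i], sv[i] - sinf(in[i]), cv[i] - cosf(in[i]));
	return 0;
}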

cl_dll/saytext.cpp (7 lines changed)

@@ -100,10 +100,11 @@ int CHudSayText::Draw( float flTime )
int y = Y_START;
#if USE_VGUI
if( ( gViewPort && gViewPort->AllowedToPrintText() == FALSE ) || !m_HUD_saytext->value )
if( ( gViewPort && gViewPort->AllowedToPrintText() == FALSE ) )
return 1;
#endif
if ( !m_HUD_saytext->value )
return 1;
// make sure the scrolltime is within reasonable bounds, to guard against the clock being reset
flScrollTime = Q_min( flScrollTime, flTime + m_HUD_saytext_time->value );
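Net result of this hunk, sketched for readability (same identifiers as the surrounding code): the gViewPort check stays behind USE_VGUI, while the m_HUD_saytext cvar check now applies on every build:

#if USE_VGUI
	if( gViewPort && gViewPort->AllowedToPrintText() == FALSE )
		return 1;
#endif
	if( !m_HUD_saytext->value )
		return 1;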
@@ -173,8 +174,6 @@ void CHudSayText::SayTextPrint( const char *pszBuf, int iBufSize, int clientInde
ConsolePrint( pszBuf );
return;
}
#else
ConsolePrint( pszBuf );
#endif
int i;

cl_dll/studio_util.cpp (235 lines changed)

@@ -10,12 +10,6 @@
#include "const.h"
#include "com_model.h"
#include "studio_util.h"
#include "build.h"
#if XASH_ARMv8
#define XASH_SIMD_NEON 1
#include <arm_neon.h>
#include "neon_mathfun.h"
#endif
/*
====================
@@ -23,59 +17,8 @@ AngleMatrix
====================
*/
#if XASH_SIMD_NEON
static const uint32x4_t AngleMatrix_sign0 = vsetq_lane_u32(0x80000000, vdupq_n_u32(0), 0);
static const uint32x4_t AngleMatrix_sign1 = vsetq_lane_u32(0x80000000, vdupq_n_u32(0), 1);
static const uint32x4_t AngleMatrix_sign2 = vsetq_lane_u32(0x80000000, vdupq_n_u32(0), 2);
#endif
void AngleMatrix( const float *angles, float (*matrix)[4] )
{
#if XASH_SIMD_NEON
float32x4x3_t out_reg;
float32x4_t angles_reg = {};
memcpy(&angles_reg, angles, sizeof(float) * 3);
float32x4x2_t sp_sy_sr_0_cp_cy_cr_1;
sincos_ps(vmulq_n_f32(angles_reg, (M_PI * 2 / 360)), &sp_sy_sr_0_cp_cy_cr_1.val[0], &sp_sy_sr_0_cp_cy_cr_1.val[1]);
float32x4x2_t sp_sr_cp_cr_sy_0_cy_1 = vuzpq_f32(sp_sy_sr_0_cp_cy_cr_1.val[0], sp_sy_sr_0_cp_cy_cr_1.val[1]);
float32x4x2_t sp_cp_sy_cy_sr_cr_0_1 = vzipq_f32(sp_sy_sr_0_cp_cy_cr_1.val[0], sp_sy_sr_0_cp_cy_cr_1.val[1]);
float32x4_t _0_sr_cr_0 = vextq_f32(sp_sy_sr_0_cp_cy_cr_1.val[0], sp_cp_sy_cy_sr_cr_0_1.val[1], 3);
float32x4_t cp_cr_sr_0 = vcombine_f32(vget_high_f32(sp_sr_cp_cr_sy_0_cy_1.val[0]), vget_high_f32(sp_sy_sr_0_cp_cy_cr_1.val[0]));
float32x4_t cy_sy_sy_0 = vcombine_f32(vrev64_f32(vget_high_f32(sp_cp_sy_cy_sr_cr_0_1.val[0])), vget_low_f32(sp_sr_cp_cr_sy_0_cy_1.val[1]));
float32x4_t sy_cy_cy_1 = vcombine_f32(vget_high_f32(sp_cp_sy_cy_sr_cr_0_1.val[0]), vget_high_f32(sp_sr_cp_cr_sy_0_cy_1.val[1]));
float32x4_t _0_srsp_crsp_0 = vmulq_laneq_f32(_0_sr_cr_0, sp_sy_sr_0_cp_cy_cr_1.val[0], 0); // *sp
out_reg.val[0] = vmulq_laneq_f32(_0_srsp_crsp_0, sp_sy_sr_0_cp_cy_cr_1.val[1], 1); // *cy
out_reg.val[1] = vmulq_laneq_f32(_0_srsp_crsp_0, sp_sy_sr_0_cp_cy_cr_1.val[0], 1); // *sy
cy_sy_sy_0 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(cy_sy_sy_0), AngleMatrix_sign1));
sy_cy_cy_1 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(sy_cy_cy_1), AngleMatrix_sign2));
out_reg.val[0] = vfmaq_f32(out_reg.val[0], cp_cr_sr_0, cy_sy_sy_0);
out_reg.val[1] = vfmaq_f32(out_reg.val[1], cp_cr_sr_0, sy_cy_cy_1);
float32x4_t cp_cr_0_1 = vcombine_f32(vget_high_f32(sp_sr_cp_cr_sy_0_cy_1.val[0]), vget_high_f32(sp_cp_sy_cy_sr_cr_0_1.val[1]));
float32x4_t _1_cp_cr_0 = vextq_f32(cp_cr_0_1, cp_cr_0_1, 3);
out_reg.val[2] = vmulq_f32(sp_sr_cp_cr_sy_0_cy_1.val[0], _1_cp_cr_0);
out_reg.val[2] = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(out_reg.val[2]), AngleMatrix_sign0));
memcpy(matrix, &out_reg, sizeof(float) * 3 * 4);
/*
matrix[0][0] = cp*cy;
matrix[0][1] = sr*sp*cy-cr*sy;
matrix[0][2] = cr*sp*cy+sr*sy;
matrix[0][3] = 0.0;
matrix[1][0] = cp*sy;
matrix[1][1] = sr*sp*sy+cr*cy;
matrix[1][2] = cr*sp*sy-sr*cy;
matrix[1][3] = 0.0;
matrix[2][0] = -sp*1;
matrix[2][1] = sr*cp;
matrix[2][2] = cp*cr;
matrix[2][3] = cr*0;
*/
#else
float angle;
float sr, sp, sy, cr, cp, cy;
@@ -102,7 +45,6 @@ void AngleMatrix( const float *angles, float (*matrix)[4] )
matrix[0][3] = 0.0f;
matrix[1][3] = 0.0f;
matrix[2][3] = 0.0f;
#endif
}
/*
@@ -113,13 +55,6 @@ VectorCompare
*/
int VectorCompare( const float *v1, const float *v2 )
{
#if XASH_SIMD_NEON
// all lanes must compare equal (the unused fourth lane is zero-padded on both sides);
// reduce with vminvq_u32, which is non-zero only when every lane of the mask is set
float32x4_t v1_reg = {}, v2_reg = {};
memcpy(&v1_reg, v1, sizeof(float) * 3);
memcpy(&v2_reg, v2, sizeof(float) * 3);
return vminvq_u32(vceqq_f32(v1_reg, v2_reg)) != 0;
#else
int i;
for( i = 0; i < 3; i++ )
@@ -127,7 +62,6 @@ int VectorCompare( const float *v1, const float *v2 )
return 0;
return 1;
#endif
}
/*
@@ -138,23 +72,9 @@ CrossProduct
*/
void CrossProduct( const float *v1, const float *v2, float *cross )
{
#if XASH_SIMD_NEON
float32x4_t v1_reg = {}, v2_reg = {};
memcpy(&v1_reg, v1, sizeof(float) * 3);
memcpy(&v2_reg, v2, sizeof(float) * 3);
float32x4_t yzxy_a = vextq_f32(vextq_f32(v1_reg, v1_reg, 3), v1_reg, 2); // [aj, ak, ai, aj]
float32x4_t yzxy_b = vextq_f32(vextq_f32(v2_reg, v2_reg, 3), v2_reg, 2); // [bj, bk, bi, bj]
float32x4_t zxyy_a = vextq_f32(yzxy_a, yzxy_a, 1); // [ak, ai, aj, aj]
float32x4_t zxyy_b = vextq_f32(yzxy_b, yzxy_b, 1); // [bk, bi, bj, bj]
float32x4_t cross_reg = vfmsq_f32(vmulq_f32(yzxy_a, zxyy_b), zxyy_a, yzxy_b); // [ajbk-akbj, akbi-aibk, aibj-ajbi, 0]
memcpy(cross, &cross_reg, sizeof(float) * 3);
#else
cross[0] = v1[1] * v2[2] - v1[2] * v2[1];
cross[1] = v1[2] * v2[0] - v1[0] * v2[2];
cross[2] = v1[0] * v2[1] - v1[1] * v2[0];
#endif
}
/*
@@ -165,26 +85,9 @@ VectorTransform
*/
void VectorTransform( const float *in1, float in2[3][4], float *out )
{
#if XASH_SIMD_NEON
float32x4_t in1_reg = {};
memcpy(&in1_reg, in1, sizeof(float) * 3);
float32x4x4_t in_t;
memcpy(&in_t, in2, sizeof(float) * 3 * 4);
//memset(&in_t.val[3], 0, sizeof(in_t.val[3]));
in_t = vld4q_f32((const float*)&in_t);
float32x4_t out_reg = in_t.val[3];
out_reg = vfmaq_laneq_f32(out_reg, in_t.val[0], in1_reg, 0);
out_reg = vfmaq_laneq_f32(out_reg, in_t.val[1], in1_reg, 1);
out_reg = vfmaq_laneq_f32(out_reg, in_t.val[2], in1_reg, 2);
memcpy(out, &out_reg, sizeof(float) * 3);
#else
out[0] = DotProduct(in1, in2[0]) + in2[0][3];
out[1] = DotProduct(in1, in2[1]) + in2[1][3];
out[2] = DotProduct(in1, in2[2]) + in2[2][3];
#endif
}
/*
@@ -195,29 +98,6 @@ ConcatTransforms
*/
void ConcatTransforms( float in1[3][4], float in2[3][4], float out[3][4] )
{
#if XASH_SIMD_NEON
float32x4x3_t in1_reg, in2_reg;
memcpy(&in1_reg, in1, sizeof(float) * 3 * 4);
memcpy(&in2_reg, in2, sizeof(float) * 3 * 4);
float32x4x3_t out_reg = {};
out_reg.val[0] = vcopyq_laneq_f32(out_reg.val[0], 3, in1_reg.val[0], 3); // out[0][3] = in[0][3]
out_reg.val[0] = vfmaq_laneq_f32(out_reg.val[0], in2_reg.val[0], in1_reg.val[0], 0); // out[0][n] += in2[0][n] * in1[0][0]
out_reg.val[0] = vfmaq_laneq_f32(out_reg.val[0], in2_reg.val[1], in1_reg.val[0], 1); // out[0][n] += in2[1][n] * in1[0][1]
out_reg.val[0] = vfmaq_laneq_f32(out_reg.val[0], in2_reg.val[2], in1_reg.val[0], 2); // out[0][n] += in2[2][n] * in1[0][2]
out_reg.val[1] = vcopyq_laneq_f32(out_reg.val[1], 3, in1_reg.val[1], 3);
out_reg.val[1] = vfmaq_laneq_f32(out_reg.val[1], in2_reg.val[0], in1_reg.val[1], 0);
out_reg.val[1] = vfmaq_laneq_f32(out_reg.val[1], in2_reg.val[1], in1_reg.val[1], 1);
out_reg.val[1] = vfmaq_laneq_f32(out_reg.val[1], in2_reg.val[2], in1_reg.val[1], 2);
out_reg.val[2] = vcopyq_laneq_f32(out_reg.val[2], 3, in1_reg.val[2], 3);
out_reg.val[2] = vfmaq_laneq_f32(out_reg.val[2], in2_reg.val[0], in1_reg.val[2], 0);
out_reg.val[2] = vfmaq_laneq_f32(out_reg.val[2], in2_reg.val[1], in1_reg.val[2], 1);
out_reg.val[2] = vfmaq_laneq_f32(out_reg.val[2], in2_reg.val[2], in1_reg.val[2], 2);
memcpy(out, &out_reg, sizeof(float) * 3 * 4);
#else
out[0][0] = in1[0][0] * in2[0][0] + in1[0][1] * in2[1][0] +
in1[0][2] * in2[2][0];
out[0][1] = in1[0][0] * in2[0][1] + in1[0][1] * in2[1][1] +
@@ -242,7 +122,6 @@ void ConcatTransforms( float in1[3][4], float in2[3][4], float out[3][4] )
in1[2][2] * in2[2][2];
out[2][3] = in1[2][0] * in2[0][3] + in1[2][1] * in2[1][3] +
in1[2][2] * in2[2][3] + in1[2][3];
#endif
}
// angles index are not the same as ROLL, PITCH, YAW
@@ -253,36 +132,8 @@ AngleQuaternion
====================
*/
#if XASH_SIMD_NEON
static const uint32x4_t AngleQuaternion_sign2 = vzipq_u32(vdupq_n_u32(0x80000000), vdupq_n_u32(0x00000000)).val[0]; // { 0x80000000, 0x00000000, 0x80000000, 0x00000000 };
#endif
void AngleQuaternion( float *angles, vec4_t quaternion )
{
#if XASH_SIMD_NEON
float32x4_t angles_reg = {};
memcpy(&angles_reg, angles, sizeof(float) * 3);
float32x4x2_t sr_sp_sy_0_cr_cp_cy_1;
sincos_ps(vmulq_n_f32(angles_reg, 0.5), &sr_sp_sy_0_cr_cp_cy_1.val[0], &sr_sp_sy_0_cr_cp_cy_1.val[1]);
float32x4x2_t sr_sy_cr_cy_sp_0_cp_1 = vuzpq_f32(sr_sp_sy_0_cr_cp_cy_1.val[0], sr_sp_sy_0_cr_cp_cy_1.val[1]);
float32x4_t cp_cp_cp_cp = vdupq_laneq_f32(sr_sp_sy_0_cr_cp_cy_1.val[1], 1);
float32x4_t sp_sp_sp_sp = vdupq_laneq_f32(sr_sp_sy_0_cr_cp_cy_1.val[0], 1);
float32x4_t sr_sy_cr_cy = sr_sy_cr_cy_sp_0_cp_1.val[0];
float32x4_t sy_cr_cy_sr = vextq_f32(sr_sy_cr_cy_sp_0_cp_1.val[0], sr_sy_cr_cy_sp_0_cp_1.val[0], 1);
float32x4_t cr_cy_sr_sy = vextq_f32(sr_sy_cr_cy_sp_0_cp_1.val[0], sr_sy_cr_cy_sp_0_cp_1.val[0], 2);
float32x4_t cy_sr_sy_cr = vextq_f32(sr_sy_cr_cy_sp_0_cp_1.val[0], sr_sy_cr_cy_sp_0_cp_1.val[0], 3);
float32x4_t sp_sp_sp_sp_signed = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(sp_sp_sp_sp), AngleQuaternion_sign2));
float32x4_t left = vmulq_f32(vmulq_f32(sr_sy_cr_cy, cp_cp_cp_cp), cy_sr_sy_cr);
float32x4_t out_reg = vfmaq_f32(left, vmulq_f32(cr_cy_sr_sy, sp_sp_sp_sp_signed), sy_cr_cy_sr);
memcpy(quaternion, &out_reg, sizeof(float) * 4);
//quaternion[0] = sr * cp * cy - cr * sp * sy; // X
//quaternion[1] = sy * cp * sr + cy * sp * cr; // Y
//quaternion[2] = cr * cp * sy - sr * sp * cy; // Z
//quaternion[3] = cy * cp * cr + sy * sp * sr; // W
#else
float angle;
float sr, sp, sy, cr, cp, cy;
@@ -301,7 +152,6 @@ void AngleQuaternion( float *angles, vec4_t quaternion )
quaternion[1] = cr * sp * cy + sr * cp * sy; // Y
quaternion[2] = cr * cp * sy - sr * sp * cy; // Z
quaternion[3] = cr * cp * cy + sr * sp * sy; // W
#endif
}
/*
@@ -312,37 +162,6 @@ QuaternionSlerp
*/
void QuaternionSlerp( vec4_t p, vec4_t q, float t, vec4_t qt )
{
#if XASH_SIMD_NEON
float32x4_t p_reg = {}, q_reg = {};
memcpy(&p_reg, p, sizeof(float) * 4);
memcpy(&q_reg, q, sizeof(float) * 4);
// q = (cos(a/2), xsin(a/2), ysin(a/2), zsin(a/2))
// cos(a-b) = cosacosb+sinasinb
const uint32x4_t signmask = vdupq_n_u32(0x80000000);
const float32x4_t one_minus_epsilon = vdupq_n_f32(1.0f - 0.00001f);
float32x4_t vcosom = vdupq_n_f32(DotProduct(p, q));
uint32x4_t sign = vandq_u32(vreinterpretq_u32_f32(vcosom), signmask);
q_reg = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(q_reg), sign)); // flip q (not p) when the dot product is negative, so the shorter arc is interpolated
vcosom = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(vcosom), sign));
float x[4] = {(1.0f - t), t, 1, 0}; // cosom -> 1, sinom -> 0, sinx ~ x
float32x4_t x_reg;
memcpy(&x_reg, x, sizeof(float) * 4);
// if ((1.0 - cosom) > 0.000001) x = sin(x * omega)
uint32x4_t cosom_less_than_one = vcltq_f32(vcosom, one_minus_epsilon);
float32x4_t vomega = acos_ps(vcosom);
x_reg = vbslq_f32(cosom_less_than_one, sin_ps(vmulq_f32(x_reg, vomega)), x_reg); // sin() path where cosom < 1 - epsilon, linear fallback otherwise
// qt = (x[0] * p + x[1] * q) / x[2];
float32x4_t qt_reg = vmulq_laneq_f32(p_reg, x_reg, 0);
qt_reg = vfmaq_laneq_f32(qt_reg, q_reg, x_reg, 1);
qt_reg = vdivq_f32(qt_reg, vdupq_laneq_f32(x_reg, 2)); // vdivq_laneq_f32 ?
memcpy(qt, &qt_reg, sizeof(float) * 4);
#else
int i;
float omega, cosom, sinom, sclp, sclq;
@@ -397,7 +216,6 @@ void QuaternionSlerp( vec4_t p, vec4_t q, float t, vec4_t qt )
qt[i] = sclp * p[i] + sclq * qt[i];
}
}
#endif
}
/*
@@ -406,60 +224,8 @@ QuaternionMatrix
====================
*/
#if XASH_SIMD_NEON
static const uint32x4_t QuaternionMatrix_sign1 = vsetq_lane_u32(0x80000000, vdupq_n_u32(0x00000000), 0); // { 0x80000000, 0x00000000, 0x00000000, 0x00000000 };
static const uint32x4_t QuaternionMatrix_sign2 = vsetq_lane_u32(0x80000000, vdupq_n_u32(0x00000000), 1); // { 0x00000000, 0x80000000, 0x00000000, 0x00000000 };
static const uint32x4_t QuaternionMatrix_sign3 = vsetq_lane_u32(0x00000000, vdupq_n_u32(0x80000000), 2); // { 0x80000000, 0x80000000, 0x00000000, 0x80000000 };
static const float32x4_t matrix3x4_identity_0 = vsetq_lane_f32(1, vdupq_n_f32(0), 0); // { 1, 0, 0, 0 }
static const float32x4_t matrix3x4_identity_1 = vsetq_lane_f32(1, vdupq_n_f32(0), 1); // { 0, 1, 0, 0 }
static const float32x4_t matrix3x4_identity_2 = vsetq_lane_f32(1, vdupq_n_f32(0), 2); // { 0, 0, 1, 0 }
#endif
void QuaternionMatrix( vec4_t quaternion, float (*matrix)[4] )
{
#if XASH_SIMD_NEON
float32x4_t quaternion_reg;
memcpy(&quaternion_reg, quaternion, sizeof(float) * 4);
float32x4_t q1032 = vrev64q_f32(quaternion_reg);
float32x4_t q1032_signed = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(q1032), QuaternionMatrix_sign1));
float32x4_t q2301 = vextq_f32(quaternion_reg, quaternion_reg, 2);
float32x4_t q2301_signed = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(q2301), QuaternionMatrix_sign3));
float32x4_t q3210 = vrev64q_f32(q2301);
float32x4_t q3210_signed = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(q3210), QuaternionMatrix_sign2));
float32x4x3_t out_reg;
out_reg.val[0] = vmulq_laneq_f32(q2301_signed, quaternion_reg, 2);
out_reg.val[0] = vfmaq_laneq_f32(out_reg.val[0], q1032_signed, quaternion_reg, 1);
out_reg.val[0] = vfmaq_n_f32(matrix3x4_identity_0, out_reg.val[0], 2.0f);
out_reg.val[1] = vmulq_laneq_f32(q3210_signed, quaternion_reg, 2);
out_reg.val[1] = vfmsq_laneq_f32(out_reg.val[1], q1032_signed, quaternion_reg, 0);
out_reg.val[1] = vfmaq_n_f32(matrix3x4_identity_1, out_reg.val[1], 2.0f);
out_reg.val[2] = vmulq_laneq_f32(q3210_signed, quaternion_reg, 1);
out_reg.val[2] = vfmaq_laneq_f32(out_reg.val[2], q2301_signed, quaternion_reg, 0);
out_reg.val[2] = vfmsq_n_f32(matrix3x4_identity_2, out_reg.val[2], 2.0f);
memcpy(matrix, &out_reg, sizeof(float) * 3 * 4);
/*
matrix[0][0] = 1.0 + 2.0 * ( quaternion[1] * -quaternion[1] + -quaternion[2] * quaternion[2] );
matrix[0][1] = 0.0 + 2.0 * ( quaternion[1] * quaternion[0] + -quaternion[3] * quaternion[2] );
matrix[0][2] = 0.0 + 2.0 * ( quaternion[1] * quaternion[3] + quaternion[0] * quaternion[2] );
matrix[0][3] = 0.0 + 2.0 * ( quaternion[1] * quaternion[2] + -quaternion[1] * quaternion[2] );
matrix[1][0] = 0.0 + 2.0 * ( -quaternion[0] * -quaternion[1] + quaternion[3] * quaternion[2] );
matrix[1][1] = 1.0 + 2.0 * ( -quaternion[0] * quaternion[0] + -quaternion[2] * quaternion[2] );
matrix[1][2] = 0.0 + 2.0 * ( -quaternion[0] * quaternion[3] + quaternion[1] * quaternion[2] );
matrix[1][3] = 0.0 + 2.0 * ( -quaternion[0] * quaternion[2] + quaternion[0] * quaternion[2] );
matrix[2][0] = 0.0 + 2.0 * ( -quaternion[0] * -quaternion[2] + -quaternion[3] * quaternion[1] );
matrix[2][1] = 0.0 + 2.0 * ( -quaternion[0] * -quaternion[3] + quaternion[2] * quaternion[1] );
matrix[2][2] = 1.0 + 2.0 * ( -quaternion[0] * quaternion[0] + -quaternion[1] * quaternion[1] );
matrix[2][3] = 0.0 + 2.0 * ( -quaternion[0] * -quaternion[1] + -quaternion[0] * quaternion[1] );
*/
#else
matrix[0][0] = 1.0f - 2.0f * quaternion[1] * quaternion[1] - 2.0f * quaternion[2] * quaternion[2];
matrix[1][0] = 2.0f * quaternion[0] * quaternion[1] + 2.0f * quaternion[3] * quaternion[2];
matrix[2][0] = 2.0f * quaternion[0] * quaternion[2] - 2.0f * quaternion[3] * quaternion[1];
@@ -471,7 +237,6 @@ void QuaternionMatrix( vec4_t quaternion, float (*matrix)[4] )
matrix[0][2] = 2.0f * quaternion[0] * quaternion[2] + 2.0f * quaternion[3] * quaternion[1];
matrix[1][2] = 2.0f * quaternion[1] * quaternion[2] - 2.0f * quaternion[3] * quaternion[0];
matrix[2][2] = 1.0f - 2.0f * quaternion[0] * quaternion[0] - 2.0f * quaternion[1] * quaternion[1];
#endif
}
/*
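For reference, a scalar sketch (not part of this commit) of the rotation matrix that the removed NEON AngleMatrix path reproduced, written out from the commented formulas in the hunks above; it assumes the usual PITCH, YAW, ROLL ordering of the angles array (degrees), and the helper name is hypothetical:

#include <math.h>
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

static void AngleMatrixRef( const float *angles, float matrix[3][4] )
{
	float p = angles[0] * (float)( M_PI * 2 / 360 ); // pitch
	float y = angles[1] * (float)( M_PI * 2 / 360 ); // yaw
	float r = angles[2] * (float)( M_PI * 2 / 360 ); // roll
	float sp = sinf( p ), cp = cosf( p );
	float sy = sinf( y ), cy = cosf( y );
	float sr = sinf( r ), cr = cosf( r );

	matrix[0][0] = cp * cy; matrix[0][1] = sr * sp * cy - cr * sy; matrix[0][2] = cr * sp * cy + sr * sy; matrix[0][3] = 0.0f;
	matrix[1][0] = cp * sy; matrix[1][1] = sr * sp * sy + cr * cy; matrix[1][2] = cr * sp * sy - sr * cy; matrix[1][3] = 0.0f;
	matrix[2][0] = -sp;     matrix[2][1] = sr * cp;                matrix[2][2] = cr * cp;                matrix[2][3] = 0.0f;
}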

dlls/bigmomma.cpp (2 lines changed)

@@ -931,7 +931,7 @@ void CBigMomma::StartTask( Task_t *pTask )
TaskComplete();
break;
case TASK_WAIT_NODE:
m_flWaitFinished = gpGlobals->time + GetNodeDelay();
m_flWait = gpGlobals->time + GetNodeDelay();
if( m_hTargetEnt->pev->spawnflags & SF_INFOBM_WAIT )
ALERT( at_aiconsole, "BM: Wait at node %s forever\n", STRING( pev->netname ) );
else
