@@ -2491,12 +2491,12 @@ static void sub_int16_sse2(int16_t * v1, int16_t * v2, int order)
 
 static int32_t scalarproduct_int16_sse2(int16_t * v1, int16_t * v2, int order, int shift)
 {
     int res = 0;
-    DECLARE_ALIGNED_16(int64_t, sh);
+    DECLARE_ALIGNED_16(xmm_reg, sh);
     x86_reg o = -(order << 1);
     v1 += order;
     v2 += order;
-    sh = shift;
+    sh.a = shift;
     __asm__ volatile(
         "pxor %%xmm7, %%xmm7 \n\t"
         "1: \n\t"
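
Why the widening matters: SSE2 shift instructions such as psrad accept a 128-bit
memory operand; the shift count is taken from its low quadword, but the full
16 bytes are loaded. If the elided asm body uses sh as a memory operand to such
an instruction, the old 8-byte int64_t would leave half of that load outside
the variable, while the 16-byte xmm_reg keeps the whole read in bounds. Below
is a minimal standalone sketch of the pattern, assuming GCC-style inline asm on
SSE2-capable x86; the xmm_reg definition here is an illustrative stand-in, not
FFmpeg's exact one:

    /* Sketch: why the shift count needs a 16-byte, 16-byte-aligned home.
     * psrad with a memory source reads a full 128 bits, so the operand
     * must be at least that large. xmm_reg below is an illustrative
     * stand-in for FFmpeg's type, not its actual definition. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint64_t a, b; } xmm_reg;

    int main(void)
    {
        xmm_reg sh __attribute__((aligned(16))) = { 0, 0 };
        union {
            int32_t i[4];
            xmm_reg r;
        } v __attribute__((aligned(16))) = { .i = { 256, -256, 4096, -4096 } };

        sh.a = 4;   /* shift count in the low qword, as in "sh.a = shift" */

        __asm__ volatile(
            "movdqa  %0, %%xmm0 \n\t"   /* load four int32 lanes           */
            "psrad   %1, %%xmm0 \n\t"   /* sh read as a 16-byte operand    */
            "movdqa  %%xmm0, %0 \n\t"
            : "+m"(v.r)
            : "m"(sh)
            : "xmm0");

        printf("%d %d %d %d\n", v.i[0], v.i[1], v.i[2], v.i[3]);
        return 0;
    }

Built with plain gcc on x86-64 this prints 16 -16 256 -256. Shrink sh back to
an 8-byte int64_t and the psrad's 16-byte read would run past the end of the
object, which is the class of invalid read (the kind tools like valgrind flag)
that the hunk above removes.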