in modules/imgproc/src/imgwarp.cpp [4775:5220]
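// cv::convertMaps converts remap() maps between the floating-point
// representations (a pair of CV_32FC1 maps or a single CV_32FC2 map) and the
// faster fixed-point representation: CV_16SC2 integer coordinates plus an
// optional CV_16UC1 map of interpolation-table indices. When nninterpolate is
// true, the fractional part is dropped and only the integer map is written.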
void cv::convertMaps( InputArray _map1, InputArray _map2,
OutputArray _dstmap1, OutputArray _dstmap2,
int dstm1type, bool nninterpolate )
{
Mat map1 = _map1.getMat(), map2 = _map2.getMat(), dstmap1, dstmap2;
Size size = map1.size();
const Mat *m1 = &map1, *m2 = &map2;
int m1type = m1->type(), m2type = m2->type();
CV_Assert( (m1type == CV_16SC2 && (nninterpolate || m2type == CV_16UC1 || m2type == CV_16SC1)) ||
(m2type == CV_16SC2 && (nninterpolate || m1type == CV_16UC1 || m1type == CV_16SC1)) ||
(m1type == CV_32FC1 && m2type == CV_32FC1) ||
(m1type == CV_32FC2 && m2->empty()) );
if( m2type == CV_16SC2 )
{
std::swap( m1, m2 );
std::swap( m1type, m2type );
}
if( dstm1type <= 0 )
dstm1type = m1type == CV_16SC2 ? CV_32FC2 : CV_16SC2;
CV_Assert( dstm1type == CV_16SC2 || dstm1type == CV_32FC1 || dstm1type == CV_32FC2 );
_dstmap1.create( size, dstm1type );
dstmap1 = _dstmap1.getMat();
if( !nninterpolate && dstm1type != CV_32FC2 )
{
_dstmap2.create( size, dstm1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 );
dstmap2 = _dstmap2.getMat();
}
else
_dstmap2.release();
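// Fast path: same layout (or an integer-only conversion for nearest-neighbor
// use) reduces to a plain convertTo/copyTo.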
if( m1type == dstm1type || (nninterpolate &&
((m1type == CV_16SC2 && dstm1type == CV_32FC2) ||
(m1type == CV_32FC2 && dstm1type == CV_16SC2))) )
{
m1->convertTo( dstmap1, dstmap1.type() );
if( !dstmap2.empty() && dstmap2.type() == m2->type() )
m2->copyTo( dstmap2 );
return;
}
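// A pair of CV_32FC1 planes and one CV_32FC2 map differ only in channel
// layout, so these two conversions are a channel merge/split.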
if( m1type == CV_32FC1 && dstm1type == CV_32FC2 )
{
Mat vdata[] = { *m1, *m2 };
merge( vdata, 2, dstmap1 );
return;
}
if( m1type == CV_32FC2 && dstm1type == CV_32FC1 )
{
Mat mv[] = { dstmap1, dstmap2 };
split( *m1, mv );
return;
}
if( m1->isContinuous() && (m2->empty() || m2->isContinuous()) &&
dstmap1.isContinuous() && (dstmap2.empty() || dstmap2.isContinuous()) )
{
size.width *= size.height;
size.height = 1;
}
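// With continuous maps the whole image can be processed as one long row,
// giving the SIMD loops below the longest possible spans.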
#if CV_SSE2
bool useSSE2 = checkHardwareSupport(CV_CPU_SSE2);
#endif
#if CV_SSE4_1
bool useSSE4_1 = checkHardwareSupport(CV_CPU_SSE4_1);
#endif
const float scale = 1.f/INTER_TAB_SIZE;
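// Fixed-point encoding (with the usual INTER_BITS == 5, INTER_TAB_SIZE == 32):
// dst1 holds the integer parts of (x, y); dst2 holds the index
// (iy_frac*INTER_TAB_SIZE + ix_frac) into the 32x32 interpolation table.
// Example: x = 10.25 -> ix = 10.25*32 = 328; integer part 328 >> 5 = 10,
// fraction 328 & 31 = 8, later recovered as 8*scale = 0.25.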
int x, y;
for( y = 0; y < size.height; y++ )
{
const float* src1f = m1->ptr<float>(y);
const float* src2f = m2->ptr<float>(y);
const short* src1 = (const short*)src1f;
const ushort* src2 = (const ushort*)src2f;
float* dst1f = dstmap1.ptr<float>(y);
float* dst2f = dstmap2.ptr<float>(y);
short* dst1 = (short*)dst1f;
ushort* dst2 = (ushort*)dst2f;
x = 0;
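// Branch 1: two float planes -> fixed-point (round to 1/INTER_TAB_SIZE, split
// into integer coordinates and interpolation-table index)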
if( m1type == CV_32FC1 && dstm1type == CV_16SC2 )
{
if( nninterpolate )
{
#if CV_NEON
for( ; x <= size.width - 8; x += 8 )
{
int16x8x2_t v_dst;
v_dst.val[0] = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(vld1q_f32(src1f + x))),
vqmovn_s32(cv_vrndq_s32_f32(vld1q_f32(src1f + x + 4))));
v_dst.val[1] = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(vld1q_f32(src2f + x))),
vqmovn_s32(cv_vrndq_s32_f32(vld1q_f32(src2f + x + 4))));
vst2q_s16(dst1 + (x << 1), v_dst);
}
#elif CV_SSE4_1
if (useSSE4_1)
{
for( ; x <= size.width - 16; x += 16 )
{
__m128i v_dst0 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src1f + x)),
_mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 4)));
__m128i v_dst1 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 8)),
_mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 12)));
__m128i v_dst2 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src2f + x)),
_mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 4)));
__m128i v_dst3 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 8)),
_mm_cvtps_epi32(_mm_loadu_ps(src2f + x + 12)));
_mm_interleave_epi16(v_dst0, v_dst1, v_dst2, v_dst3);
_mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst0);
_mm_storeu_si128((__m128i *)(dst1 + x * 2 + 8), v_dst1);
_mm_storeu_si128((__m128i *)(dst1 + x * 2 + 16), v_dst2);
_mm_storeu_si128((__m128i *)(dst1 + x * 2 + 24), v_dst3);
}
}
#endif
for( ; x < size.width; x++ )
{
dst1[x*2] = saturate_cast<short>(src1f[x]);
dst1[x*2+1] = saturate_cast<short>(src2f[x]);
}
}
else
{
#if CV_NEON
float32x4_t v_scale = vdupq_n_f32((float)INTER_TAB_SIZE);
int32x4_t v_mask = vdupq_n_s32(INTER_TAB_SIZE - 1);
for( ; x <= size.width - 8; x += 8 )
{
int32x4_t v_ix0 = cv_vrndq_s32_f32(vmulq_f32(vld1q_f32(src1f + x), v_scale));
int32x4_t v_ix1 = cv_vrndq_s32_f32(vmulq_f32(vld1q_f32(src1f + x + 4), v_scale));
int32x4_t v_iy0 = cv_vrndq_s32_f32(vmulq_f32(vld1q_f32(src2f + x), v_scale));
int32x4_t v_iy1 = cv_vrndq_s32_f32(vmulq_f32(vld1q_f32(src2f + x + 4), v_scale));
int16x8x2_t v_dst;
v_dst.val[0] = vcombine_s16(vqmovn_s32(vshrq_n_s32(v_ix0, INTER_BITS)),
vqmovn_s32(vshrq_n_s32(v_ix1, INTER_BITS)));
v_dst.val[1] = vcombine_s16(vqmovn_s32(vshrq_n_s32(v_iy0, INTER_BITS)),
vqmovn_s32(vshrq_n_s32(v_iy1, INTER_BITS)));
vst2q_s16(dst1 + (x << 1), v_dst);
uint16x4_t v_dst0 = vqmovun_s32(vaddq_s32(vshlq_n_s32(vandq_s32(v_iy0, v_mask), INTER_BITS),
vandq_s32(v_ix0, v_mask)));
uint16x4_t v_dst1 = vqmovun_s32(vaddq_s32(vshlq_n_s32(vandq_s32(v_iy1, v_mask), INTER_BITS),
vandq_s32(v_ix1, v_mask)));
vst1q_u16(dst2 + x, vcombine_u16(v_dst0, v_dst1));
}
#elif CV_SSE4_1
if (useSSE4_1)
{
__m128 v_its = _mm_set1_ps(INTER_TAB_SIZE);
__m128i v_its1 = _mm_set1_epi32(INTER_TAB_SIZE-1);
for( ; x <= size.width - 16; x += 16 )
{
__m128i v_ix0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x), v_its));
__m128i v_ix1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 4), v_its));
__m128i v_iy0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x), v_its));
__m128i v_iy1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 4), v_its));
__m128i v_dst10 = _mm_packs_epi32(_mm_srai_epi32(v_ix0, INTER_BITS),
_mm_srai_epi32(v_ix1, INTER_BITS));
__m128i v_dst12 = _mm_packs_epi32(_mm_srai_epi32(v_iy0, INTER_BITS),
_mm_srai_epi32(v_iy1, INTER_BITS));
__m128i v_dst20 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy0, v_its1), INTER_BITS),
_mm_and_si128(v_ix0, v_its1));
__m128i v_dst21 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy1, v_its1), INTER_BITS),
_mm_and_si128(v_ix1, v_its1));
_mm_storeu_si128((__m128i *)(dst2 + x), _mm_packus_epi32(v_dst20, v_dst21));
v_ix0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 8), v_its));
v_ix1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x + 12), v_its));
v_iy0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 8), v_its));
v_iy1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src2f + x + 12), v_its));
__m128i v_dst11 = _mm_packs_epi32(_mm_srai_epi32(v_ix0, INTER_BITS),
_mm_srai_epi32(v_ix1, INTER_BITS));
__m128i v_dst13 = _mm_packs_epi32(_mm_srai_epi32(v_iy0, INTER_BITS),
_mm_srai_epi32(v_iy1, INTER_BITS));
v_dst20 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy0, v_its1), INTER_BITS),
_mm_and_si128(v_ix0, v_its1));
v_dst21 = _mm_add_epi32(_mm_slli_epi32(_mm_and_si128(v_iy1, v_its1), INTER_BITS),
_mm_and_si128(v_ix1, v_its1));
_mm_storeu_si128((__m128i *)(dst2 + x + 8), _mm_packus_epi32(v_dst20, v_dst21));
_mm_interleave_epi16(v_dst10, v_dst11, v_dst12, v_dst13);
_mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst10);
_mm_storeu_si128((__m128i *)(dst1 + x * 2 + 8), v_dst11);
_mm_storeu_si128((__m128i *)(dst1 + x * 2 + 16), v_dst12);
_mm_storeu_si128((__m128i *)(dst1 + x * 2 + 24), v_dst13);
}
}
#endif
for( ; x < size.width; x++ )
{
int ix = saturate_cast<int>(src1f[x]*INTER_TAB_SIZE);
int iy = saturate_cast<int>(src2f[x]*INTER_TAB_SIZE);
dst1[x*2] = saturate_cast<short>(ix >> INTER_BITS);
dst1[x*2+1] = saturate_cast<short>(iy >> INTER_BITS);
dst2[x] = (ushort)((iy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (ix & (INTER_TAB_SIZE-1)));
}
}
}
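// Branch 2: interleaved CV_32FC2 -> fixed-point; same arithmetic as branch 1,
// but the x/y pairs must be deinterleaved first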
else if( m1type == CV_32FC2 && dstm1type == CV_16SC2 )
{
if( nninterpolate )
{
#if CV_NEON
for( ; x <= (size.width << 1) - 8; x += 8 )
vst1q_s16(dst1 + x, vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(vld1q_f32(src1f + x))),
vqmovn_s32(cv_vrndq_s32_f32(vld1q_f32(src1f + x + 4)))));
#elif CV_SSE2
for( ; x <= (size.width << 1) - 8; x += 8 )
{
_mm_storeu_si128((__m128i *)(dst1 + x), _mm_packs_epi32(_mm_cvtps_epi32(_mm_loadu_ps(src1f + x)),
_mm_cvtps_epi32(_mm_loadu_ps(src1f + x + 4))));
}
#endif
// x indexes flat elements here (the SIMD loops above treat the CV_32FC2 map
// as size.width*2 scalars), so the tail loop must use the same indexing or
// trailing pixels would be skipped
for( ; x < size.width*2; x++ )
dst1[x] = saturate_cast<short>(src1f[x]);
}
else
{
#if CV_NEON
float32x4_t v_scale = vdupq_n_f32((float)INTER_TAB_SIZE);
int32x4_t v_mask = vdupq_n_s32(INTER_TAB_SIZE - 1);
for( ; x <= size.width - 8; x += 8 )
{
float32x4x2_t v_src0 = vld2q_f32(src1f + (x << 1)), v_src1 = vld2q_f32(src1f + (x << 1) + 8);
int32x4_t v_ix0 = cv_vrndq_s32_f32(vmulq_f32(v_src0.val[0], v_scale));
int32x4_t v_ix1 = cv_vrndq_s32_f32(vmulq_f32(v_src1.val[0], v_scale));
int32x4_t v_iy0 = cv_vrndq_s32_f32(vmulq_f32(v_src0.val[1], v_scale));
int32x4_t v_iy1 = cv_vrndq_s32_f32(vmulq_f32(v_src1.val[1], v_scale));
int16x8x2_t v_dst;
v_dst.val[0] = vcombine_s16(vqmovn_s32(vshrq_n_s32(v_ix0, INTER_BITS)),
vqmovn_s32(vshrq_n_s32(v_ix1, INTER_BITS)));
v_dst.val[1] = vcombine_s16(vqmovn_s32(vshrq_n_s32(v_iy0, INTER_BITS)),
vqmovn_s32(vshrq_n_s32(v_iy1, INTER_BITS)));
vst2q_s16(dst1 + (x << 1), v_dst);
uint16x4_t v_dst0 = vqmovun_s32(vaddq_s32(vshlq_n_s32(vandq_s32(v_iy0, v_mask), INTER_BITS),
vandq_s32(v_ix0, v_mask)));
uint16x4_t v_dst1 = vqmovun_s32(vaddq_s32(vshlq_n_s32(vandq_s32(v_iy1, v_mask), INTER_BITS),
vandq_s32(v_ix1, v_mask)));
vst1q_u16(dst2 + x, vcombine_u16(v_dst0, v_dst1));
}
#elif CV_SSE4_1
if (useSSE4_1)
{
__m128 v_its = _mm_set1_ps(INTER_TAB_SIZE);
__m128i v_its1 = _mm_set1_epi32(INTER_TAB_SIZE-1);
__m128i v_y_mask = _mm_set1_epi32((INTER_TAB_SIZE-1) << 16);
for( ; x <= size.width - 4; x += 4 )
{
__m128i v_src0 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x * 2), v_its));
__m128i v_src1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_loadu_ps(src1f + x * 2 + 4), v_its));
__m128i v_dst1 = _mm_packs_epi32(_mm_srai_epi32(v_src0, INTER_BITS),
_mm_srai_epi32(v_src1, INTER_BITS));
_mm_storeu_si128((__m128i *)(dst1 + x * 2), v_dst1);
// x0 y0 x1 y1 . . .
v_src0 = _mm_packs_epi32(_mm_and_si128(v_src0, v_its1),
_mm_and_si128(v_src1, v_its1));
__m128i v_dst2 = _mm_or_si128(_mm_srli_epi32(_mm_and_si128(v_src0, v_y_mask), 16 - INTER_BITS), // y0 0 y1 0 . . .
_mm_and_si128(v_src0, v_its1)); // 0 x0 0 x1 . . .
_mm_storel_epi64((__m128i *)(dst2 + x), _mm_packus_epi32(v_dst2, v_dst2));
}
}
#endif
for( ; x < size.width; x++ )
{
int ix = saturate_cast<int>(src1f[x*2]*INTER_TAB_SIZE);
int iy = saturate_cast<int>(src1f[x*2+1]*INTER_TAB_SIZE);
dst1[x*2] = saturate_cast<short>(ix >> INTER_BITS);
dst1[x*2+1] = saturate_cast<short>(iy >> INTER_BITS);
dst2[x] = (ushort)((iy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (ix & (INTER_TAB_SIZE-1)));
}
}
}
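// Branch 3: fixed-point -> two float planes; undoes the encoding:
// x = ix + (fxy & (INTER_TAB_SIZE-1))*scale, y = iy + (fxy >> INTER_BITS)*scale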
else if( m1type == CV_16SC2 && dstm1type == CV_32FC1 )
{
#if CV_NEON
uint16x8_t v_mask2 = vdupq_n_u16(INTER_TAB_SIZE2-1);
uint32x4_t v_zero = vdupq_n_u32(0u), v_mask = vdupq_n_u32(INTER_TAB_SIZE-1);
float32x4_t v_scale = vdupq_n_f32(scale);
for( ; x <= size.width - 8; x += 8)
{
uint32x4_t v_fxy1, v_fxy2;
if (src2)
{
uint16x8_t v_src2 = vandq_u16(vld1q_u16(src2 + x), v_mask2);
v_fxy1 = vmovl_u16(vget_low_u16(v_src2));
v_fxy2 = vmovl_u16(vget_high_u16(v_src2));
}
else
v_fxy1 = v_fxy2 = v_zero;
int16x8x2_t v_src = vld2q_s16(src1 + (x << 1));
float32x4_t v_dst1 = vmlaq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src.val[0]))),
v_scale, vcvtq_f32_u32(vandq_u32(v_fxy1, v_mask)));
float32x4_t v_dst2 = vmlaq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src.val[1]))),
v_scale, vcvtq_f32_u32(vshrq_n_u32(v_fxy1, INTER_BITS)));
vst1q_f32(dst1f + x, v_dst1);
vst1q_f32(dst2f + x, v_dst2);
v_dst1 = vmlaq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src.val[0]))),
v_scale, vcvtq_f32_u32(vandq_u32(v_fxy2, v_mask)));
v_dst2 = vmlaq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src.val[1]))),
v_scale, vcvtq_f32_u32(vshrq_n_u32(v_fxy2, INTER_BITS)));
vst1q_f32(dst1f + x + 4, v_dst1);
vst1q_f32(dst2f + x + 4, v_dst2);
}
#elif CV_SSE2
__m128i v_mask2 = _mm_set1_epi16(INTER_TAB_SIZE2-1);
__m128i v_zero = _mm_setzero_si128(), v_mask = _mm_set1_epi32(INTER_TAB_SIZE-1);
__m128 v_scale = _mm_set1_ps(scale);
for( ; x <= size.width - 16; x += 16)
{
__m128i v_src10 = _mm_loadu_si128((__m128i const *)(src1 + x * 2));
__m128i v_src11 = _mm_loadu_si128((__m128i const *)(src1 + x * 2 + 8));
__m128i v_src20 = _mm_loadu_si128((__m128i const *)(src1 + x * 2 + 16));
__m128i v_src21 = _mm_loadu_si128((__m128i const *)(src1 + x * 2 + 24));
_mm_deinterleave_epi16(v_src10, v_src11, v_src20, v_src21);
__m128i v_fxy = src2 ? _mm_and_si128(_mm_loadu_si128((__m128i const *)(src2 + x)), v_mask2) : v_zero;
__m128i v_fxy_p = _mm_unpacklo_epi16(v_fxy, v_zero);
_mm_storeu_ps(dst1f + x, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src10), 16)),
_mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_and_si128(v_fxy_p, v_mask)))));
_mm_storeu_ps(dst2f + x, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src20), 16)),
_mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_srli_epi32(v_fxy_p, INTER_BITS)))));
v_fxy_p = _mm_unpackhi_epi16(v_fxy, v_zero);
_mm_storeu_ps(dst1f + x + 4, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src10), 16)),
_mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_and_si128(v_fxy_p, v_mask)))));
_mm_storeu_ps(dst2f + x + 4, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src20), 16)),
_mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_srli_epi32(v_fxy_p, INTER_BITS)))));
v_fxy = src2 ? _mm_and_si128(_mm_loadu_si128((__m128i const *)(src2 + x + 8)), v_mask2) : v_zero;
v_fxy_p = _mm_unpacklo_epi16(v_fxy, v_zero);
_mm_storeu_ps(dst1f + x + 8, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src11), 16)),
_mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_and_si128(v_fxy_p, v_mask)))));
_mm_storeu_ps(dst2f + x + 8, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src21), 16)),
_mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_srli_epi32(v_fxy_p, INTER_BITS)))));
v_fxy_p = _mm_unpackhi_epi16(v_fxy, v_zero);
_mm_storeu_ps(dst1f + x + 12, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src11), 16)),
_mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_and_si128(v_fxy_p, v_mask)))));
_mm_storeu_ps(dst2f + x + 12, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src21), 16)),
_mm_mul_ps(v_scale, _mm_cvtepi32_ps(_mm_srli_epi32(v_fxy_p, INTER_BITS)))));
}
#endif
for( ; x < size.width; x++ )
{
int fxy = src2 ? src2[x] & (INTER_TAB_SIZE2-1) : 0;
dst1f[x] = src1[x*2] + (fxy & (INTER_TAB_SIZE-1))*scale;
dst2f[x] = src1[x*2+1] + (fxy >> INTER_BITS)*scale;
}
}
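// Branch 4: fixed-point -> interleaved CV_32FC2; same reconstruction as
// branch 3 with interleaved stores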
else if( m1type == CV_16SC2 && dstm1type == CV_32FC2 )
{
#if CV_NEON
int16x8_t v_mask2 = vdupq_n_s16(INTER_TAB_SIZE2-1);
int32x4_t v_zero = vdupq_n_s32(0), v_mask = vdupq_n_s32(INTER_TAB_SIZE-1);
float32x4_t v_scale = vdupq_n_f32(scale);
for( ; x <= size.width - 8; x += 8)
{
int32x4_t v_fxy1, v_fxy2;
if (src2)
{
int16x8_t v_src2 = vandq_s16(vld1q_s16((short *)src2 + x), v_mask2);
v_fxy1 = vmovl_s16(vget_low_s16(v_src2));
v_fxy2 = vmovl_s16(vget_high_s16(v_src2));
}
else
v_fxy1 = v_fxy2 = v_zero;
int16x8x2_t v_src = vld2q_s16(src1 + (x << 1));
float32x4x2_t v_dst;
v_dst.val[0] = vmlaq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src.val[0]))),
v_scale, vcvtq_f32_s32(vandq_s32(v_fxy1, v_mask)));
v_dst.val[1] = vmlaq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src.val[1]))),
v_scale, vcvtq_f32_s32(vshrq_n_s32(v_fxy1, INTER_BITS)));
vst2q_f32(dst1f + (x << 1), v_dst);
v_dst.val[0] = vmlaq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src.val[0]))),
v_scale, vcvtq_f32_s32(vandq_s32(v_fxy2, v_mask)));
v_dst.val[1] = vmlaq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src.val[1]))),
v_scale, vcvtq_f32_s32(vshrq_n_s32(v_fxy2, INTER_BITS)));
vst2q_f32(dst1f + (x << 1) + 8, v_dst);
}
#elif CV_SSE2
if (useSSE2)
{
__m128i v_mask2 = _mm_set1_epi16(INTER_TAB_SIZE2-1);
__m128i v_zero = _mm_setzero_si128(), v_mask = _mm_set1_epi32(INTER_TAB_SIZE-1);
__m128 v_scale = _mm_set1_ps(scale);
for ( ; x <= size.width - 4; x += 4)
{
__m128i v_src = _mm_loadu_si128((__m128i const *)(src1 + x * 2)); // x0 y0 x1 y1 x2 y2 x3 y3
__m128i v_fxy = src2 ? _mm_and_si128(_mm_loadl_epi64((__m128i const *)(src2 + x)), v_mask2) : v_zero;
v_fxy = _mm_unpacklo_epi16(v_fxy, v_zero); // fxy0 fxy1 fxy2 fxy3 as int32
__m128i v_fx = _mm_and_si128(v_fxy, v_mask);
__m128i v_fy = _mm_srli_epi32(v_fxy, INTER_BITS);
// re-interleave the fractional parts into x0 y0 x1 y1 order
__m128 v_add0 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi32(v_fx, v_fy)), v_scale);
__m128 v_add1 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi32(v_fx, v_fy)), v_scale);
// sign-extend the packed 16-bit coordinates before converting to float
_mm_storeu_ps(dst1f + x * 2, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v_zero, v_src), 16)), v_add0));
_mm_storeu_ps(dst1f + x * 2 + 4, _mm_add_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v_zero, v_src), 16)), v_add1));
}
}
#endif
for( ; x < size.width; x++ )
{
int fxy = src2 ? src2[x] & (INTER_TAB_SIZE2-1): 0;
dst1f[x*2] = src1[x*2] + (fxy & (INTER_TAB_SIZE-1))*scale;
dst1f[x*2+1] = src1[x*2+1] + (fxy >> INTER_BITS)*scale;
}
}
else
CV_Error( CV_StsNotImplemented, "Unsupported combination of input/output matrices" );
}
}
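// A minimal usage sketch (illustrative, not part of this file): convert a
// float map pair to the fixed-point representation once, then reuse it for
// repeated remap() calls:
//
//     Mat mapx, mapy;            // CV_32FC1 maps, e.g. from initUndistortRectifyMap
//     Mat fixedXY, fixedInterp;
//     convertMaps(mapx, mapy, fixedXY, fixedInterp, CV_16SC2, false);
//     remap(src, dst, fixedXY, fixedInterp, INTER_LINEAR);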