Fast implementation of the box filter algorithm on HiSilicon — BoxFilter

1. Accelerate the box filter computation on HiSilicon using an integral image. Technical points adopted:
A. Integral image
B. ARM inline assembly
C. Data preloading, etc.
2. The code is as follows:
The function input has already been converted from u8 to int32_t to simplify the subsequent calculations. It could also be kept as u8 and converted with the instruction set during processing, but testing showed that per-element data-type conversion is time-consuming, so a one-time conversion to int32_t is done up front.

/*
 * Box filter computed from an integral image, accelerated with NEON
 * intrinsics and (optionally) AArch64 inline assembly.
 *
 * Interior output:
 *   out[r][c] = ((D - C - B + A) * m_scale) >> 19
 * where A, B, C, D are the four corner samples of the filter window in the
 * integral image (the SIMD paths use srshr, i.e. a *rounding* shift, while
 * the scalar tails use a plain arithmetic >> 19).
 *
 * Parameters:
 *   in                 - input image, rows x cols, int32 samples; must not be NULL
 *   out                - output buffer, (rows - 2*m_pad) x (cols - 2*m_pad)
 *   inteImageOutPtr    - scratch buffer (rows x cols) that receives the integral image
 *   integralImgColsPtr - unused; kept for interface compatibility
 *   rows, cols         - input image dimensions
 *   M                  - unused; kept for interface compatibility
 *   normalize          - unused; kept for interface compatibility
 *
 * NOTE(review): m_pad, m_scale and m_maxThreadNum are file/class-scope state
 * defined elsewhere; m_scale is presumably the reciprocal of the window area
 * in Q19 fixed point (hence the >> 19) — confirm against the setup code.
 * NOTE(review): the first-row/first-column edge passes subtract at index
 * iii - m_pad (not iii - m_pad - 1), so the edge windows span 2*m_pad samples
 * versus 2*m_pad + 1 in the interior — confirm this boundary handling is
 * intentional; behavior is kept identical to the original.
 */
void imfilterInteImage(int* in, int* out, int* inteImageOutPtr, int* integralImgColsPtr,
                       int rows, int cols, int M, bool normalize)
{
    (void)integralImgColsPtr; /* unused */
    (void)M;                  /* unused */
    (void)normalize;          /* unused */

    if (in == NULL)
        return;

    /* Build the inclusive integral image of `in` into inteImageOutPtr. */
    integralImage(in, inteImageOutPtr, rows, cols);

    int colsSubPad   = cols - m_pad;  /* exclusive right bound of full windows  */
    int rowsSubPad   = rows - m_pad;  /* exclusive bottom bound of full windows */
    int m_pad_double = m_pad + m_pad;
    int iii          = m_pad;         /* FIX: was read uninitialized by the NEON loops below */

    /* step 1: first output row (top edge: only a horizontal difference on
     * integral-image row 2*m_pad is needed). */
#ifdef NEON_ENABLE
    int* ptr0   = inteImageOutPtr + m_pad_double * cols + m_pad_double; /* index iii + m_pad */
    int* ptr1   = inteImageOutPtr + m_pad_double * cols;                /* index iii - m_pad */
    int* outPtr = out;
    for (; iii + 7 < colsSubPad; iii += 8) {
#ifdef ARM_ASSEMBLY
        asm volatile(
            "dup v8.4s,%w6\n"               /* v8 = m_scale (broadcast)        */
            "0:\n"
            "prfm pldl1keep,[%0,#512]\n"    /* prefetch both corner streams    */
            "prfm pldl1keep,[%1,#512]\n"
            "ld1 {v0.4s,v1.4s},[%0],#32\n"  /* right corners                   */
            "ld1 {v2.4s,v3.4s},[%1],#32\n"  /* left corners                    */
            "sub v0.4s,v0.4s,v2.4s\n"       /* right - left                    */
            "sub v1.4s,v1.4s,v3.4s\n"
            "mul v0.4s,v0.4s,v8.4s\n"       /* * m_scale                       */
            "mul v1.4s,v1.4s,v8.4s\n"
            "srshr v0.4s,v0.4s,#19\n"       /* rounding >> 19                  */
            "srshr v1.4s,v1.4s,#19\n"
            "st1 {v0.4s,v1.4s},[%2],#32\n"
            : "=r"(ptr0), "=r"(ptr1), "=r"(outPtr)
            : "0"(ptr0), "1"(ptr1), "2"(outPtr), "r"(m_scale)
            : "memory", "v0", "v1", "v2", "v3", "v8");
#else
        int32x4_t right0 = vld1q_s32(ptr0);     /* inteImage[2*pad][iii + pad ..] */
        int32x4_t right1 = vld1q_s32(ptr0 + 4);
        int32x4_t left0  = vld1q_s32(ptr1);     /* inteImage[2*pad][iii - pad ..] */
        int32x4_t left1  = vld1q_s32(ptr1 + 4);
        int32x4_t res0 = vrshrq_n_s32(vmulq_s32(vsubq_s32(right0, left0), vdupq_n_s32(m_scale)), 19);
        int32x4_t res1 = vrshrq_n_s32(vmulq_s32(vsubq_s32(right1, left1), vdupq_n_s32(m_scale)), 19);
        vst1q_s32(outPtr, res0);
        vst1q_s32(outPtr + 4, res1);
        ptr0 += 8;
        ptr1 += 8;
        outPtr += 8;
#endif
    }
    for (; iii + 3 < colsSubPad; iii += 4) {
        int32x4_t right = vld1q_s32(ptr0);
        int32x4_t left  = vld1q_s32(ptr1);
        int32x4_t res = vrshrq_n_s32(vmulq_s32(vsubq_s32(right, left), vdupq_n_s32(m_scale)), 19);
        vst1q_s32(outPtr, res);
        ptr0 += 4;
        ptr1 += 4;
        outPtr += 4;
    }
    for (; iii < colsSubPad; iii++) {           /* scalar tail */
        int outIndex = iii - m_pad;
        out[outIndex] = ((inteImageOutPtr[m_pad_double * cols + iii + m_pad]
                        - inteImageOutPtr[m_pad_double * cols + iii - m_pad]) * m_scale) >> 19;
    }
#else
    for (iii = m_pad; iii < colsSubPad; iii++) {
        int outIndex = iii - m_pad;
        out[outIndex] = ((inteImageOutPtr[m_pad_double * cols + iii + m_pad]
                        - inteImageOutPtr[m_pad_double * cols + iii - m_pad]) * m_scale) >> 19;
    }
#endif

    /* step 2: first output column of every remaining row (left edge:
     * vertical difference on integral-image column 2*m_pad). */
#ifdef OMP_ENABLE
#pragma omp parallel for num_threads(m_maxThreadNum)
#endif
    for (iii = m_pad; iii < rowsSubPad; iii++) {
        int outIndex = (iii - m_pad) * (cols - 2 * m_pad);
        out[outIndex] = ((inteImageOutPtr[(iii + m_pad) * cols + m_pad_double]
                        - inteImageOutPtr[(iii - m_pad) * cols + m_pad_double]) * m_scale) >> 19;
    }

    /* step 3: interior. Window sum from the four integral-image corners:
     *   A---------B
     *   |    O    |
     *   C---------D        window sum = D - C - B + A
     */
#ifdef NEON_ENABLE
#ifdef OMP_ENABLE
#pragma omp parallel for num_threads(m_maxThreadNum)
#endif
    for (iii = m_pad + 1; iii < rowsSubPad; iii++) {
        int jjj = m_pad + 1;
        int* Dptr = inteImageOutPtr + (iii + m_pad) * cols + m_pad_double + 1;     /* (iii+pad,   jjj+pad)   */
        int* Bptr = inteImageOutPtr + (iii - m_pad - 1) * cols + m_pad_double + 1; /* (iii-pad-1, jjj+pad)   */
        int* Cptr = inteImageOutPtr + (iii + m_pad) * cols;                        /* (iii+pad,   jjj-pad-1) */
        int* Aptr = inteImageOutPtr + (iii - m_pad - 1) * cols;                    /* (iii-pad-1, jjj-pad-1) */
        int* outPtr = out + (iii - m_pad) * (cols - 2 * m_pad) + 1;
#ifndef NEON_SIMD_4
        for (; jjj + 15 < colsSubPad; jjj += 16) {
#ifdef ARM_ASSEMBLY
            asm volatile(
                "dup v8.4s,%w10\n"                          /* v8 = m_scale */
                "0:\n"
                "prfm pldl1keep,[%0,#512]\n"
                "prfm pldl1keep,[%1,#512]\n"
                "prfm pldl1keep,[%2,#512]\n"
                "prfm pldl1keep,[%3,#512]\n"
                "ld1 {v0.4s,v1.4s,v2.4s,v3.4s},[%3],#64\n"  /* D             */
                "ld1 {v4.4s,v5.4s,v6.4s,v7.4s},[%2],#64\n"  /* C             */
                "sub v0.4s,v0.4s,v4.4s\n"                   /* D - C         */
                "sub v1.4s,v1.4s,v5.4s\n"
                "sub v2.4s,v2.4s,v6.4s\n"
                "sub v3.4s,v3.4s,v7.4s\n"
                "ld1 {v4.4s,v5.4s,v6.4s,v7.4s},[%1],#64\n"  /* B             */
                "sub v0.4s,v0.4s,v4.4s\n"                   /* D - C - B     */
                "sub v1.4s,v1.4s,v5.4s\n"
                "sub v2.4s,v2.4s,v6.4s\n"
                "sub v3.4s,v3.4s,v7.4s\n"
                "ld1 {v4.4s,v5.4s,v6.4s,v7.4s},[%0],#64\n"  /* A             */
                "add v0.4s,v0.4s,v4.4s\n"                   /* D - C - B + A */
                "add v1.4s,v1.4s,v5.4s\n"
                "add v2.4s,v2.4s,v6.4s\n"
                "add v3.4s,v3.4s,v7.4s\n"
                "mul v0.4s,v0.4s,v8.4s\n"                   /* * m_scale     */
                "mul v1.4s,v1.4s,v8.4s\n"
                "mul v2.4s,v2.4s,v8.4s\n"
                "mul v3.4s,v3.4s,v8.4s\n"
                "srshr v0.4s,v0.4s,#19\n"                   /* rounding >>19 */
                "srshr v1.4s,v1.4s,#19\n"
                "srshr v2.4s,v2.4s,#19\n"
                "srshr v3.4s,v3.4s,#19\n"
                "st1 {v0.4s,v1.4s,v2.4s,v3.4s},[%4],#64\n"
                : "=r"(Aptr), "=r"(Bptr), "=r"(Cptr), "=r"(Dptr), "=r"(outPtr)
                : "0"(Aptr), "1"(Bptr), "2"(Cptr), "3"(Dptr), "4"(outPtr), "r"(m_scale)
                : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8");
#else
            int32x4_t D  = vld1q_s32(Dptr);
            int32x4_t D2 = vld1q_s32(Dptr + 4);
            int32x4_t D3 = vld1q_s32(Dptr + 8);
            int32x4_t D4 = vld1q_s32(Dptr + 12);
            int32x4_t B  = vld1q_s32(Bptr);
            int32x4_t B2 = vld1q_s32(Bptr + 4);
            int32x4_t B3 = vld1q_s32(Bptr + 8);
            int32x4_t B4 = vld1q_s32(Bptr + 12);
            int32x4_t C  = vld1q_s32(Cptr);
            int32x4_t C2 = vld1q_s32(Cptr + 4);
            int32x4_t C3 = vld1q_s32(Cptr + 8);
            int32x4_t C4 = vld1q_s32(Cptr + 12);
            int32x4_t A  = vld1q_s32(Aptr);
            int32x4_t A2 = vld1q_s32(Aptr + 4);
            int32x4_t A3 = vld1q_s32(Aptr + 8);
            int32x4_t A4 = vld1q_s32(Aptr + 12);
            /* (D - B - C + A) * m_scale, rounding shift right by 19. */
            int32x4_t r1 = vrshrq_n_s32(vmulq_s32(vaddq_s32(vsubq_s32(vsubq_s32(D,  B),  C),  A),  vdupq_n_s32(m_scale)), 19);
            int32x4_t r2 = vrshrq_n_s32(vmulq_s32(vaddq_s32(vsubq_s32(vsubq_s32(D2, B2), C2), A2), vdupq_n_s32(m_scale)), 19);
            int32x4_t r3 = vrshrq_n_s32(vmulq_s32(vaddq_s32(vsubq_s32(vsubq_s32(D3, B3), C3), A3), vdupq_n_s32(m_scale)), 19);
            int32x4_t r4 = vrshrq_n_s32(vmulq_s32(vaddq_s32(vsubq_s32(vsubq_s32(D4, B4), C4), A4), vdupq_n_s32(m_scale)), 19);
            vst1q_s32(outPtr,      r1);
            vst1q_s32(outPtr + 4,  r2);
            vst1q_s32(outPtr + 8,  r3);
            vst1q_s32(outPtr + 12, r4);
            Dptr += 16;
            Bptr += 16;
            Cptr += 16;
            Aptr += 16;
            outPtr += 16;
#endif
        }
        for (; jjj + 7 < colsSubPad; jjj += 8) {
#ifdef ARM_ASSEMBLY
            asm volatile(
                "dup v8.4s,%w10\n"              /* v8 = m_scale */
                "0:\n"
                "prfm pldl1keep,[%0,#512]\n"
                "prfm pldl1keep,[%1,#512]\n"
                "prfm pldl1keep,[%2,#512]\n"
                "prfm pldl1keep,[%3,#512]\n"
                "ld1 {v0.4s,v1.4s},[%0],#32\n"  /* A              */
                "ld1 {v2.4s,v3.4s},[%1],#32\n"  /* B              */
                "ld1 {v4.4s,v5.4s},[%2],#32\n"  /* C              */
                "ld1 {v6.4s,v7.4s},[%3],#32\n"  /* D              */
                "sub v6.4s,v6.4s,v4.4s\n"       /* D - C          */
                "sub v6.4s,v6.4s,v2.4s\n"       /* D - C - B      */
                "add v6.4s,v6.4s,v0.4s\n"       /* D - C - B + A  */
                "mul v6.4s,v6.4s,v8.4s\n"       /* * m_scale      */
                "srshr v6.4s,v6.4s,#19\n"       /* rounding >> 19 */
                "sub v7.4s,v7.4s,v5.4s\n"       /* second lane group */
                "sub v7.4s,v7.4s,v3.4s\n"
                "add v7.4s,v7.4s,v1.4s\n"
                "mul v7.4s,v7.4s,v8.4s\n"
                "srshr v7.4s,v7.4s,#19\n"
                "st1 {v6.4s,v7.4s},[%4],#32\n"
                : "=r"(Aptr), "=r"(Bptr), "=r"(Cptr), "=r"(Dptr), "=r"(outPtr)
                : "0"(Aptr), "1"(Bptr), "2"(Cptr), "3"(Dptr), "4"(outPtr), "r"(m_scale)
                : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8");
#else
            int32x4_t D  = vld1q_s32(Dptr);
            int32x4_t D2 = vld1q_s32(Dptr + 4);
            int32x4_t B  = vld1q_s32(Bptr);
            int32x4_t B2 = vld1q_s32(Bptr + 4);
            int32x4_t C  = vld1q_s32(Cptr);
            int32x4_t C2 = vld1q_s32(Cptr + 4);
            int32x4_t A  = vld1q_s32(Aptr);
            int32x4_t A2 = vld1q_s32(Aptr + 4);
            int32x4_t r1 = vrshrq_n_s32(vmulq_s32(vaddq_s32(vsubq_s32(vsubq_s32(D,  B),  C),  A),  vdupq_n_s32(m_scale)), 19);
            int32x4_t r2 = vrshrq_n_s32(vmulq_s32(vaddq_s32(vsubq_s32(vsubq_s32(D2, B2), C2), A2), vdupq_n_s32(m_scale)), 19);
            vst1q_s32(outPtr,     r1);
            vst1q_s32(outPtr + 4, r2);
            Dptr += 8;
            Bptr += 8;
            Cptr += 8;
            Aptr += 8;
            outPtr += 8;
#endif
        }
#endif /* !NEON_SIMD_4 */
        for (; jjj + 3 < colsSubPad; jjj += 4) {
#ifdef ARM_ASSEMBLY
            asm volatile(
                "dup v8.4s,%w10\n"          /* v8 = m_scale */
                "0:\n"
                "prfm pldl1keep,[%0,#512]\n"
                "prfm pldl1keep,[%1,#512]\n"
                "prfm pldl1keep,[%2,#512]\n"
                "prfm pldl1keep,[%3,#512]\n"
                "ld1 {v0.4s},[%3],#16\n"    /* D              */
                "ld1 {v1.4s},[%2],#16\n"    /* C              */
                "sub v0.4s,v0.4s,v1.4s\n"   /* D - C          */
                "ld1 {v1.4s},[%1],#16\n"    /* B              */
                "sub v0.4s,v0.4s,v1.4s\n"   /* D - C - B      */
                "ld1 {v1.4s},[%0],#16\n"    /* A              */
                "add v0.4s,v0.4s,v1.4s\n"   /* D - C - B + A  */
                "mul v0.4s,v0.4s,v8.4s\n"   /* * m_scale      */
                "srshr v0.4s,v0.4s,#19\n"   /* rounding >> 19 */
                "st1 {v0.4s},[%4],#16\n"
                : "=r"(Aptr), "=r"(Bptr), "=r"(Cptr), "=r"(Dptr), "=r"(outPtr)
                : "0"(Aptr), "1"(Bptr), "2"(Cptr), "3"(Dptr), "4"(outPtr), "r"(m_scale)
                : "memory", "v0", "v1", "v8");
#else
            int32x4_t D = vld1q_s32(Dptr);
            int32x4_t B = vld1q_s32(Bptr);
            int32x4_t C = vld1q_s32(Cptr);
            int32x4_t A = vld1q_s32(Aptr);
            int32x4_t r = vrshrq_n_s32(vmulq_s32(vaddq_s32(vsubq_s32(vsubq_s32(D, B), C), A), vdupq_n_s32(m_scale)), 19);
            vst1q_s32(outPtr, r);
            Dptr += 4;
            Bptr += 4;
            Cptr += 4;
            Aptr += 4;
            outPtr += 4;
#endif
        }
        for (; jjj < colsSubPad; jjj++) {   /* scalar tail */
            int outIndex = (iii - m_pad) * (cols - 2 * m_pad) + jjj - m_pad;
            out[outIndex] = ((inteImageOutPtr[(iii + m_pad) * cols + jjj + m_pad]
                            - inteImageOutPtr[(iii - m_pad - 1) * cols + jjj + m_pad]
                            - inteImageOutPtr[(iii + m_pad) * cols + jjj - m_pad - 1]
                            + inteImageOutPtr[(iii - m_pad - 1) * cols + jjj - m_pad - 1]) * m_scale) >> 19;
        }
    }
#else
#ifdef OMP_ENABLE
#pragma omp parallel for num_threads(m_maxThreadNum)
#endif
    for (iii = m_pad + 1; iii < rowsSubPad; iii++) {
        for (int jjj = m_pad + 1; jjj < colsSubPad; jjj++) {
            int outIndex = (iii - m_pad) * (cols - 2 * m_pad) + jjj - m_pad;
            out[outIndex] = ((inteImageOutPtr[(iii + m_pad) * cols + jjj + m_pad]
                            - inteImageOutPtr[(iii - m_pad - 1) * cols + jjj + m_pad]
                            - inteImageOutPtr[(iii + m_pad) * cols + jjj - m_pad - 1]
                            + inteImageOutPtr[(iii - m_pad - 1) * cols + jjj - m_pad - 1]) * m_scale) >> 19;
        }
    }
#endif
}

3. The implementation of the integral image is as follows:

/*
 * Inclusive integral image: out[i][j] = sum of in[0..i][0..j].
 *
 * Parameters:
 *   in              - input image, rows x cols, int32 samples; must not be NULL
 *   inteImageOutPtr - output buffer, rows x cols
 *   rows, cols      - image dimensions
 *
 * Row 0 is a plain horizontal prefix sum. Each later row keeps a scalar
 * running sum of its own samples (colsLocSum) and adds the previous output
 * row; the NEON paths compute 16/8/4 running prefixes in scalar code, then
 * do the two additions with SIMD.
 */
void integralImage(int* in, int* inteImageOutPtr, int rows, int cols)
{
    if (in == NULL)
        return;

    int* out = inteImageOutPtr;

    /* Row 0: horizontal prefix sum. */
    out[0] = in[0];
    for (int iii = 1; iii < cols; iii++) {
        out[iii] = in[iii] + out[iii - 1];
    }

    for (int iii = 1; iii < rows; iii++) {
        int32_t colsLocSum = 0;   /* running sum of in[iii][0..jjj-1] */
        int jjj = 0;
#ifdef NEON_ENABLE
        int index   = iii * cols;
        int befLine = (iii - 1) * cols;
        int* inLocPtr       = in + index;     /* current input row      */
        int* befLineDataPtr = out + befLine;  /* previous output row    */
        int* outPtr_1       = out + index;    /* current output row     */
#ifndef NEON_SIMD_4
        /* 16 elements per iteration. */
        for (; jjj + 15 < cols; jjj += 16) {
            /* Scalar prefix sums of the 16 new samples (inherently serial). */
            int num0  = *inLocPtr;
            int num1  = *(inLocPtr + 1)  + num0;
            int num2  = *(inLocPtr + 2)  + num1;
            int num3  = *(inLocPtr + 3)  + num2;
            int num4  = *(inLocPtr + 4)  + num3;
            int num5  = *(inLocPtr + 5)  + num4;
            int num6  = *(inLocPtr + 6)  + num5;
            int num7  = *(inLocPtr + 7)  + num6;
            int num8  = *(inLocPtr + 8)  + num7;
            int num9  = *(inLocPtr + 9)  + num8;
            int num10 = *(inLocPtr + 10) + num9;
            int num11 = *(inLocPtr + 11) + num10;
            int num12 = *(inLocPtr + 12) + num11;
            int num13 = *(inLocPtr + 13) + num12;
            int num14 = *(inLocPtr + 14) + num13;
            int num15 = *(inLocPtr + 15) + num14;

#ifdef ARM_ASSEMBLY
            int packNum[16] = { num0, num1, num2,  num3,  num4,  num5,  num6,  num7,
                                num8, num9, num10, num11, num12, num13, num14, num15 };
            int* packNum_ = packNum;
            asm volatile(
                "dup v8.4s,%w6\n"                           /* v8 = colsLocSum         */
                "0:\n"
                "prfm pldl1keep,[%1,#512]\n"
                "ld1 {v0.4s,v1.4s,v2.4s,v3.4s},[%0]\n"      /* row prefixes            */
                "add v0.4s,v0.4s,v8.4s\n"                   /* + colsLocSum            */
                "add v1.4s,v1.4s,v8.4s\n"
                "add v2.4s,v2.4s,v8.4s\n"
                "add v3.4s,v3.4s,v8.4s\n"
                "ld1 {v4.4s,v5.4s,v6.4s,v7.4s},[%1],#64\n"  /* previous output row     */
                "add v0.4s,v0.4s,v4.4s\n"
                "add v1.4s,v1.4s,v5.4s\n"
                "add v2.4s,v2.4s,v6.4s\n"
                "add v3.4s,v3.4s,v7.4s\n"
                "st1 {v0.4s,v1.4s,v2.4s,v3.4s},[%2],#64\n"
                : "=r"(packNum_), "=r"(befLineDataPtr), "=r"(outPtr_1)
                : "0"(packNum_), "1"(befLineDataPtr), "2"(outPtr_1), "r"(colsLocSum)
                : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8");
#else
            int32x4_t carry = vdupq_n_s32(colsLocSum);
            int32x4_t pack0 = { num0,  num1,  num2,  num3  };
            int32x4_t pack1 = { num4,  num5,  num6,  num7  };
            int32x4_t pack2 = { num8,  num9,  num10, num11 };
            int32x4_t pack3 = { num12, num13, num14, num15 };
            vst1q_s32(outPtr_1,      vaddq_s32(vld1q_s32(befLineDataPtr),      vaddq_s32(carry, pack0)));
            vst1q_s32(outPtr_1 + 4,  vaddq_s32(vld1q_s32(befLineDataPtr + 4),  vaddq_s32(carry, pack1)));
            vst1q_s32(outPtr_1 + 8,  vaddq_s32(vld1q_s32(befLineDataPtr + 8),  vaddq_s32(carry, pack2)));
            vst1q_s32(outPtr_1 + 12, vaddq_s32(vld1q_s32(befLineDataPtr + 12), vaddq_s32(carry, pack3)));
            outPtr_1 += 16;
            befLineDataPtr += 16;
#endif
            inLocPtr += 16;
            colsLocSum += num15;  /* carry the full 16-sample sum forward */
        }

        /* 8 elements per iteration. */
        for (; jjj + 7 < cols; jjj += 8) {
            int num0 = *inLocPtr;
            int num1 = *(inLocPtr + 1) + num0;
            int num2 = *(inLocPtr + 2) + num1;
            int num3 = *(inLocPtr + 3) + num2;
            int num4 = *(inLocPtr + 4) + num3;
            int num5 = *(inLocPtr + 5) + num4;
            int num6 = *(inLocPtr + 6) + num5;
            int num7 = *(inLocPtr + 7) + num6;
#ifdef ARM_ASSEMBLY
            int packNum[8] = { num0, num1, num2, num3, num4, num5, num6, num7 }; /* FIX: was [16] */
            int* packNum_ = packNum;
            asm volatile(
                "dup v8.4s,%w6\n"               /* v8 = colsLocSum     */
                "0:\n"
                "prfm pldl1keep,[%1,#512]\n"
                "ld1 {v0.4s,v1.4s},[%0]\n"      /* row prefixes        */
                "add v0.4s,v0.4s,v8.4s\n"
                "add v1.4s,v1.4s,v8.4s\n"
                "ld1 {v4.4s,v5.4s},[%1],#32\n"  /* previous output row */
                "add v0.4s,v0.4s,v4.4s\n"
                "add v1.4s,v1.4s,v5.4s\n"
                "st1 {v0.4s,v1.4s},[%2],#32\n"
                : "=r"(packNum_), "=r"(befLineDataPtr), "=r"(outPtr_1)
                : "0"(packNum_), "1"(befLineDataPtr), "2"(outPtr_1), "r"(colsLocSum)
                : "memory", "v0", "v1", "v4", "v5", "v8");
#else
            int32x4_t carry = vdupq_n_s32(colsLocSum);
            int32x4_t pack0 = { num0, num1, num2, num3 };
            int32x4_t pack1 = { num4, num5, num6, num7 };
            vst1q_s32(outPtr_1,     vaddq_s32(vld1q_s32(befLineDataPtr),     vaddq_s32(carry, pack0)));
            vst1q_s32(outPtr_1 + 4, vaddq_s32(vld1q_s32(befLineDataPtr + 4), vaddq_s32(carry, pack1)));
            outPtr_1 += 8;
            befLineDataPtr += 8;
#endif
            inLocPtr += 8;
            colsLocSum += num7;
        }
#endif /* !NEON_SIMD_4 */

        /* 4 elements per iteration. */
        for (; jjj + 3 < cols; jjj += 4) {
            int num0 = *inLocPtr;
            int num1 = *(inLocPtr + 1) + num0;
            int num2 = *(inLocPtr + 2) + num1;
            int num3 = *(inLocPtr + 3) + num2;
#ifdef ARM_ASSEMBLY
            int packNum[4] = { num0, num1, num2, num3 }; /* FIX: was [16] */
            int* packNum_ = packNum;
            asm volatile(
                "dup v8.4s,%w6\n"           /* v8 = colsLocSum     */
                "0:\n"
                "prfm pldl1keep,[%1,#512]\n"
                "ld1 {v0.4s},[%0]\n"        /* row prefixes        */
                "add v0.4s,v0.4s,v8.4s\n"
                "ld1 {v4.4s},[%1],#16\n"    /* previous output row */
                "add v0.4s,v0.4s,v4.4s\n"
                "st1 {v0.4s},[%2],#16\n"
                : "=r"(packNum_), "=r"(befLineDataPtr), "=r"(outPtr_1)
                : "0"(packNum_), "1"(befLineDataPtr), "2"(outPtr_1), "r"(colsLocSum)
                : "memory", "v0", "v4", "v8");
#else
            int32x4_t pack = { num0, num1, num2, num3 };
            int32x4_t withCarry = vaddq_s32(vdupq_n_s32(colsLocSum), pack);
            vst1q_s32(outPtr_1, vaddq_s32(vld1q_s32(befLineDataPtr), withCarry));
            outPtr_1 += 4;
            befLineDataPtr += 4;
#endif
            inLocPtr += 4;
            colsLocSum += num3;
        }
#endif /* NEON_ENABLE */

        /* Scalar tail (and the whole row when NEON is disabled). */
        for (; jjj < cols; jjj++) {
            int index = iii * cols + jjj;
            colsLocSum += in[index];
            out[index] = colsLocSum + out[(iii - 1) * cols + jjj];
        }
    }
}