#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include <ivs_base_move_type.h>
#ifdef DMALLOC
#include <imp/dmalloc.h>
#endif
#define JZ_BASE_MXU 1
#if JZ_BASE_MXU
#include <mxu2.h>
#include <jzmxu128.h>
#endif
/* Return the smaller of two unsigned bytes. */
static uint8_t JzMin(uint8_t a, uint8_t b)
{
	if (b < a)
		return b;
	return a;
}
/*
 * Scalar merge-erode: for every pixel, compute the absolute difference
 * between src1 and src2, force it to zero when it is below `thresh`,
 * and store the minimum of that value over the 3x3 neighbourhood into
 * dst. Neighbours outside the image are clamped to the nearest edge
 * pixel (border replication).
 */
static void JzMergeErode(IMat src1, IMat src2, IMat dst, int thresh)
{
	int row, col, dr, dc;

	for (row = 0; row < src1.rows; row++) {
		for (col = 0; col < src1.cols; col++) {
			uint8_t best = 255;	/* running 3x3 minimum */
			for (dr = -1; dr <= 1; dr++) {
				/* clamp neighbour row into [0, rows-1] */
				int r = row + dr;
				if (r < 0)
					r = 0;
				else if (r > src1.rows - 1)
					r = src1.rows - 1;
				for (dc = -1; dc <= 1; dc++) {
					/* clamp neighbour column into [0, cols-1] */
					int c = col + dc;
					if (c < 0)
						c = 0;
					else if (c > src1.cols - 1)
						c = src1.cols - 1;
					/* absdiff, then threshold to zero */
					uint8_t d = abs(src1.data[r * src1.step + c]
					              - src2.data[r * src2.step + c]);
					if (d < thresh)
						d = 0;
					if (d < best)
						best = d;
				}
			}
			dst.data[row * dst.step + col] = best;
		}
	}
}

#if JZ_BASE_MXU
#if 0
/*
 * MXU2 (128-bit SIMD) merge-erode, the vector counterpart of JzMergeErode:
 * per pixel t = |S1 - S2|, t = (t < thresh) ? 0 : t, and D receives the
 * minimum of t over the 3x3 neighbourhood (image borders replicated).
 *
 * NOTE(review): this definition sits inside "#if 0" and is not compiled;
 * the active build asserts in MergeBaseMove instead of calling it. It is
 * kept for when the SIMD path is re-enabled.
 *
 * _S1, _S2     : 8-bit source images, row strides Sstep1 / Sstep2 (bytes)
 * D            : 8-bit destination image, row stride Dstep (bytes)
 * thresh       : absdiff values below this become 0 before the erode
 * height,width : image dimensions in pixels
 *
 * Structure of the computation:
 *  - the horizontal 3-wide minimum is built by shuffling the current
 *    16-byte vector with the previous one (masks s1/s2 select the bytes
 *    one/two positions to the left), so each vector store lands at
 *    column i-1;
 *  - rows are processed as: first row, then a loop producing 4 output
 *    rows per iteration (reading rows j-1..j+4), then one-row-at-a-time,
 *    then the last row;
 *  - the non-multiple-of-16 tail of every row is finished with scalar
 *    code that mirrors the vector math via JzMin.
 */
static void MergeBaseErode3_8u(const uint8_t* _S1,const uint8_t* _S2, uint8_t* D, int thresh, int Sstep1,int Sstep2,int Dstep,int height,int width)
{
    uint8_t* S1 = (uint8_t*)_S1;
    uint8_t* S2 = (uint8_t*)_S2;
    /* largest multiple of 16 not exceeding width: the vectorised span */
    int swidth = (width & -16);
    int i=0,j=0;

    v16i8 v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15,/*v16,*/v17,v18,v19,v20;
    v16i8 va,vb,z0,z1;
    v16i8 s0,s1,s2;
    int32_t k0,k1,k2;

    v16i8 base_v0, base_v1, base_v2, base_v3, base_v4, base_v5;
    v16i8 base_cmp0, base_cmp1, base_cmp2, base_cmp3, base_cmp4, base_cmp5;
    v16i8 base_thresh = _mx128_mfcpu_b(thresh);
    /* Build the three byte-shuffle control vectors 32 bits at a time:
     * s0 repacks results into store order, s1/s2 select the bytes at
     * offsets -1/-2 (the second shuffle operand supplies the bytes that
     * fall off the left edge of the current vector).
     * NOTE(review): s0/s1/s2 start uninitialised; all four words of each
     * are written by the four insfcpu_w passes below before first use. */
    k0 = 0x09070503;
    k1 = 0x0503011e;
    k2 = 0x03011e1c;
    s0 = (v16i8)_mx128_insfcpu_w((v4i32)s0,0,k0);
    s1 = (v16i8)_mx128_insfcpu_w((v4i32)s1,0,k1);
    s2 = (v16i8)_mx128_insfcpu_w((v4i32)s2,0,k2);

    k0 = 0x110f0d0b;
    k1 = 0x0d0b0907;
    k2 = 0x0b090705;
    s0 = (v16i8)_mx128_insfcpu_w((v4i32)s0,1,k0);
    s1 = (v16i8)_mx128_insfcpu_w((v4i32)s1,1,k1);
    s2 = (v16i8)_mx128_insfcpu_w((v4i32)s2,1,k2);

    k0 = 0x19171513;
    k1 = 0x1513110f;
    k2 = 0x13110f0d;
    s0 = (v16i8)_mx128_insfcpu_w((v4i32)s0,2,k0);
    s1 = (v16i8)_mx128_insfcpu_w((v4i32)s1,2,k1);
    s2 = (v16i8)_mx128_insfcpu_w((v4i32)s2,2,k2);

    k0 = 0x001f1d1b;
    k1 = 0x1d1b1917;
    k2 = 0x1b191715;
    s0 = (v16i8)_mx128_insfcpu_w((v4i32)s0,3,k0);
    s1 = (v16i8)_mx128_insfcpu_w((v4i32)s1,3,k1);
    s2 = (v16i8)_mx128_insfcpu_w((v4i32)s2,3,k2);

    z0 = _mx128_mfcpu_b(255);  /* all-255 filler vector for shuffles */
    z1 = _mx128_li_b(0);       /* zero vector (used for register copies) */
    /*************first***************/
    /* First output row (j == 0): the vertical window collapses to rows
     * 0 and 1, because row -1 is replicated as row 0. */
    v0 = _mx128_lu1q(S1,0);
    v1 = _mx128_lu1q(S1+Sstep1,0);

    base_v0 = _mx128_lu1q(S2,0);
    base_v1 = _mx128_lu1q(S2+Sstep2,0);
    base_v0 = _mx128_subua_b(base_v0, v0);//absdiff
    base_v1 = _mx128_subua_b(base_v1, v1);
    base_cmp0 = _mx128_cleu_b(base_thresh,base_v0);//thresh
    base_cmp1 = _mx128_cleu_b(base_thresh,base_v1);
    v0 = _mx128_andv(base_v0 ,base_cmp0);
    v1 = _mx128_andv(base_v1 ,base_cmp1);

    va = _mx128_minu_b(v0,v1);
    v3 = _mx128_shufv(va,z0,s1);
    v4 = _mx128_shufv(va,z0,s2);

    v11 = _mx128_minu_b(va,v3);
    v12 = _mx128_minu_b(v11,v4);

    v12 = _mx128_shufv(v12,z0,s0);
    _mx128_su1q(v12,D,0);

    i+=16;
    while(i <= swidth-16)
	{
		v0 = _mx128_lu1q(S1+i,0);
		v1 = _mx128_lu1q(S1+i+Sstep1,0);

		base_v0 = _mx128_lu1q(S2+i,0);
		base_v1 = _mx128_lu1q(S2+i+Sstep2,0);
		base_v0 = _mx128_subua_b(base_v0, v0);//absdiff
		base_v1 = _mx128_subua_b(base_v1, v1);
		base_cmp0 = _mx128_cleu_b(base_thresh,base_v0);//thresh
		base_cmp1 = _mx128_cleu_b(base_thresh,base_v1);
		v0 = _mx128_andv(base_v0, base_cmp0);
		v1 = _mx128_andv(base_v1, base_cmp1);

		v2 = _mx128_minu_b(v0,v1);

		v3 = _mx128_shufv(v2,va,s1);
		v4 = _mx128_shufv(v2,va,s2);

		v11 = _mx128_minu_b(v2,v3);
		v12 = _mx128_minu_b(v11,v4);

		_mx128_su1q(v12,D+i-1,0);

		/* carry the current column-min vector into the next iteration
		 * (adding the zero vector is just a register copy) */
		va = _mx128_add_b(v2,z1);

		i+=16;
	}
    /* Tail of the first row: either the last pixel (exact multiple of 16)
     * or a scalar loop over the remaining columns. */
    if(i == width)
		D[i-1] = _mx128_mtcpuu_b(v11,15);
    else
	{
		uint8_t val,val1,val2;
		uint8_t base_tmp1, base_tmp2;
		for(;i <= width;i+=1)
		{
			base_tmp1 = abs(S1[i-1] - S2[i-1]);
			base_tmp1 = base_tmp1 < thresh ? 0 : base_tmp1;
			base_tmp2 = abs(S1[i-2] - S2[i-2]);
			base_tmp2 = base_tmp2 < thresh ? 0: base_tmp2;

			val = JzMin(base_tmp1 , base_tmp2);
			/* at i == width the right neighbour is replicated */
			val1 = (i==width) ? base_tmp1 : (abs(S1[i] - S2[i])<thresh ? 0:abs(S1[i] - S2[i]));
			val1 = JzMin(val,val1);

			base_tmp1 = abs(S1[i-1+Sstep1] - S2[i-1+Sstep2]);
			base_tmp1 = base_tmp1 < thresh ? 0 : base_tmp1;
			base_tmp2 = abs(S1[i-2+Sstep1] - S2[i-2+Sstep2]);
			base_tmp2 = base_tmp2 < thresh ? 0: base_tmp2;

			val = JzMin(base_tmp1, base_tmp2);
			val2 = (i==width) ? base_tmp1 : (abs(S1[i+Sstep1] - S2[i+Sstep2])<thresh ? 0:abs(S1[i+Sstep1] - S2[i+Sstep2])) ;
			val2 = JzMin(val,val2);

			D[i-1] = JzMin(val1,val2);
		}

	}

    j += 1;

    /* Rows reserved for the one-at-a-time loop after the 4x-unrolled one.
     * NOTE(review): the mix of (height-j) and (height-1) here looks
     * inconsistent -- verify the intended remainder computation against
     * the original design before re-enabling this code. */
    int32_t count = ((height-j)%4==0)?1:((height-1)%4);
    /* Middle rows, 4 output rows per iteration (reads input rows
     * j-1 .. j+4; vertical mins v7..v10 correspond to outputs j..j+3). */
    while(j+4 < height-count)
	{
		i = 0;
		v0 = _mx128_lu1q(S1+(j-1)*Sstep1,0);
		v1 = _mx128_lu1q(S1+j*Sstep1,0);
		v2 = _mx128_lu1q(S1+(j+1)*Sstep1,0);
		v3 = _mx128_lu1q(S1+(j+2)*Sstep1,0);
		v4 = _mx128_lu1q(S1+(j+3)*Sstep1,0);
		v5 = _mx128_lu1q(S1+(j+4)*Sstep1,0);

		base_v0 = _mx128_lu1q(S2+(j-1)*Sstep2,0);
		base_v1 = _mx128_lu1q(S2+j*Sstep2,0);
		base_v2 = _mx128_lu1q(S2+(j+1)*Sstep2,0);
		base_v3 = _mx128_lu1q(S2+(j+2)*Sstep2,0);
		base_v4 = _mx128_lu1q(S2+(j+3)*Sstep2,0);
		base_v5 = _mx128_lu1q(S2+(j+4)*Sstep2,0);

		base_v0 = _mx128_subua_b(base_v0, v0);//absdiff
		base_v1 = _mx128_subua_b(base_v1, v1);
		base_v2 = _mx128_subua_b(base_v2, v2);
		base_v3 = _mx128_subua_b(base_v3, v3);
		base_v4 = _mx128_subua_b(base_v4, v4);
		base_v5 = _mx128_subua_b(base_v5, v5);

		base_cmp0 = _mx128_cleu_b(base_thresh,base_v0);//thresh
		base_cmp1 = _mx128_cleu_b(base_thresh,base_v1);
		base_cmp2 = _mx128_cleu_b(base_thresh,base_v2);
		base_cmp3 = _mx128_cleu_b(base_thresh,base_v3);
		base_cmp4 = _mx128_cleu_b(base_thresh, base_v4);
		base_cmp5 = _mx128_cleu_b(base_thresh,base_v5);

		v0 = _mx128_andv(base_v0, base_cmp0);
		v1 = _mx128_andv(base_v1, base_cmp1);
		v2 = _mx128_andv(base_v2, base_cmp2);
		v3 = _mx128_andv(base_v3, base_cmp3);
		v4 = _mx128_andv(base_v4, base_cmp4);
		v5 = _mx128_andv(base_v5, base_cmp5);

		/* vertical 3-row mins: v7=min(rows j-1..j+1), v8=min(j..j+2),
		 * v9=min(j+1..j+3), v10=min(j+2..j+4) */
		va = _mx128_minu_b(v1,v2);
		vb = _mx128_minu_b(v3,v4);
		v7 = _mx128_minu_b(va,v0);
		v8 = _mx128_minu_b(va,v3);
		v9 = _mx128_minu_b(vb,v2);
		v10 = _mx128_minu_b(vb,v5);

		v11 = _mx128_shufv(v7,z0,s1);
		v12 = _mx128_shufv(v8,z0,s1);
		v13 = _mx128_shufv(v9,z0,s1);
		v14 = _mx128_shufv(v10,z0,s1);

		v2 = _mx128_minu_b(v7,v11);
		v3 = _mx128_minu_b(v8,v12);
		v4 = _mx128_minu_b(v9,v13);
		v5 = _mx128_minu_b(v10,v14);

		v11 = _mx128_shufv(v7,z0,s2);
		v12 = _mx128_shufv(v8,z0,s2);
		v13 = _mx128_shufv(v9,z0,s2);
		v14 = _mx128_shufv(v10,z0,s2);

		v11 = _mx128_minu_b(v2,v11);
		v12 = _mx128_minu_b(v3,v12);
		v13 = _mx128_minu_b(v4,v13);
		v14 = _mx128_minu_b(v5,v14);

		v11 = _mx128_shufv(v11,z0,s0);
		v12 = _mx128_shufv(v12,z0,s0);
		v13 = _mx128_shufv(v13,z0,s0);
		v14 = _mx128_shufv(v14,z0,s0);

		_mx128_su1q(v11,D+j*Dstep,0);
		_mx128_su1q(v12,D+(j+1)*Dstep,0);
		_mx128_su1q(v13,D+(j+2)*Dstep,0);
		_mx128_su1q(v14,D+(j+3)*Dstep,0);

		i+=16;

		/* 32 bytes per iteration: two 16-byte halves, each using the
		 * previous half's vertical mins for the left-edge bytes. */
		while(i <= swidth-32)
		{
			v0 = _mx128_lu1q(S1+i+(j-1)*Sstep1,0);
			v1 = _mx128_lu1q(S1+i+j*Sstep1,0);
			v2 = _mx128_lu1q(S1+i+(j+1)*Sstep1,0);
			v3 = _mx128_lu1q(S1+i+(j+2)*Sstep1,0);
			v4 = _mx128_lu1q(S1+i+(j+3)*Sstep1,0);
			v5 = _mx128_lu1q(S1+i+(j+4)*Sstep1,0);

			base_v0 = _mx128_lu1q(S2+i+(j-1)*Sstep2,0);
			base_v1 = _mx128_lu1q(S2+i+j*Sstep2,0);
			base_v2 = _mx128_lu1q(S2+i+(j+1)*Sstep2,0);
			base_v3 = _mx128_lu1q(S2+i+(j+2)*Sstep2,0);
			base_v4 = _mx128_lu1q(S2+i+(j+3)*Sstep2,0);
			base_v5 = _mx128_lu1q(S2+i+(j+4)*Sstep2,0);

			base_v0 = _mx128_subua_b(base_v0, v0);//absdiff
			base_v1 = _mx128_subua_b(base_v1, v1);
			base_v2 = _mx128_subua_b(base_v2, v2);
			base_v3 = _mx128_subua_b(base_v3, v3);
			base_v4 = _mx128_subua_b(base_v4, v4);
			base_v5 = _mx128_subua_b(base_v5, v5);

			base_cmp0 = _mx128_cleu_b(base_thresh,base_v0);//thresh
			base_cmp1 = _mx128_cleu_b(base_thresh,base_v1);
			base_cmp2 = _mx128_cleu_b(base_thresh,base_v2);
			base_cmp3 = _mx128_cleu_b(base_thresh,base_v3);
			base_cmp4 = _mx128_cleu_b(base_thresh, base_v4);
			base_cmp5 = _mx128_cleu_b(base_thresh,base_v5);

			v0 = _mx128_andv(base_v0, base_cmp0);
			v1 = _mx128_andv(base_v1, base_cmp1);
			v2 = _mx128_andv(base_v2, base_cmp2);
			v3 = _mx128_andv(base_v3, base_cmp3);
			v4 = _mx128_andv(base_v4, base_cmp4);
			v5 = _mx128_andv(base_v5, base_cmp5);

			va = _mx128_minu_b(v1,v2);
			vb = _mx128_minu_b(v3,v4);
			v17 = _mx128_minu_b(va,v0);
			v18 = _mx128_minu_b(va,v3);
			v19 = _mx128_minu_b(vb,v2);
			v20 = _mx128_minu_b(vb,v5);

			v12 = _mx128_shufv(v17,v7,s1);
			v13 = _mx128_shufv(v18,v8,s1);
			v14 = _mx128_shufv(v19,v9,s1);
			v15 = _mx128_shufv(v20,v10,s1);

			v2 = _mx128_minu_b(v17,v12);
			v3 = _mx128_minu_b(v18,v13);
			v4 = _mx128_minu_b(v19,v14);
			v5 = _mx128_minu_b(v20,v15);

			v12 = _mx128_shufv(v17,v7,s2);
			v13 = _mx128_shufv(v18,v8,s2);
			v14 = _mx128_shufv(v19,v9,s2);
			v15 = _mx128_shufv(v20,v10,s2);

			v12 = _mx128_minu_b(v2,v12);
			v13 = _mx128_minu_b(v3,v13);
			v14 = _mx128_minu_b(v4,v14);
			v15 = _mx128_minu_b(v5,v15);

			_mx128_su1q(v12,D+j*Dstep+i-1,0);
			_mx128_su1q(v13,D+(j+1)*Dstep+i-1,0);
			_mx128_su1q(v14,D+(j+2)*Dstep+i-1,0);
			_mx128_su1q(v15,D+(j+3)*Dstep+i-1,0);


			/* second 16-byte half (load offset 16) */
			v0 = _mx128_lu1q(S1+i+(j-1)*Sstep1,16);
			v1 = _mx128_lu1q(S1+i+j*Sstep1,16);
			v2 = _mx128_lu1q(S1+i+(j+1)*Sstep1,16);
			v3 = _mx128_lu1q(S1+i+(j+2)*Sstep1,16);
			v4 = _mx128_lu1q(S1+i+(j+3)*Sstep1,16);
			v5 = _mx128_lu1q(S1+i+(j+4)*Sstep1,16);

			base_v0 = _mx128_lu1q(S2+i+(j-1)*Sstep2,16);
			base_v1 = _mx128_lu1q(S2+i+j*Sstep2,16);
			base_v2 = _mx128_lu1q(S2+i+(j+1)*Sstep2,16);
			base_v3 = _mx128_lu1q(S2+i+(j+2)*Sstep2,16);
			base_v4 = _mx128_lu1q(S2+i+(j+3)*Sstep2,16);
			base_v5 = _mx128_lu1q(S2+i+(j+4)*Sstep2,16);

			base_v0 = _mx128_subua_b(base_v0, v0);//absdiff
			base_v1 = _mx128_subua_b(base_v1, v1);
			base_v2 = _mx128_subua_b(base_v2, v2);
			base_v3 = _mx128_subua_b(base_v3, v3);
			base_v4 = _mx128_subua_b(base_v4, v4);
			base_v5 = _mx128_subua_b(base_v5, v5);

			base_cmp0 = _mx128_cleu_b(base_thresh,base_v0);//thresh
			base_cmp1 = _mx128_cleu_b(base_thresh,base_v1);
			base_cmp2 = _mx128_cleu_b(base_thresh,base_v2);
			base_cmp3 = _mx128_cleu_b(base_thresh,base_v3);
			base_cmp4 = _mx128_cleu_b(base_thresh, base_v4);
			base_cmp5 = _mx128_cleu_b(base_thresh,base_v5);

			v0 = _mx128_andv(base_v0, base_cmp0);
			v1 = _mx128_andv(base_v1, base_cmp1);
			v2 = _mx128_andv(base_v2, base_cmp2);
			v3 = _mx128_andv(base_v3, base_cmp3);
			v4 = _mx128_andv(base_v4, base_cmp4);
			v5 = _mx128_andv(base_v5, base_cmp5);

			va = _mx128_minu_b(v1,v2);
			vb = _mx128_minu_b(v3,v4);
			v7 = _mx128_minu_b(va,v0);
			v8 = _mx128_minu_b(va,v3);
			v9 = _mx128_minu_b(vb,v2);
			v10 = _mx128_minu_b(vb,v5);

			v12 = _mx128_shufv(v7,v17,s1);
			v13 = _mx128_shufv(v8,v18,s1);
			v14 = _mx128_shufv(v9,v19,s1);
			v15 = _mx128_shufv(v10,v20,s1);

			v2 = _mx128_minu_b(v7,v12);
			v3 = _mx128_minu_b(v8,v13);
			v4 = _mx128_minu_b(v9,v14);
			v5 = _mx128_minu_b(v10,v15);

			v12 = _mx128_shufv(v7,v17,s2);
			v13 = _mx128_shufv(v8,v18,s2);
			v14 = _mx128_shufv(v9,v19,s2);
			v15 = _mx128_shufv(v10,v20,s2);

			v12 = _mx128_minu_b(v2,v12);
			v13 = _mx128_minu_b(v3,v13);
			v14 = _mx128_minu_b(v4,v14);
			v15 = _mx128_minu_b(v5,v15);

			_mx128_su1q(v12,D+j*Dstep+i+15,0);
			_mx128_su1q(v13,D+(j+1)*Dstep+i+15,0);
			_mx128_su1q(v14,D+(j+2)*Dstep+i+15,0);
			_mx128_su1q(v15,D+(j+3)*Dstep+i+15,0);

			i += 32;
		}
		/* leftover single 16-byte step */
		while(i <= swidth-16)
		{
			v0 = _mx128_lu1q(S1+i+(j-1)*Sstep1,0);
			v1 = _mx128_lu1q(S1+i+j*Sstep1,0);
			v2 = _mx128_lu1q(S1+i+(j+1)*Sstep1,0);
			v3 = _mx128_lu1q(S1+i+(j+2)*Sstep1,0);
			v4 = _mx128_lu1q(S1+i+(j+3)*Sstep1,0);
			v5 = _mx128_lu1q(S1+i+(j+4)*Sstep1,0);

			base_v0 = _mx128_lu1q(S2+i+(j-1)*Sstep2,0);
			base_v1 = _mx128_lu1q(S2+i+j*Sstep2,0);
			base_v2 = _mx128_lu1q(S2+i+(j+1)*Sstep2,0);
			base_v3 = _mx128_lu1q(S2+i+(j+2)*Sstep2,0);
			base_v4 = _mx128_lu1q(S2+i+(j+3)*Sstep2,0);
			base_v5 = _mx128_lu1q(S2+i+(j+4)*Sstep2,0);

			base_v0 = _mx128_subua_b(base_v0, v0);//absdiff
			base_v1 = _mx128_subua_b(base_v1, v1);
			base_v2 = _mx128_subua_b(base_v2, v2);
			base_v3 = _mx128_subua_b(base_v3, v3);
			base_v4 = _mx128_subua_b(base_v4, v4);
			base_v5 = _mx128_subua_b(base_v5, v5);

			base_cmp0 = _mx128_cleu_b(base_thresh,base_v0);//thresh
			base_cmp1 = _mx128_cleu_b(base_thresh,base_v1);
			base_cmp2 = _mx128_cleu_b(base_thresh,base_v2);
			base_cmp3 = _mx128_cleu_b(base_thresh,base_v3);
			base_cmp4 = _mx128_cleu_b(base_thresh,base_v4);
			base_cmp5 = _mx128_cleu_b(base_thresh,base_v5);

			v0 = _mx128_andv(base_v0, base_cmp0);
			v1 = _mx128_andv(base_v1, base_cmp1);
			v2 = _mx128_andv(base_v2, base_cmp2);
			v3 = _mx128_andv(base_v3, base_cmp3);
			v4 = _mx128_andv(base_v4, base_cmp4);
			v5 = _mx128_andv(base_v5, base_cmp5);

			va = _mx128_minu_b(v1,v2);
			vb = _mx128_minu_b(v3,v4);
			v17 = _mx128_minu_b(va,v0);
			v18 = _mx128_minu_b(va,v3);
			v19 = _mx128_minu_b(vb,v2);
			v20 = _mx128_minu_b(vb,v5);

			v12 = _mx128_shufv(v17,v7,s1);
			v13 = _mx128_shufv(v18,v8,s1);
			v14 = _mx128_shufv(v19,v9,s1);
			v15 = _mx128_shufv(v20,v10,s1);

			v2 = _mx128_minu_b(v17,v12);
			v3 = _mx128_minu_b(v18,v13);
			v4 = _mx128_minu_b(v19,v14);
			v5 = _mx128_minu_b(v20,v15);

			v12 = _mx128_shufv(v17,v7,s2);
			v13 = _mx128_shufv(v18,v8,s2);
			v14 = _mx128_shufv(v19,v9,s2);
			v15 = _mx128_shufv(v20,v10,s2);

			v12 = _mx128_minu_b(v2,v12);
			v13 = _mx128_minu_b(v3,v13);
			v14 = _mx128_minu_b(v4,v14);
			v15 = _mx128_minu_b(v5,v15);

			_mx128_su1q(v12,D+j*Dstep+i-1,0);
			_mx128_su1q(v13,D+(j+1)*Dstep+i-1,0);
			_mx128_su1q(v14,D+(j+2)*Dstep+i-1,0);
			_mx128_su1q(v15,D+(j+3)*Dstep+i-1,0);

			i+=16;
		}
		/* width an exact multiple of 16: write the last column from the
		 * top byte of the per-row partial mins; otherwise scalar tail. */
		if(i == width)
		{
			D[i-1+j*Dstep] = _mx128_mtcpuu_b(v2,15);
			D[i-1+(j+1)*Dstep] = _mx128_mtcpuu_b(v3,15);
			D[i-1+(j+2)*Dstep] = _mx128_mtcpuu_b(v4,15);
			D[i-1+(j+3)*Dstep] = _mx128_mtcpuu_b(v5,15);
		}
		else
		{
			uint8_t val,val1,val2,val3,val4,val5,val6;
			uint8_t base_tmp1, base_tmp2;
			/* scalar tail for the four output rows; val1..val6 are the
			 * thresholded absdiffs of the right-hand column for input
			 * rows j-1 .. j+4 (edge replicated when i == width). */
			for(;i<=width;i++)
			{
				if(i==width)
				{
					base_tmp1 = abs(S1[i-1+(j-1)*Sstep1]-S2[i-1+(j-1)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val1 = base_tmp1;
					base_tmp1 = abs(S1[i-1+j*Sstep1]-S2[i-1+j*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val2 = base_tmp1;
					base_tmp1 = abs(S1[i-1+(j+1)*Sstep1]-S2[i-1+(j+1)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val3 = base_tmp1;
					base_tmp1 = abs(S1[i-1+(j+2)*Sstep1] - S2[i-1+(j+2)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val4 = base_tmp1;
					base_tmp1 = abs(S1[i-1+(j+3)*Sstep1] - S2[i-1+(j+3)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val5 = base_tmp1;
					base_tmp1 = abs(S1[i-1+(j+4)*Sstep1] - S2[i-1+(j+4)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val6 = base_tmp1;
				}
				else
				{
					base_tmp1 = abs(S1[i+(j-1)*Sstep1]-S2[i+(j-1)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val1 = base_tmp1;
					base_tmp1 = abs(S1[i+j*Sstep1]-S2[i+j*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val2 = base_tmp1;
					base_tmp1 = abs(S1[i+(j+1)*Sstep1]-S2[i+(j+1)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val3 = base_tmp1;
					base_tmp1 = abs(S1[i+(j+2)*Sstep1]-S2[i+(j+2)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val4 = base_tmp1;
					base_tmp1 = abs(S1[i+(j+3)*Sstep1]-S2[i+(j+3)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val5 = base_tmp1;
					base_tmp1 = abs(S1[i+(j+4)*Sstep1]-S2[i+(j+4)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val6 = base_tmp1;
				}
				/* fold in the two left-hand columns for each input row,
				 * turning val1..val6 into 3-wide horizontal mins */
				base_tmp1 = abs(S1[i-2+(j-1)*Sstep1] - S2[i-2+(j-1)*Sstep2]);
				base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
				base_tmp2 = abs(S1[i-1+(j-1)*Sstep1] - S2[i-1+(j-1)*Sstep2]);
				base_tmp2 = base_tmp2 < thresh ? 0: base_tmp2;
				val = JzMin(base_tmp1, base_tmp2);
				val1 = JzMin(val,val1);

				base_tmp1 = abs(S1[i-2+j*Sstep1] - S2[i-2+j*Sstep2]);
				base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
				base_tmp2 = abs(S1[i-1+j*Sstep1] - S2[i-1+j*Sstep2]);
				base_tmp2 = base_tmp2 < thresh ? 0: base_tmp2;
				val = JzMin(base_tmp1,base_tmp2);
				val2 = JzMin(val,val2);

				base_tmp1 = abs(S1[i-2+(j+1)*Sstep1] - S2[i-2+(j+1)*Sstep2]);
				base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
				base_tmp2 = abs(S1[i-1+(j+1)*Sstep1] - S2[i-1+(j+1)*Sstep2]);
				base_tmp2 = base_tmp2 < thresh ? 0: base_tmp2;
				val = JzMin(base_tmp1,base_tmp2);
				val3 = JzMin(val,val3);

				base_tmp1 = abs(S1[i-2+(j+2)*Sstep1]- S2[i-2+(j+2)*Sstep2]);
				base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
				base_tmp2 = abs(S1[i-1+(j+2)*Sstep1] - S2[i-1+(j+2)*Sstep2]);
				base_tmp2 = base_tmp2 < thresh ? 0: base_tmp2;
				val = JzMin(base_tmp1,base_tmp2);
				val4 = JzMin(val,val4);

				base_tmp1 = abs(S1[i-2+(j+3)*Sstep1] - S2[i-2+(j+3)*Sstep2]);
				base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
				base_tmp2 = abs(S1[i-1+(j+3)*Sstep1] - S2[i-1+(j+3)*Sstep2]);
				base_tmp2 = base_tmp2 < thresh ? 0: base_tmp2;
				val = JzMin(base_tmp1,base_tmp2);
				val5 = JzMin(val,val5);

				base_tmp1 = abs(S1[i-2+(j+4)*Sstep1] - S2[i-2+(j+4)*Sstep2]);
				base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
				base_tmp2 = abs(S1[i-1+(j+4)*Sstep1] - S2[i-1+(j+4)*Sstep2]);
				base_tmp2 = base_tmp2 < thresh ? 0: base_tmp2;
				val = JzMin(base_tmp1,base_tmp2);
				val6 = JzMin(val,val6);

				/* combine vertically: row j = min(val1..val3),
				 * j+1 = min(val2..val4), j+2 = min(val3..val5),
				 * j+3 = min(val4..val6) */
				val = JzMin(val2,val3);
				D[i-1+j*Dstep] = JzMin(val1,val);
				D[i-1+(j+1)*Dstep] = JzMin(val4,val);

				val = JzMin(val4,val5);
				D[i-1+(j+2)*Dstep] = JzMin(val3,val);
				D[i-1+(j+3)*Dstep] = JzMin(val6,val);
			}
		}

		j += 4;
	}
    /* Remaining middle rows, one at a time (reads rows j-1, j, j+1). */
    while(j<height-1)
	{
		i = 0;
		v0 = _mx128_lu1q(S1+(j-1)*Sstep1,0);
		v1 = _mx128_lu1q(S1+j*Sstep1,0);
		v2 = _mx128_lu1q(S1+(j+1)*Sstep1,0);

		base_v0 = _mx128_lu1q(S2+(j-1)*Sstep2,0);
		base_v1 = _mx128_lu1q(S2+j*Sstep2,0);
		base_v2 = _mx128_lu1q(S2+(j+1)*Sstep2,0);

		base_v0 = _mx128_subua_b(base_v0, v0);//absdiff
		base_v1 = _mx128_subua_b(base_v1, v1);
		base_v2 = _mx128_subua_b(base_v2, v2);

		base_cmp0 = _mx128_cleu_b(base_thresh,base_v0);//thresh
		base_cmp1 = _mx128_cleu_b(base_thresh,base_v1);
		base_cmp2 = _mx128_cleu_b(base_thresh,base_v2);

		v0 = _mx128_andv(base_v0, base_cmp0);
		v1 = _mx128_andv(base_v1, base_cmp1);
		v2 = _mx128_andv(base_v2, base_cmp2);

		va = _mx128_minu_b(v1,v2);
		va = _mx128_minu_b(va,v0);

		v4 = _mx128_shufv(va,z0,s1);
		v5 = _mx128_shufv(va,z0,s2);

		v11 = _mx128_minu_b(va,v4);
		v12 = _mx128_minu_b(v11,v5);

		v12 = _mx128_shufv(v12,z0,s0);

		_mx128_su1q(v12,D+j*Dstep,0);

		i+=16;
		while(i <= swidth-16)
		{
			v0 = _mx128_lu1q(S1+i+(j-1)*Sstep1,0);
			v1 = _mx128_lu1q(S1+i+j*Sstep1,0);
			v2 = _mx128_lu1q(S1+i+(j+1)*Sstep1,0);

			base_v0 = _mx128_lu1q(S2+i+(j-1)*Sstep2,0);
			base_v1 = _mx128_lu1q(S2+i+j*Sstep2,0);
			base_v2 = _mx128_lu1q(S2+i+(j+1)*Sstep2,0);

			base_v0 = _mx128_subua_b(base_v0, v0);//absdiff
			base_v1 = _mx128_subua_b(base_v1, v1);
			base_v2 = _mx128_subua_b(base_v2, v2);

			base_cmp0 = _mx128_cleu_b(base_thresh,base_v0);//thresh
			base_cmp1 = _mx128_cleu_b(base_thresh,base_v1);
			base_cmp2 = _mx128_cleu_b(base_thresh,base_v2);

			v0 = _mx128_andv(base_v0, base_cmp0);
			v1 = _mx128_andv(base_v1, base_cmp1);
			v2 = _mx128_andv(base_v2, base_cmp2);

			v3 = _mx128_minu_b(v1,v2);
			v3 = _mx128_minu_b(v3,v0);

			v4 = _mx128_shufv(v3,va,s1);
			v5 = _mx128_shufv(v3,va,s2);

			v11 = _mx128_minu_b(v3,v4);
			v12 = _mx128_minu_b(v11,v5);

			_mx128_su1q(v12,D+i+j*Dstep-1,0);

			/* carry current vector forward (add 0 == copy) */
			va = _mx128_add_b(v3,z1);

			i+=16;

		}
		if(i == width)
			D[i-1+j*Dstep] = _mx128_mtcpuu_b(v11,15);
		else
		{
			uint8_t val,val1,val2,val3;
			uint8_t base_tmp1, base_tmp2;
			/* scalar tail: val1..val3 = thresholded absdiff of the right
			 * column for rows j-1, j, j+1 (edge replicated at i==width) */
			for(;i<=width;i++)
			{
				if(i==width)
				{
					base_tmp1 = abs(S1[i-1+(j-1)*Sstep1] - S2[i-1+(j-1)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val1 = base_tmp1;
					base_tmp1 = abs(S1[i-1+j*Sstep1] - S2[i-1+j*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val2 = base_tmp1;
					base_tmp1 = abs(S1[i-1+(j+1)*Sstep1] - S2[i-1+(j+1)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val3 = base_tmp1;
				}
				else
				{
					base_tmp1 = abs(S1[i+(j-1)*Sstep1] - S2[i+(j-1)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val1 = base_tmp1;
					base_tmp1 = abs(S1[i+j*Sstep1] - S2[i+j*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val2 = base_tmp1;
					base_tmp1 = abs(S1[i+(j+1)*Sstep1] - S2[i+(j+1)*Sstep2]);
					base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
					val3 = base_tmp1;
				}
				base_tmp1 = abs(S1[i-2+(j-1)*Sstep1] - S2[i-2+(j-1)*Sstep2]);
				base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
				base_tmp2 = abs(S1[i-1+(j-1)*Sstep1] - S2[i-1+(j-1)*Sstep2]);
				base_tmp2 = base_tmp2 < thresh ? 0: base_tmp2;
				val = JzMin(base_tmp1,base_tmp2);
				val1 = JzMin(val,val1);

				base_tmp1 = abs(S1[i-2+j*Sstep1] - S2[i-2+j*Sstep2]);
				base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
				base_tmp2 = abs(S1[i-1+j*Sstep1] - S2[i-1+j*Sstep2]);
				base_tmp2 = base_tmp2 < thresh ? 0: base_tmp2;
				val = JzMin(base_tmp1,base_tmp2);
				val2 = JzMin(val,val2);

				base_tmp1 = abs(S1[i-2+(j+1)*Sstep1] - S2[i-2+(j+1)*Sstep2]);
				base_tmp1 = base_tmp1 < thresh ? 0: base_tmp1;
				base_tmp2 = abs(S1[i-1+(j+1)*Sstep1] - S2[i-1+(j+1)*Sstep2]);
				base_tmp2 = base_tmp2 < thresh ? 0: base_tmp2;
				val = JzMin(base_tmp1,base_tmp2);
				val3 = JzMin(val,val3);

				val = JzMin(val2,val3);
				D[i-1+j*Dstep] = JzMin(val1,val);
			}
		}
		j += 1;
	}

    assert(j==height-1);

    /* Last output row: the vertical window collapses to rows j-1 and j
     * because the bottom edge is replicated. */
    i = 0;
    v0 = _mx128_lu1q(S1+(j-1)*Sstep1,0);
    v1 = _mx128_lu1q(S1+j*Sstep1,0);

    base_v0 = _mx128_lu1q(S2+(j-1)*Sstep2,0);
    base_v1 = _mx128_lu1q(S2+j*Sstep2,0);

    base_v0 = _mx128_subua_b(base_v0, v0);//absdiff
    base_v1 = _mx128_subua_b(base_v1, v1);
    base_cmp0 = _mx128_cleu_b(base_thresh,base_v0);//thresh
    base_cmp1 = _mx128_cleu_b(base_thresh,base_v1);
    v0 = _mx128_andv(base_v0, base_cmp0);
    v1 = _mx128_andv(base_v1, base_cmp1);

    va = _mx128_minu_b(v0,v1);

    v4 = _mx128_shufv(va,z0,s1);
    v5 = _mx128_shufv(va,z0,s2);

    v11 = _mx128_minu_b(va,v4);
    v12 = _mx128_minu_b(v11,v5);

    v12 = _mx128_shufv(v12,z0,s0);

    _mx128_su1q(v12,D+j*Dstep,0);

    i+=16;

    while(i<=swidth-32)
	{
		v0 = _mx128_lu1q(S1+i+(j-1)*Sstep1,0);
		v1 = _mx128_lu1q(S1+i+j*Sstep1,0);

		base_v0 = _mx128_lu1q(S2+i+(j-1)*Sstep2,0);
		base_v1 = _mx128_lu1q(S2+i+j*Sstep2,0);

		base_v0 = _mx128_subua_b(base_v0, v0);//absdiff
		base_v1 = _mx128_subua_b(base_v1, v1);
		base_cmp0 = _mx128_cleu_b(base_thresh,base_v0);//thresh
		base_cmp1 = _mx128_cleu_b(base_thresh,base_v1);
		v0 = _mx128_andv(base_v0, base_cmp0);
		v1 = _mx128_andv(base_v1, base_cmp1);

		v7 = _mx128_minu_b(v0,v1);

		v4 = _mx128_shufv(v7,va,s1);
		v5 = _mx128_shufv(v7,va,s2);

		v6 = _mx128_minu_b(v7,v4);
		v6 = _mx128_minu_b(v6,v5);

		_mx128_su1q(v6,D+i+j*Dstep-1,0);

		/* second 16-byte half (load offset 16) */
		v0 = _mx128_lu1q(S1+i+(j-1)*Sstep1,16);
		v1 = _mx128_lu1q(S1+i+j*Sstep1,16);

		base_v0 = _mx128_lu1q(S2+i+(j-1)*Sstep2,16);
		base_v1 = _mx128_lu1q(S2+i+j*Sstep2,16);

		base_v0 = _mx128_subua_b(base_v0, v0);//absdiff
		base_v1 = _mx128_subua_b(base_v1, v1);
		base_cmp0 = _mx128_cleu_b(base_thresh,base_v0);//thresh
		base_cmp1 = _mx128_cleu_b(base_thresh,base_v1);
		v0 = _mx128_andv(base_v0, base_cmp0);
		v1 = _mx128_andv(base_v1, base_cmp1);

		va = _mx128_minu_b(v0,v1);

		v4 = _mx128_shufv(va,v7,s1);
		v5 = _mx128_shufv(va,v7,s2);

		v11 = _mx128_minu_b(va,v4);
		v12 = _mx128_minu_b(v11,v5);

		/* NOTE(review): stored with element offset 15 instead of the
		 * "+i-1" addressing used elsewhere -- presumably equivalent for
		 * su1q; confirm against the MXU2 intrinsics manual. */
		_mx128_su1q(v12,D+i+j*Dstep,15);

		i+=32;
	}
    while(i <= swidth-16)
	{
		v0 = _mx128_lu1q(S1+i+(j-1)*Sstep1,0);
		v1 = _mx128_lu1q(S1+i+j*Sstep1,0);

		base_v0 = _mx128_lu1q(S2+i+(j-1)*Sstep2,0);
		base_v1 = _mx128_lu1q(S2+i+j*Sstep2,0);

		base_v0 = _mx128_subua_b(base_v0, v0);//absdiff
		base_v1 = _mx128_subua_b(base_v1, v1);
		base_cmp0 = _mx128_cleu_b(base_thresh,base_v0);//thresh
		base_cmp1 = _mx128_cleu_b(base_thresh,base_v1);
		v0 = _mx128_andv(base_v0, base_cmp0);
		v1 = _mx128_andv(base_v1, base_cmp1);

		v7 = _mx128_minu_b(v0,v1);

		v4 = _mx128_shufv(v7,va,s1);
		v5 = _mx128_shufv(v7,va,s2);

		v11 = _mx128_minu_b(v7,v4);
		v12 = _mx128_minu_b(v11,v5);

		_mx128_su1q(v12,D+i+j*Dstep-1,0);

		i+=16;

	}
    if(i == width)
		D[i-1+j*Dstep] = _mx128_mtcpuu_b(v11,15);
    else
	{
		uint8_t val,val1,val2;
		uint8_t base_tmp1, base_tmp2;
		/* scalar tail of the last row */
		for(;i<=width;i++)
		{
			base_tmp1 = abs(S1[i-2+(j-1)*Sstep1] - S2[i-2+(j-1)*Sstep2]);
			base_tmp1 = base_tmp1 < thresh ? 0 : base_tmp1;
			base_tmp2 = abs(S1[i-1+(j-1)*Sstep1] - S2[i-1+(j-1)*Sstep2]);
			base_tmp2 = base_tmp2 < thresh ? 0: base_tmp2;
			val = JzMin(base_tmp1, base_tmp2);
			val1 = (i==width)?base_tmp2:( abs(S1[i+(j-1)*Sstep1]-S2[i+(j-1)*Sstep2])<thresh ? 0:abs(S1[i+(j-1)*Sstep1]-S2[i+(j-1)*Sstep2]));
			val1 = JzMin(val,val1);

			base_tmp1 = abs(S1[i-2+j*Sstep1] - S2[i-2+j*Sstep2]);
			base_tmp1 = base_tmp1 < thresh ? 0 : base_tmp1;
			base_tmp2 = abs(S1[i-1+j*Sstep1] - S2[i-1+j*Sstep2]);
			base_tmp2 = base_tmp2 < thresh ? 0: base_tmp2;

			val = JzMin(base_tmp1,base_tmp2);
			val2 = (i==width)?base_tmp2:(abs(S1[i+j*Sstep1] - S2[i+j*Sstep2])<thresh ? 0:abs(S1[i+j*Sstep1] - S2[i+j*Sstep2]));
			val2 = JzMin(val,val2);

			D[i-1+j*Dstep] = JzMin(val1,val2);
		}
	}

    j++;
}
#endif
#else
/* Stub used when the MXU SIMD path is compiled out (JZ_BASE_MXU == 0):
 * intentionally a no-op so callers still link. */
void MergeBaseErode3_8u(uint8_t* _S1,uint8_t* _S2,uint8_t* D, int thresh, int Sstep1,int Sstep2,int Dstep,int height,int width){}
#endif

/*
 * Dispatch the merge-erode (absdiff + threshold + 3x3 erode) of src1 vs
 * src2 into dst. The SIMD path is currently disabled: requesting it
 * trips assert(0) (and, with NDEBUG, silently does nothing); otherwise
 * the scalar JzMergeErode runs.
 */
void MergeBaseMove(IMat src1, IMat src2, IMat dst, int thresh, int isUseSimd)
{
#if JZ_BASE_MXU
	if (isUseSimd) {
		/* SIMD implementation is compiled out -- see MergeBaseErode3_8u */
		assert(0);
#if 0
		MergeBaseErode3_8u(src1.data, src2.data, dst.data, thresh, src1.step, src2.step, dst.step, src1.rows, src1.cols);
#endif
		return;
	}
#endif
	JzMergeErode(src1, src2, dst, thresh);
}

