#include <stdint.h>
#include <webrtc_audio_processing/webrtc/common_audio/signal_processing/include/signal_processing_library.h>
#include "audio_hpf.h"
#ifdef DMALLOC
#include <imp/dmalloc.h>
#endif
/**
 * Initialise the two high-pass-filter state buffers by filling each with a
 * constant value (typically 0 to start from a silent history).
 *
 * @param vector_x        input-history buffer (x[i-1], x[i-2], ...)
 * @param vector_y        output-history buffer (hi/lo 16-bit pairs)
 * @param set_value_x     value written into every element of vector_x
 * @param set_value_y     value written into every element of vector_y
 * @param vector_length_x number of int16_t elements in vector_x
 * @param vector_length_y number of int16_t elements in vector_y
 */
void audio_process_hpf_create(int16_t* vector_x,int16_t* vector_y,int16_t set_value_x,
				int16_t set_value_y,int vector_length_x,int vector_length_y)
{
	int idx;

	/* Fill the input-history buffer first, then the output-history
	 * buffer — same write order as the original WebRtcSpl_MemSetW16
	 * calls this replaces. */
	for (idx = 0; idx < vector_length_x; idx++) {
		vector_x[idx] = set_value_x;
	}
	for (idx = 0; idx < vector_length_y; idx++) {
		vector_y[idx] = set_value_y;
	}
}

/**
 * Apply a 2nd-order (biquad) high-pass filter to a block of 16-bit PCM
 * samples, in place, using WebRTC fixed-point arithmetic.
 *
 * The output history y[] is kept in double precision as hi/lo 16-bit pairs:
 * y[0]/y[1] = high/low parts of y[i-1], y[2]/y[3] = high/low parts of y[i-2].
 * The recursive (-a) terms are accumulated in two passes (low parts scaled
 * down by >>15, then high parts) before the feed-forward b terms are added.
 *
 * @param filter  pointer to a FilterState; assumes it exposes y[4], x[2] and
 *                ba[5] (b0,b1,b2,-a1,-a2, presumably Q12) — declared in
 *                audio_hpf.h, not visible here; TODO confirm layout.
 * @param data    in/out sample buffer, filtered in place (Q0).
 * @param length  number of samples in data.
 * @return 0 on success, -1 if filter is NULL.
 */
int audio_process_hpf_process(void* filter,short* data,int length)
{
	FilterState* hpf = (FilterState *)filter;
	if(hpf == NULL)
		return -1;

	int tmp_int32 = 0;
	short* y = hpf->y;   /* output history, hi/lo split (see header comment) */
	short* x = hpf->x;   /* input history: x[0]=x[i-1], x[1]=x[i-2] */
	const short* ba = hpf->ba; /* filter coefficients b0,b1,b2,-a1,-a2 */

	int i= 0;
	for (i = 0; i < length; i++) {
		/* Recursive part: accumulate low halves first, rescale, then
		 * add the high halves and restore the final factor of 2. */
		tmp_int32 = WEBRTC_SPL_MUL_16_16(y[1], ba[3]); // -a[1] * y[i-1] (low part)
		tmp_int32 += WEBRTC_SPL_MUL_16_16(y[3], ba[4]); // -a[2] * y[i-2] (low part)
		tmp_int32 = (tmp_int32 >> 15);
		tmp_int32 += WEBRTC_SPL_MUL_16_16(y[0], ba[3]); // -a[1] * y[i-1] (high part)
		tmp_int32 += WEBRTC_SPL_MUL_16_16(y[2], ba[4]); // -a[2] * y[i-2] (high part)
		// NOTE(review): left-shifting a possibly-negative value is UB in
		// ISO C; this mirrors the upstream WebRTC implementation, which
		// relies on arithmetic shift behavior — do not "fix" in isolation.
		tmp_int32 = (tmp_int32 << 1);

		/* Feed-forward part. */
		tmp_int32 += WEBRTC_SPL_MUL_16_16(data[i], ba[0]); // b[0]*x[0]
		tmp_int32 += WEBRTC_SPL_MUL_16_16(x[0], ba[1]); // b[1]*x[i-1]
		tmp_int32 += WEBRTC_SPL_MUL_16_16(x[1], ba[2]); // b[2]*x[i-2]

		//Update state (input part)
		x[1] = x[0];
		x[0] = data[i];

		// Update state (filtered part): shift y[i-1] into the y[i-2]
		// slots, then split the new output into hi (>>13) and lo
		// (residual << 2) halves for the next iteration.
		y[2] = y[0];
		y[3] = y[1];
		y[0] = (short)(tmp_int32 >> 13);
		y[1] = (short)((tmp_int32 - WEBRTC_SPL_LSHIFT_W32((int)(y[0]), 13)) << 2);

		// Rounding in Q12, i.e. add 2^11
		tmp_int32 += 2048;

		// Saturate (to 2^27) so that the HP filtered signal does not overflow
		tmp_int32 = WEBRTC_SPL_SAT((int)(134217727),tmp_int32,(int)(-134217728));

		// Convert back to Q0 and use rounding
		data[i] = (short)(tmp_int32>>12);
	}
	return 0;
}

/**
 * Release resources held by the high-pass filter module.
 *
 * Currently a no-op: the state buffers are supplied and presumably owned by
 * the caller (see audio_process_hpf_create), so there is nothing to free
 * here. Kept so callers can pair create/free symmetrically.
 *
 * Fix: use a proper (void) prototype — an empty parameter list () declares
 * an unprototyped function (obsolescent, removed in C23).
 */
void audio_process_hpf_free(void)
{
	/* Nothing to release. */
}
