#include "Wakeup.h"
#include "Model.h"
#include "Weight.h"

#define LAYER_NUM	41

/* Discriminant stored in LayerBase.m_type; selects the forward routine
 * in Forward()'s switch. */
typedef enum _tagLayerType
{
	Type_Unknown		= 0,
	Type_Conv1d			= 1,
	Type_BatchNorm1d	= 2,
	Type_LeakyReLU		= 3,
	Type_Sigmoid		= 4,

}LayerType;

/* Common header embedded as the first member of every concrete layer
 * struct, so any member of the Layer union can be inspected generically. */
typedef struct _tagLayerBase
{
	char m_label[32];		/* human-readable name, e.g. "conv_3" */
	LayerType m_type;		/* tag identifying which union member is active */
	float * m_out;			/* output buffer (one of two ping-pong buffers) */
	const float * m_weight;	/* weight table from Weight.h; NULL for activations */
	const float * m_bias;	/* bias table from Weight.h; NULL for activations */

	int m_in_channels;		/* number of input channels */
	int m_in_channel_size;	/* samples per input channel */
	int m_out_channels;		/* number of output channels */
	int m_out_channel_size;	/* samples per output channel */

}LayerBase;

/* 1-D convolution layer parameters (weights live in m_base.m_weight,
 * laid out [out_channel][in_channel][kernel]). */
typedef struct _tagConv1d
{
	LayerBase m_base;
	
	int m_kernel_size;	/* taps per filter */
	int m_stride;		/* step between output positions */
	int m_padding;		/* implicit zero padding on each side */
	int m_hasbias;		/* nonzero => add m_base.m_bias[channel] */

}Conv1d;

/* Inference-mode batch normalization; gamma/beta are stored in
 * m_base.m_weight / m_base.m_bias respectively. */
typedef struct _tagBatchNorm1d
{
	LayerBase m_base;
	
	const float * m_running_mean;	/* per-channel running mean */
	const float * m_running_var;	/* per-channel running variance (see forward pass note) */
	int m_num_batches_tracked;		/* unused at inference; always 0 here */

}BatchNorm1d;

/* LeakyReLU activation: y = x for x > 0, else x * m_negative_slope. */
typedef struct _tagLeakyReLU
{
	LayerBase m_base;
	
	float m_negative_slope;	/* slope applied to negative inputs (0.1 in this model) */

}LeakyReLU;

/* Sigmoid activation layer; carries no parameters beyond the base. */
typedef struct _tagSigmoid
{
	LayerBase m_base;

}Sigmoid;


/* Tagged union of all layer kinds.  Every member starts with a LayerBase,
 * so m_LayerBase.m_type is valid regardless of which member was built. */
typedef union _tagLayer
{
	LayerBase	m_LayerBase;
	Conv1d		m_Conv1d;
	BatchNorm1d	m_BatchNorm1d;
	LeakyReLU	m_LeakyReLU;
	Sigmoid		m_Sigmoid;

}Layer;

/* Input length (samples per channel) for conv layer `layer` (1..14);
 * index 0 is unused padding.  Each stride-2 conv halves the length:
 * 8192 -> 4096 -> ... -> 2 -> 1. */
const int in_channel_size[15] = 
{
	0,
	8192,
	4096,
	2048,
	1024,
	512,
	256,
	128,
	64,
	32,
	16,
	8,
	4,
	2,
	1,
};

/* Output length (samples per channel) for conv layer `layer` (1..14);
 * index 0 is unused padding.  Mirrors in_channel_size shifted by one
 * layer; the last two layers both produce length 1. */
const int out_channel_size[15] = 
{
	0,
	4096,
	2048,
	1024,
	512,
	256,
	128,
	64,
	32,
	16,
	8,
	4,
	2,
	1,
	1,
};

/* Layer table allocated by ModelInit(), indexed 0..LAYER_NUM-1;
 * freed by ModelRelease(). */
static Layer * m_Layers;

/* Two scratch buffers of HDRCNT*BUFLEN floats each are ping-ponged
 * between consecutive layers: layer `lid` writes buffer lid%2 and the
 * next layer reads it as its input.  Defined elsewhere in the project. */
typedef float LAYERBUFFER[HDRCNT*BUFLEN];
extern LAYERBUFFER * m_LayerBuffer;

/* Populate layer slot `lid` as a Conv1d.
 *   lid         - index into m_Layers (also selects the ping-pong output buffer)
 *   layer       - 1-based conv stage number; indexes the weight and size tables
 *   filter_in   - input channel count
 *   filter_out  - output channel count
 *   kernel_size/stride/pad - convolution geometry
 *   bias        - nonzero if the layer adds a per-channel bias */
void Conv1dBuild(int lid, int layer, int filter_in, int filter_out, int kernel_size, int stride, int pad, int bias)
{
	Conv1d * p = &m_Layers[lid].m_Conv1d;
	LayerBase * pb = &p->m_base;

	/* snprintf: label buffer is fixed at 32 bytes; guarantee termination */
	snprintf(pb->m_label, sizeof(pb->m_label), "conv_%d", layer);
	pb->m_type = Type_Conv1d;
	pb->m_out = (float*)m_LayerBuffer[lid%2];
	pb->m_weight = Conv1dWeight[layer];
	pb->m_bias = Conv1dBias[layer];
	pb->m_in_channels = filter_in;
	pb->m_in_channel_size = in_channel_size[layer];
	pb->m_out_channels = filter_out;
	pb->m_out_channel_size = out_channel_size[layer];

	p->m_kernel_size = kernel_size;
	p->m_stride = stride;
	p->m_padding = pad;
	p->m_hasbias = bias;
}

/* Populate layer slot `lid` as a BatchNorm1d for conv stage `layer`.
 * Input and output sizes are both the conv stage's output size, since
 * batch norm is applied element-wise after the convolution. */
void BatchNorm1dBuild(int lid, int layer, int filter_out)
{
	BatchNorm1d * p = &m_Layers[lid].m_BatchNorm1d;
	LayerBase * pb = &p->m_base;

	/* snprintf: label buffer is fixed at 32 bytes; guarantee termination */
	snprintf(pb->m_label, sizeof(pb->m_label), "bn_%d", layer);
	pb->m_type = Type_BatchNorm1d;
	pb->m_out = (float*)m_LayerBuffer[lid%2];
	pb->m_weight = BatchNorm1dWeight[layer];	/* gamma */
	pb->m_bias = BatchNorm1dBias[layer];		/* beta */
	pb->m_in_channels = filter_out;
	pb->m_in_channel_size = out_channel_size[layer];
	pb->m_out_channels = filter_out;
	pb->m_out_channel_size = out_channel_size[layer];

	p->m_running_mean = BatchNorm1dMean[layer];
	p->m_running_var = BatchNorm1dVar[layer];
	p->m_num_batches_tracked = 0;
}

/* Populate layer slot `lid` as a LeakyReLU activation for conv stage
 * `layer`.  Activations carry no weights, so m_weight/m_bias are NULL. */
void LeakyReLUBuild(int lid, int layer, int filter_out, float negative_slope)
{
	LeakyReLU * p = &m_Layers[lid].m_LeakyReLU;
	LayerBase * pb = &p->m_base;

	/* snprintf: label buffer is fixed at 32 bytes; guarantee termination */
	snprintf(pb->m_label, sizeof(pb->m_label), "relu_%d", layer);
	pb->m_type = Type_LeakyReLU;
	pb->m_out = (float*)m_LayerBuffer[lid%2];
	pb->m_weight = 0;
	pb->m_bias = 0;
	pb->m_in_channels = filter_out;
	pb->m_in_channel_size = out_channel_size[layer];
	pb->m_out_channels = filter_out;
	pb->m_out_channel_size = out_channel_size[layer];

	p->m_negative_slope = negative_slope;
}

/* Populate layer slot `lid` as a Sigmoid activation for conv stage
 * `layer`.  Activations carry no weights, so m_weight/m_bias are NULL. */
void SigmoidBuild(int lid, int layer, int filter_out)
{
	Sigmoid * p = &m_Layers[lid].m_Sigmoid;
	LayerBase * pb = &p->m_base;

	/* snprintf: label buffer is fixed at 32 bytes; guarantee termination */
	snprintf(pb->m_label, sizeof(pb->m_label), "sgmd_%d", layer);
	pb->m_type = Type_Sigmoid;
	pb->m_out = (float*)m_LayerBuffer[lid%2];
	pb->m_weight = 0;
	pb->m_bias = 0;
	pb->m_in_channels = filter_out;
	pb->m_in_channel_size = out_channel_size[layer];
	pb->m_out_channels = filter_out;
	pb->m_out_channel_size = out_channel_size[layer];
}

/* Free the layer table allocated by ModelInit().  Safe to call more than
 * once: the pointer is cleared after the free. */
void ModelRelease(void)
{
	if(m_Layers)
	{
		FREE(m_Layers);
		m_Layers = 0;	/* prevents double-free on a repeated call */
	}
}

/* Allocate and populate the LAYER_NUM-entry layer table.
 *
 * The network is 14 stages of [Conv1d -> BatchNorm1d -> LeakyReLU(0.1)]
 * (the last stage swaps in a Sigmoid and drops the batch norm), reducing
 * a 1 x 8192 input down to a single scalar score.  Conv arguments are:
 * (lid, layer, in_ch, out_ch, kernel, stride, pad, bias).
 *
 * On allocation failure m_Layers stays NULL; callers must not invoke
 * Forward() in that case. */
void ModelInit(void)
{
	m_Layers = (Layer*)MALLOC(sizeof(Layer)*LAYER_NUM);
	if(!m_Layers)
	{
		return;	/* out of memory: leave the model unbuilt */
	}
	memset(m_Layers, 0, sizeof(Layer)*LAYER_NUM);

	Conv1dBuild		(0, 1, 1, 2, 3, 2, 1, 0);	// 2 x 4096
	BatchNorm1dBuild(1, 1, 2);
	LeakyReLUBuild	(2, 1, 2, 0.1f);

	Conv1dBuild		(3, 2, 2, 2, 3, 2, 1, 0);	// 2 x 2048
	BatchNorm1dBuild(4, 2, 2);
	LeakyReLUBuild	(5, 2, 2, 0.1f);

	Conv1dBuild		(6, 3, 2, 4, 3, 2, 1, 0);	// 4 x 1024
	BatchNorm1dBuild(7, 3, 4);
	LeakyReLUBuild	(8, 3, 4, 0.1f);

	Conv1dBuild		(9,  4, 4, 4, 3, 2, 1, 0);	// 4 x 512
	BatchNorm1dBuild(10, 4, 4);
	LeakyReLUBuild	(11, 4, 4, 0.1f);

	Conv1dBuild		(12, 5, 4, 8, 3, 2, 1, 0);	// 8 x 256
	BatchNorm1dBuild(13, 5, 8);
	LeakyReLUBuild	(14, 5, 8, 0.1f);

	Conv1dBuild		(15, 6, 8, 8, 3, 2, 1, 0);	// 8 x 128
	BatchNorm1dBuild(16, 6, 8);
	LeakyReLUBuild	(17, 6, 8, 0.1f);

	Conv1dBuild		(18, 7, 8, 16, 3, 2, 1, 0);	// 16 x 64
	BatchNorm1dBuild(19, 7, 16);
	LeakyReLUBuild	(20, 7, 16, 0.1f);

	Conv1dBuild		(21, 8, 16, 16, 3, 2, 1, 0);	// 16 x 32
	BatchNorm1dBuild(22, 8, 16);
	LeakyReLUBuild	(23, 8, 16, 0.1f);

	Conv1dBuild		(24, 9, 16, 32, 3, 2, 1, 0);	// 32 x 16
	BatchNorm1dBuild(25, 9, 32);
	LeakyReLUBuild	(26, 9, 32, 0.1f);

	Conv1dBuild		(27, 10, 32, 32, 3, 2, 1, 0);// 32 x 8
	BatchNorm1dBuild(28, 10, 32);
	LeakyReLUBuild	(29, 10, 32, 0.1f);

	Conv1dBuild		(30, 11, 32, 64, 3, 2, 1, 0);// 64 x 4
	BatchNorm1dBuild(31, 11, 64);
	LeakyReLUBuild	(32, 11, 64, 0.1f);

	Conv1dBuild		(33, 12, 64, 64, 3, 2, 1, 0);// 64 x 2
	BatchNorm1dBuild(34, 12, 64);
	LeakyReLUBuild	(35, 12, 64, 0.1f);

	Conv1dBuild		(36, 13, 64, 64, 2, 1, 0, 0);// 64 x 1
	BatchNorm1dBuild(37, 13, 64);
	LeakyReLUBuild	(38, 13, 64, 0.1f);

	Conv1dBuild		(39, 14, 64, 1, 1, 1, 0, 1);// 1 x 1
	SigmoidBuild	(40, 14, 64);

}

/* 1-D convolution forward pass.  For each output sample, accumulate the
 * dot product of the kernel with the (implicitly zero-padded) input
 * window, then add the per-channel bias when the layer has one.
 * Weight layout is [out_channel][in_channel][kernel], flattened. */
void Conv1dForward(Layer * pLayer, float * in)
{
	Conv1d * conv = &pLayer->m_Conv1d;
	LayerBase * base = &conv->m_base;
	int oc, pos, ic, tap;

	for(oc = 0; oc < base->m_out_channels; oc++)
	{
		float * dst = base->m_out + oc*base->m_out_channel_size;

		for(pos = 0; pos < base->m_out_channel_size; pos++)
		{
			float acc = 0.0f;

			for(ic = 0; ic < base->m_in_channels; ic++)
			{
				const float * w = base->m_weight + (oc*base->m_in_channels + ic)*conv->m_kernel_size;
				const float * src = in + ic*base->m_in_channel_size;

				for(tap = 0; tap < conv->m_kernel_size; tap++)
				{
					int idx = pos*conv->m_stride + tap - conv->m_padding;
					/* taps landing outside the input act as zero padding */
					if(idx >= 0 && idx < base->m_in_channel_size)
					{
						acc += src[idx] * w[tap];
					}
				}
			}

			if(conv->m_hasbias)
			{
				acc += base->m_bias[oc];
			}

			dst[pos] = acc;
		}
	}
}

/* Inference-mode batch normalization:
 *   out = (in - running_mean) / running_var * gamma + beta
 * applied per channel (gamma = m_weight, beta = m_bias).
 *
 * NOTE(review): the input is divided by m_running_var directly, not by
 * sqrt(running_var + eps) as standard BatchNorm defines.  This is only
 * correct if the weight exporter stored sqrt(var + eps) in the
 * BatchNorm1dVar tables — confirm against the export tool before
 * changing anything here. */
void BatchNorm1dForward(Layer * pLayer, float * in)
{
	int i,j;

	BatchNorm1d * p = &pLayer->m_BatchNorm1d;
	LayerBase * pb = &p->m_base;

	for(i=0; i<pb->m_out_channels; i++)
	{
		for(j=0; j<pb->m_out_channel_size; j++)
		{
			float x_normalized = (in[i*pb->m_out_channel_size+j]-p->m_running_mean[i]) / p->m_running_var[i];
			pb->m_out[i*pb->m_out_channel_size+j] = x_normalized * pb->m_weight[i] + pb->m_bias[i];
		}
	}
}

/* LeakyReLU forward pass over the whole channel block:
 * y = x for x > 0, else x * negative_slope. */
void LeakyReLUForward(Layer * pLayer, float * in)
{
	LeakyReLU * relu = &pLayer->m_LeakyReLU;
	LayerBase * base = &relu->m_base;
	int total = base->m_out_channels * base->m_out_channel_size;
	int idx;

	for(idx = 0; idx < total; idx++)
	{
		float v = in[idx];
		base->m_out[idx] = (v > 0) ? v : v * relu->m_negative_slope;
	}
}

/* Sigmoid forward pass: y = 1 / (1 + exp(-x)) element-wise, using the
 * project's WAKEUP_EXP macro for the exponential. */
void SigmoidForward(Layer * pLayer, float * in)
{
	Sigmoid * sig = &pLayer->m_Sigmoid;
	LayerBase * base = &sig->m_base;
	int total = base->m_out_channels * base->m_out_channel_size;
	int idx;

	for(idx = 0; idx < total; idx++)
	{
		base->m_out[idx] = (float)(1.0f / (1.0f + WAKEUP_EXP(-(in[idx]))));
	}
}

void Forward(float * in, float * out)
{
	int i;
	float * layer_in = in;

	for(i=0; i<LAYER_NUM; i++)
	{
		switch(m_Layers[i].m_LayerBase.m_type)
		{
		case Type_Conv1d:
			Conv1dForward(&m_Layers[i], layer_in);
			break;

		case Type_BatchNorm1d:
			BatchNorm1dForward(&m_Layers[i], layer_in);
			break;

		case Type_LeakyReLU:
			LeakyReLUForward(&m_Layers[i], layer_in);
			break;

		case Type_Sigmoid:
			SigmoidForward(&m_Layers[i], layer_in);
			break;

		default:
			//assert(0);
			break;
		}
		
		layer_in = m_Layers[i].m_LayerBase.m_out;
	}

	((float*)out)[0] = ((float*)layer_in)[0];
}
