/*
 *
 * This file is part of the open-source SeetaFace engine, which includes three modules:
 * SeetaFace Detection, SeetaFace Alignment, and SeetaFace Identification.
 *
 * This file is part of the SeetaFace Detection module, containing codes implementing the
 * face detection method described in the following paper:
 *
 *
 *   Funnel-structured cascade for multi-view face detection with alignment awareness,
 *   Shuzhe Wu, Meina Kan, Zhenliang He, Shiguang Shan, Xilin Chen.
 *   In Neurocomputing (under review)
 *
 *
 * Copyright (C) 2016, Visual Information Processing and Learning (VIPL) group,
 * Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China.
 *
 * The codes are mainly developed by Shuzhe Wu (a Ph.D supervised by Prof. Shiguang Shan)
 *
 * As an open-source face recognition engine: you can redistribute SeetaFace source codes
 * and/or modify it under the terms of the BSD 2-Clause License.
 *
 * You should have received a copy of the BSD 2-Clause License along with the software.
 * If not, see < https://opensource.org/licenses/BSD-2-Clause>.
 *
 * Contact Info: you can send an email to SeetaFace@vipl.ict.ac.cn for any problems.
 *
 * Note: the above information must be kept whenever or wherever the codes are used.
 *
 */

#include "mlp.h"
#include<string.h>
#include<math.h>
#include "common.h"
#include "math_func.c"
//namespace seeta {
//namespace fd {


/* Initialize a layer descriptor with its activation type and dimensions.
 * Does NOT allocate weight/bias storage — see MLPLayerSetSize for that. */
void InitMLPLayer(struct classMLPLayer *p, int32_t act_func_type, int32_t input_dim, int32_t output_dim )
{
	p->act_func_type_ = act_func_type;
	p->input_dim_ = input_dim;
	p->output_dim_ = output_dim;
}
/* Accessor: number of inputs this layer consumes. */
int32_t MLPLayerGetInputDim(struct classMLPLayer *p)
{
	return p->input_dim_;
}
/* Accessor: number of outputs this layer produces. */
int32_t MLPLayerGetOutputDim(struct classMLPLayer *p)
{
	return p->output_dim_;
}

/* Set the layer's dimensions and allocate weight/bias storage.
 * weights_ holds output_dim_ rows of input_dim_ floats; bias_ holds
 * output_dim_ floats.  On allocation failure both pointers are reset to
 * NULL so the layer is never left half-allocated.
 * NOTE(review): a previously allocated weights_/bias_ is not freed here
 * because the struct may be uninitialized on first use — calling this
 * twice on the same layer leaks; confirm against callers. */
void MLPLayerSetSize(struct classMLPLayer *p, int32_t inputDim, int32_t outputDim) {
	if (inputDim <= 0 || outputDim <= 0) {
		return;  // @todo handle the errors!!!
	}
	p->input_dim_ = inputDim;
	p->output_dim_ = outputDim;
	/* No cast on malloc in C; widen before multiplying to avoid int overflow. */
	p->weights_ = malloc(sizeof(float) * (size_t)inputDim * (size_t)outputDim);
	p->bias_ = malloc(sizeof(float) * (size_t)outputDim);
	if (p->weights_ == NULL || p->bias_ == NULL) {
		/* Partial failure: release whichever allocation succeeded
		 * (free(NULL) is a no-op) and leave a consistent empty state. */
		free(p->weights_);
		free(p->bias_);
		p->weights_ = NULL;
		p->bias_ = NULL;
	}
}
/* Copy `len` weight values into the layer's weight buffer.
 * Rejects a null source or a length that does not match
 * input_dim_ * output_dim_ exactly.
 * Fix: `nullptr` is C++/C23 only — plain C must use NULL. */
void MLPLayerSetWeights(struct classMLPLayer *p, const float* weights, int32_t len) {
	if (weights == NULL || len != p->input_dim_ * p->output_dim_) {
		return;  // @todo handle the errors!!!
	}
	memcpy(p->weights_, weights, sizeof(float)* p->input_dim_ * p->output_dim_);
}

/* Copy `len` bias values into the layer's bias buffer.
 * Rejects a null source or a length that does not equal output_dim_.
 * Fix: `nullptr` is C++/C23 only — plain C must use NULL. */
void MLPLayerSetBias(struct classMLPLayer *p, const float* bias, int32_t len) {
	if (bias == NULL || len != p->output_dim_) {
		return;  // @todo handle the errors!!!
	}
	memcpy(p->bias_, bias, sizeof(float)*p->output_dim_);
}
float MLPLayerSigmoid(float x) {
	return 1.0f / (1.0f + exp(x));
}
/* Rectified linear unit: max(x, 0). */
float MLPLayerReLU(float x) {
	if (x > 0.0f)
		return x;
	return 0.0f;
}
/* Input width of the whole network = input width of its first layer. */
int32_t MLPGetInputDim(struct classMLP *p)
{
	struct classMLPLayer *first = &p->layers_[0];
	return MLPLayerGetInputDim(first);
}

/* Output width of the whole network = output width of its last layer. */
int32_t MLPGetOutputDim(struct classMLP *p)
{
	//return MLPLayerGetOutputDim(&p->layers_.back());
	int last = MLPGetLayerNum(p) - 1;
	return MLPLayerGetOutputDim(&p->layers_[last]);
}

/* Number of layers in the network, derived from the heap allocation
 * backing the layers_ array; returns 0 when no layers are allocated.
 * NOTE(review): malloc_usable_size() is a glibc extension (non-portable)
 * and reports the USABLE size of the allocation, which may be LARGER than
 * the size actually requested — so this can over-count layers.  The robust
 * fix is an explicit layer-count field in struct classMLP; confirm against
 * the struct definition in mlp.h before changing. */
int32_t MLPGetLayerNum(struct classMLP *p)  {
	//return ((int32_t)p->layers_.size());
	if (p->layers_ == NULL) return 0;
	int len = malloc_usable_size(p->layers_) / sizeof(p->layers_[0]);
	return len;
}
/* Forward pass for one fully-connected layer:
 *   output[i] = act( dot(input, weights_row_i) + bias_[i] )
 * where input has input_dim_ elements and output has output_dim_ elements.
 * act_func_type_ == 1 selects ReLU; any other value selects the sigmoid
 * (MLPLayerSigmoid computes 1/(1+e^x), so the argument is negated here to
 * get the standard 1/(1+e^-x)).  The output rows are independent, so the
 * loop is parallelized with OpenMP; `i` is the worksharing-loop variable
 * and is made private to each thread by the `omp for` construct. */
void MLPLayerCompute(struct classMLPLayer *p, const float* input, float* output) {
	int32_t i;
#pragma omp parallel num_threads(SEETA_NUM_THREADS)
  {
#pragma omp for nowait
    for ( i = 0; i < p->output_dim_; i++) {
      /* Row i of the weight matrix starts at weights_ + i * input_dim_. */
      output[i] = VectorInnerProduct(input,
        p->weights_ + i * p->input_dim_, p->input_dim_) + p->bias_[i];
	  output[i] = (p->act_func_type_ == 1 ? MLPLayerReLU(output[i]) : MLPLayerSigmoid(-output[i]));
    }
  }
}

/* Run a full forward pass through the network.
 * Intermediate activations ping-pong between the two scratch buffers
 * layer_buf_[0] / layer_buf_[1]; the last layer writes directly into the
 * caller-provided `output`.
 * Fixes: a single-layer network previously ran layer 0 twice (once into
 * the scratch buffer, then again on its own output); an empty network
 * underflowed the loop bound via a size_t/int comparison; the unused
 * `layerbufLen` dead code is removed. */
void MLPCompute(struct classMLP *p, const float* input, float* output) {
  int32_t num_layers = MLPGetLayerNum(p);
  if (num_layers <= 0)
    return;  /* nothing to compute */

  if (num_layers == 1) {
    /* Single layer: its output IS the network output. */
    MLPLayerCompute(&p->layers_[0], input, output);
    return;
  }

  /* First layer reads the caller's input into scratch buffer 0. */
  MLPLayerCompute(&p->layers_[0], input, p->layer_buf_[0]);

  /* Hidden layers alternate buffers: layer i reads buf[(i+1)%2] and
   * writes buf[i%2]. */
  int32_t i;
  for (i = 1; i < num_layers - 1; i++) {
    MLPLayerCompute(&p->layers_[i], p->layer_buf_[(i + 1) % 2], p->layer_buf_[i % 2]);
  }

  /* Final layer consumes the last scratch buffer written above. */
  MLPLayerCompute(&p->layers_[num_layers - 1], p->layer_buf_[(i + 1) % 2], output);
}

/* Install a new fully-connected layer at 1-based position `list` in the
 * network, copying in its weights and biases.  The layer is rejected when
 * its input width does not match the previous layer's output width.
 * is_output selects the activation: sigmoid (type 0) for the output
 * layer, ReLU (type 1) for hidden layers. */
void MLPAddLayer(struct classMLP *p, int32_t inputDim, int32_t outputDim, const float* weights,
    const float* bias, int list, bool is_output) {
	//int layerLen = MLPGetLayerNum(p);
	/* Dimension check against the layer immediately before this one. */
	if (list > 1 && inputDim != MLPLayerGetOutputDim(&p->layers_[list - 2])) {
		return;  // @todo handle the errors!!!
	}

	MLPLayer layer;
	int32_t act_type = is_output ? 0 : 1;
	InitMLPLayer(&layer, act_type, 0, 1);
	MLPLayerSetSize(&layer, inputDim, outputDim);
	MLPLayerSetWeights(&layer, weights, inputDim * outputDim);
	MLPLayerSetBias(&layer, bias, outputDim);
	/* Shallow struct copy: ownership of layer.weights_/bias_ passes to
	 * the layers_ array slot. */
	p->layers_[list - 1] = layer;
}

//}  // namespace fd
//}  // namespace seeta
