#include <cstdio>    // fprintf — used by the error reporting below
#include <cstdlib>   // malloc/realloc/free, rand, RAND_MAX
#include <iostream>
// BUG FIX: removed `#include <stdlib>` — no such standard header exists
// (the C++ form is <cstdlib>, included above); it was a guaranteed compile error.
// NOTE(review): this file calls the CUDA runtime API (cudaMalloc/cudaMemcpy);
// nvcc includes cuda_runtime.h implicitly for .cu files — add
// `#include <cuda_runtime.h>` explicitly if this is built any other way.

using namespace std;


// A single fully-connected neural-network layer holding host-side and
// device-side (CUDA) copies of its parameters, attached inputs, outputs
// and error terms.
class NN_layer{
	int row;          // number of output neurons (rows of the weight matrix)
	int col;          // number of inputs per neuron (columns of the weight matrix)
	int inputs_num;   // how many input vectors are currently attached

	// Host-side storage: `weights` is row*col floats, `bias` is row floats;
	// input/output/error are tables of inputs_num pointers.
	float *weights, *bias, **input, **output, **error;
	// Device-side (GPU) mirrors of the buffers above.
	float *dev_weights, *dev_bias, **dev_input, **dev_output, **dev_error;

	bool recurrent;   // whether the layer is treated as recurrent

	void randomize();
	void allocate();

	void freeWeights();
	void freeError();
	void freeIO();

public:
	NN_layer(int,int);
	~NN_layer();

	// BUG FIX: both accessors were missing return types (illegal in C++),
	// and get_recurrent() had its semicolon inside the closing brace.
	void set_recurrent(bool rec){ recurrent = rec; }
	bool get_recurrent() const { return recurrent; }

	void set_input(float*,int);
	float* get_output();
	void to_device();
	void from_device();

	void reset_input();

};  // BUG FIX: a class definition must be terminated with a semicolon.



// Construct a row x col layer: allocate the host-side buffers and fill
// weights/biases with random values in [-2, 2].
// BUG FIX: the definition was qualified as NN_layer::NNlayer (a different,
// undeclared name) and used Python's `self.` / `False` instead of the C++
// `this->` / `false`.
NN_layer::NN_layer(int row, int col){
	this->row = row;   // parameter shadows the member, so this-> is required
	this->col = col;
	inputs_num = 0;

	recurrent = false;

	allocate();
	randomize();

}

// Release all host-side buffers owned by the layer.
// BUG FIX: the destructor was qualified as NN_layer::~NNlayer (wrong name,
// does not compile).
// NOTE(review): the device-side buffers allocated in to_device()/set_input()
// are never cudaFree'd anywhere visible in this file — GPU memory leaks.
NN_layer::~NN_layer(){

	freeWeights();
	freeError();
	freeIO();

}

void set_input(float* data, int len){

	if (inputs_num == 0){

		
		input = (float **)malloc(sizeof(float*));
		input[num_inputs] = data;
		num_inputs++;

		float *dev_data;
		cudaStatus = cudaMalloc((void*)dev_data, len * sizeof(float));
		if (cudaStatus != cudaSuccess) {
			fprintf(stderr, "cudaMalloc failed!");
		
		}
		cudaStatus = cudaMemcpy(dev_a, a, row*col * sizeof(float), cudaMemcpyHostToDevice);
		if (cudaStatus != cudaSuccess) {
			fprintf(stderr, "cudaMemcpy failed!");
			goto Error;
		}
	}
	else{
		float ** new_input = (float **)realloc(input, (num_inputs + 1)*sizeof(float *));
		input = new_inputs;//memory leak???
		input[num_inputs] = data;
		num_inputs++;

	};

}

void NN_layer::to_device(){
	cudaStatus = cudaMalloc((void*)dev_weights, row*col * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
	cudaStatus = cudaMalloc((void*)dev_bias, row * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }

	cudaStatus = cudaMemcpy(dev_weights, weights, row*col * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
	cudaStatus = cudaMemcpy(dev_bias, bias, row * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }

}

void NN_layer::from_device(){

    cudaStatus = cudaMemcpy(weights, dev_weights, row * col * sizeof(float), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }

	cudaStatus = cudaMemcpy(bias, dev_bias, row * col * sizeof(float), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }

}

void NN_layer::allocate(){
	
	weights = (float*)malloc(row*col*sizeof(float));
	bias = (float*)malloc(row*sizeof(float));
	input = NULL;
	output = NULL;
	error = NULL;

}

// Seed the layer with uniform pseudo-random weights and biases in [-2, 2].
// Uses rand(); call srand() beforehand for a non-deterministic start.
void NN_layer::randomize(){

	const int weight_count = row * col;

	for (int i = 0; i < weight_count; i++)
		weights[i] = 4.0 * rand() / RAND_MAX - 2;

	for (int i = 0; i < row; i++)
		bias[i] = 4.0 * rand() / RAND_MAX - 2;

}

// Release the host-side parameter buffers.
void NN_layer::freeWeights(){

	free(weights);
	free(bias);
	weights = NULL;
	// BUG FIX: `bias` was freed but never nulled, leaving a dangling pointer
	// (a double free risk if this ever runs twice).
	bias = NULL;

}

void NN_layer::freeError(){

	for (int j = 0; j<inputs_num; j++){

		free(error[j]);
		
	}

	free(error);
	error = NULL;

}

// Release every attached input/output buffer and the pointer tables.
// BUG FIX: `input` and `output` start out NULL (see allocate()); the original
// indexed them unconditionally, which is undefined behavior on a NULL table.
// NOTE(review): input[k] holds pointers handed in by set_input(); freeing
// them here means the layer owns them — confirm callers heap-allocate the
// data they pass in.
void NN_layer::freeIO(){

	for (int k = 0; k < inputs_num; k++){

		if (input != NULL) free(input[k]);
		if (output != NULL) free(output[k]);

	}

	free(input);
	free(output);
	input = NULL;
	output = NULL;

}

// Detach and free all attached inputs/outputs so the layer can accept a
// fresh batch via set_input().
// NOTE: the order matters — freeIO() iterates over inputs_num, so the
// counter must be cleared only afterwards. NOTE(review): the per-input
// `error` buffers are not released here; confirm whether freeError()
// should also be called before resetting the counter.
void NN_layer::reset_input(){

	freeIO();
	inputs_num = 0;

}