
//	this program implements a neural network

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//#include "argc.h"
#include "network.h"

static int prt_epoch( int e, int prt_opts, double err,
		      int pats, int outs, int hids, int ins )
       {
	   // Print the epoch number e and the current error, then dump every
	   // learning pattern: its input bits followed by the network's actual
	   // output activations after a fresh forward pass.
	   // prt_opts is accepted for interface compatibility but is not used.
	   // Returns 0 (status value; previously the function fell off the end,
	   // which is undefined behavior if the caller reads the result).
	   int i, p;
	   (void) prt_opts;                      // unused -- kept for caller compatibility
	   printf( "%4d: error=%e\n", e, err );
	   for( p = 0; p < pats; p++ )
	   {
	      present_pattern( p, ins );         // load pattern p into the input units
	      forward( outs, hids, ins );        // propagate activation to the outputs
	      printf( "  " );
	      for( i = 0; i < ins; i++ )
	         printf( "%d", (int) get_pattern( p, IN, i ) );
	      printf( " -->" );
	      for( i = 0; i < outs; i++ )
	         printf( " %e", get_unit( ACT, 2, i ) );   // layer 2: presumably the output layer -- matches prt_unit calls below
	      printf( "\n" );
	   }
	   return 0;
       }

static int prt_unit( int l, int u, int conns, int lng )
       {
	  // Print the state of unit u in layer l: its activation, and -- when
	  // lng (long format) is non-zero -- its incoming weights from layer
	  // l-1, its error term, and the accumulated link deltas.
	  // conns is the number of incoming connections to enumerate.
	  // Returns 0 (the function previously ended without a return
	  // statement despite its int type).
	  int i;
          printf( "\tact= %8.4lf", get_unit( ACT, l, u ) );
	  if ( lng )
	  {
	     printf( " links :" );
	     for( i = 0; i < conns; i++ )
		printf( " %8.4lf", get_link( WEIGHT, l, u, l - 1, i ) );
	     printf( "\n\terr= %8.4lf", get_unit( ERR, l, u ) );
	     printf( " deltas:" );
	     for( i = 0; i < conns; i++ )
		printf( " %8.4lf", get_link( DELTA, l, u, l - 1, i ) );
	  }
          printf( "\n" );
	  return 0;
       }

static int prt_pat( int p, int ins, int outs )
       {
	  // Print pattern p: its ins input values and outs target output
	  // values, one line each.
	  // Returns 0 (added; the int function previously had no return).
          int i;
	  printf( "Pattern %d\n", p );
	  printf( "In: " );
	  for( i = 0; i < ins; i++ )
	     printf( " %6.1lf", get_pattern( p, IN, i ) );
	  printf( "\n" );
	  printf( "Out:" );
	  for( i = 0; i < outs; i++ )
	     printf( " %6.1lf", get_pattern( p, OUT, i ) );
	  printf( "\n" );
	  return 0;
       }

int present_pattern( int p, int ins )
    {
       // Set the activation of the network's input units to the input part
       // of pattern p.  Only units 0..ins-1 are written; the extra unit at
       // index ins (presumably the bias unit, see the <= loops in forward())
       // is left untouched.
       // Returns 0 (added; the int function previously had no return).
       int u;
       for( u = 0; u < ins; u++ )
          set_unit( ACT, IN, u, get_pattern( p, IN, u ) );
       return 0;
    }



int forward( int outs, int hids, int ins )
    {
       // Forward pass: propagate the input-unit activations through the
       // hidden layer to the output layer using the logistic (sigmoid)
       // activation function 1/(1+exp(-net)).
       // Note: the inner sums run with "<=" so that the extra unit at index
       // ins (resp. hids) -- the bias unit -- contributes to each net input.
       // Returns 0 (added; the int function previously had no return).

       int ui, uh, uo;
       double net, out;

       // hidden-layer activations from the input layer (incl. bias at ins)
       for( uh = 0; uh < hids; uh++ )
       {
          net = 0;
          for( ui = 0; ui <= ins; ui++ )
             net += get_link( WEIGHT, HID, uh, IN, ui ) * get_unit( ACT, IN, ui );
          out = 1 / ( 1 + exp( -net ) );
          set_unit( ACT, HID, uh, out );
       }

       // output-layer activations from the hidden layer (incl. bias at hids)
       for( uo = 0; uo < outs; uo++ )
       {
          net = 0;
          for( uh = 0; uh <= hids; uh++ )
             net += get_link( WEIGHT, OUT, uo, HID, uh ) * get_unit( ACT, HID, uh );
          out = 1 / ( 1 + exp( -net ) );
          set_unit( ACT, OUT, uo, out );
       }

       return 0;
    }

double calc_error( int p, int outs )
    {
       // Compare the activation of the network's output units with the
       // target values (output part) of pattern p.  Each unit's ERR field
       // is set to (target - actual), and the sum of squared differences
       // over all output units is returned.
       //
       // Changes: removed the redundant "ei = 0;" before the assignment and
       // replaced pow(ei, 2) with a plain multiplication -- same value, no
       // libm call needed to square a double.

       int u;
       double sum_err = 0;
       double ei;

       for( u = 0; u < outs; u++ )
       {
          ei = get_pattern( p, OUT, u ) - get_unit( ACT, OUT, u );
          set_unit( ERR, OUT, u, ei );
          sum_err += ei * ei;
       }

       return sum_err;
    }



int backward( int outs, int hids, int ins )
    {
       // Backward pass: propagate the output-unit errors back to the hidden
       // layer, then ACCUMULATE the gradient contribution of the current
       // pattern into each link's DELTA slot (update() later converts the
       // accumulated value into a real weight delta and clears it).
       // The term act*(1-act) is the derivative of the logistic function
       // used in forward().
       // Returns 0 (added; the int function previously had no return).

       int ui, uh, uo;
       double err;

       // hidden-layer error terms (the bias unit at index hids is skipped:
       // it has no incoming weights to train through this path)
       for( uh = 0; uh < hids; uh++ )
       {
          err = 0;
          for( uo = 0; uo < outs; uo++ )
             err += get_unit( ERR, OUT, uo ) * get_unit( ACT, OUT, uo ) *
                    ( 1 - get_unit( ACT, OUT, uo ) ) * get_link( WEIGHT, OUT, uo, HID, uh );
          set_unit( ERR, HID, uh, err );
       }

       // accumulate the gradient for the input->hidden links in DELTA
       // (note the leading minus: DELTA holds +gradient of E, since
       // ERR is target-actual; update() applies -lr to it)
       for( ui = 0; ui <= ins; ui++ )
          for( uh = 0; uh < hids; uh++ )
             set_link( DELTA, HID, uh, IN, ui,
                       -( get_unit( ERR, HID, uh ) * get_unit( ACT, HID, uh ) *
                       ( 1 - get_unit( ACT, HID, uh ) ) * get_unit( ACT, IN, ui ) ) +
                       get_link( DELTA, HID, uh, IN, ui ) );

       // accumulate the gradient for the hidden->output links in DELTA
       for( uh = 0; uh <= hids; uh++ )
          for( uo = 0; uo < outs; uo++ )
             set_link( DELTA, OUT, uo, HID, uh,
                       -( get_unit( ERR, OUT, uo ) * get_unit( ACT, OUT, uo ) *
                       ( 1 - get_unit( ACT, OUT, uo ) ) * get_unit( ACT, HID, uh ) ) +
                       get_link( DELTA, OUT, uo, HID, uh ) );

       return 0;
    }

int clear_layer( int to_layer, int to_units,
		 int from_layer, int from_units )
    {
       // Stub: not implemented.  Parameters are intentionally unused until
       // an implementation is supplied; the casts silence compiler warnings.
       // Returns 0 so callers of this int function get a defined value
       // (previously the function ended without a return -- UB if used).
       (void) to_layer;
       (void) to_units;
       (void) from_layer;
       (void) from_units;
       return 0;
    }



int update( double lr, double mm, int outs, int hids, int ins )
    {
       // Update the weights of all connections from the gradients that
       // backward() accumulated in each link's DELTA slot.
       // For every link the same three steps are applied:
       //   1. DELTA  = mm*PDELTA - lr*DELTA   (momentum + learning-rate step)
       //   2. WEIGHT = WEIGHT + DELTA
       //   3. PDELTA = DELTA; DELTA = 0       (remember step, clear accumulator)
       // lr is the learning rate, mm the momentum factor.
       // Returns 0 (added; the int function previously had no return).

       int ui, uh, uo;

       // input -> hidden links (ui <= ins includes the bias unit)
       for( ui = 0; ui <= ins; ui++ )
          for( uh = 0; uh < hids; uh++ )
          {
             // convert the gradient stored in DELTA to the real delta
             set_link( DELTA, HID, uh, IN, ui,  mm * get_link( PDELTA, HID, uh, IN, ui )
                                              - lr * get_link(  DELTA, HID, uh, IN, ui ) );

             // apply the delta to the weight
             set_link( WEIGHT, HID, uh, IN, ui,  get_link( WEIGHT, HID, uh, IN, ui )
                                               + get_link(  DELTA, HID, uh, IN, ui ) );

             // remember this step for the momentum term, clear the accumulator
             set_link( PDELTA, HID, uh, IN, ui, get_link( DELTA, HID, uh, IN, ui ) );
             set_link(  DELTA, HID, uh, IN, ui, 0 );
          }

       // hidden -> output links (uh <= hids includes the bias unit)
       for( uh = 0; uh <= hids; uh++ )
          for( uo = 0; uo < outs; uo++ )
          {
             // convert the gradient stored in DELTA to the real delta
             set_link( DELTA, OUT, uo, HID, uh,  mm * get_link( PDELTA, OUT, uo, HID, uh )
                                               - lr * get_link(  DELTA, OUT, uo, HID, uh ) );

             // apply the delta to the weight
             set_link( WEIGHT, OUT, uo, HID, uh,  get_link( WEIGHT, OUT, uo, HID, uh )
                                                + get_link(  DELTA, OUT, uo, HID, uh ) );

             // remember this step for the momentum term, clear the accumulator
             set_link( PDELTA, OUT, uo, HID, uh, get_link( DELTA, OUT, uo, HID, uh ) );
             set_link(  DELTA, OUT, uo, HID, uh, 0 );
          }

       return 0;
    }

int debug_inverter( int outs, int hids, int ins, double lr )
    {
       // Debug helper: dump the state of hidden unit 0 and output unit 0
       // after initialization, after forward/backward on the first two
       // patterns, and after one update with lr=1.0, mm=0 -- then terminate
       // the whole program via exit(1).  Never returns.
       // NOTE(review): the lr parameter is not used -- update() is called
       // with a hard-coded 1.0; confirm whether lr was meant to be passed.
       int p;
       printf( "after initialization:\n" );
       // prt_unit args: layer 1 = hidden, layer 2 = output; +1 for the bias link
       printf( "   hidden unit 0:\n" ); prt_unit( 1, 0, ins  + 1, 1 );
       printf( "   output unit 0:\n" ); prt_unit( 2, 0, hids + 1, 1 );
       for( p = 0; p < 2; p++ )
       {
		  present_pattern( p, ins );
		  forward( outs, hids, ins );
		  calc_error( p, outs );
		  backward( outs, hids, ins);
		  printf( "\nactivations for pattern %d:\n", p );
		  printf( "   hidden unit 0:\n" ); prt_unit( 1, 0, ins  + 1, 1 );
		  printf( "   output unit 0:\n" ); prt_unit( 2, 0, hids + 1, 1 );
       }
       update( 1.0, 0, outs, hids, ins );
       printf( "\nafter an update with lr=1.0\n" );
       printf( "   hidden unit 0:\n" ); prt_unit( 1, 0, ins  + 1, 1 );
       printf( "   output unit 0:\n" ); prt_unit( 2, 0, hids + 1, 1 );
       // NOTE(review): exits with status 1 even on success -- presumably to
       // abort the normal training run after the dump; verify intent.
       exit(1);
    }

//main( argc, argv )
//int argc;
//char **argv;
//{
//	// main variables and their default values
//	int hids = 2, epochs = 10;
//	int l_pats = 4, t_pats = 0;
//	int modulo = 10, prt_opts = 0;
//	int ins = 4, outs = 4;
//	int method = BATCH;
//	double lr = 0.1, mm = 0.0, x0 = 1;
//	double tip1[ 4 ] = { 0 , 0 , 0 , 1 }, top1[ 4 ] = { 0 , 0 , 0 , 1 };
//	double tip2[ 4 ] = { 0 , 0 , 1 , 0 }, top2[ 4 ] = { 0 , 0 , 1 , 0 };
//	double tip3[ 4 ] = { 0 , 1 , 0 , 0 }, top3[ 4 ] = { 0 , 1 , 0 , 0 };
//	double tip4[ 4 ] = { 1 , 0 , 0 , 0 }, top4[ 4 ] = { 1 , 0 , 0 , 0 };
//
//	// variables to create the table
//	double talpha[6] = { 0.0 , 0.2 , 0.4 , 0.6 , 0.8 , 1.0 }; // momentum values
//	double teta[6] = { 5 , 3 , 1 , 0.3 , 0.1 , 0.001 }; // learning rates
//
//	int table[6][6];
//
//	int convergence;
//	int cnt;
//
//	// local variables
//	int i, j, p, e;
//	double error, preverror;
//
//	// process standard arguments
//	argc = std_opts( argc, argv, & hids, & epochs, & lr, & mm,
//			 & modulo, & prt_opts, & x0, & l_pats, & t_pats , & method );
//	// now, process the remaining problem-specific options
//	if ( argc > 0 )
//	{
//	   for( i = 0; i < argc; i++ )
//	      fprintf( stderr, "warning, unprocessed option: '%s'\n", argv[ i ] );
//	   fprintf( stderr, "\n" );
//	}
//
//	// start your programming here
//
//	for(i = 0; i < 6; i++) // loop over the learning rate
//		for(j = 0; j < 6; j++) // loop over the momentum value
//		{
//
//			make_nn(ins+1, hids+1, outs, l_pats+t_pats);
//			init_units(-x0, x0);
//
//			// set the learning patterns
//			set_cpattern(0, tip1, top1);
//			set_cpattern(1, tip2, top2);
//			set_cpattern(2, tip3, top3);
//			set_cpattern(3, tip4, top4);
//
//			lr = teta[i];
//			mm = talpha[j];
//
//			error = 1;
//			cnt   = 1;
//			convergence = CONVERGE;
//
//			for(e = 0; (error > EPSILON); e++)
//			{
//				preverror = error;
//				error = 0;
//				for(p = 0; p < l_pats; p++)
//				{
//					present_pattern(p, ins);
//					forward(outs, hids, ins);
//					error += calc_error(p, outs);
//					backward(outs, hids, ins);
//					if(method == ONLINE) // using online updating
//						update(lr, mm, outs, hids, ins);
//				}
//				if(method == BATCH) // using batch updating (should be default method)
//					update(lr, mm, outs, hids, ins);
//
//				if(e % 10000 == 0)
//					printf(".");
//
//				if(preverror < error)
//				{
//					cnt++;
//					if(cnt > CRITICAL)
//					{
//						convergence = DIVERGE;
//						break;
//					}
//				}
//
//
//			}
//
//			if(convergence)
//				table[i][j] = e;
//			else
//				table[i][j] = -1;
//
//			printf( "\n... finished the program - for mm = %2.1f and lr = %4.3f ...\n", mm, lr);
//			printf("... needed %d epochs ...\n", table[i][j]);
//
//			destroy_nn();
//
//		} // end of the loops
//
//
//	printf("\n  MM |                            LR                          \n");
//	printf(  "     | %7.3f | %7.3f | %7.3f | %7.3f | %7.3f | %7.3f \n",teta[0],teta[1],teta[2],teta[3],teta[4],teta[5]);
//	printf("-----------------------------------------------------------------\n");
//	for(j = 0; j < 6; j++)
//		printf(" %2.1f | %7d | %7d | %7d | %7d | %7d | %7d \n",talpha[j],table[0][j],table[1][j],table[2][j],table[3][j],table[4][j],table[5][j]);
//	printf("\n Notice: >> The value \"-1\" means that the process was aborted because it was diverging! <<\n");
//
//
//}

