// BigData course project
// Serial version of training algorithm for SOM (C impl)
// Main module

#define _POSIX_C_SOURCE 200809L

#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <inttypes.h>

// Largest finite single-precision float (equivalent to FLT_MAX from <float.h>)
#define MAXFLOAT   3.40282347e+38F
// NOTE(review): max() and pow2() evaluate their arguments twice — do not
// call them with side-effecting expressions (current call sites are safe)
#define max(a,b)   ((a) > (b)? (a) : (b))
#define pow2(a)    ((a)*(a))
#define s2i(s)     (atoi(s))          // string -> int (no error reporting)
#define s2f(s)     (strtof(s,NULL))   // string -> float (no error reporting)
// NOTE(review): this macro shadows math.h's log(); harmless here because the
// code only ever calls logf(), which is unaffected
#define log(...)   fprintf(stderr, __VA_ARGS__)
// Debug trace, gated on the global DEBUG flag. Unbraced `if` — beware
// dangling-else if ever used inside an if/else at a call site.
#define deb(...)   if (DEBUG) log(__VA_ARGS__)

// A single neuron of the map: its weight vector plus its fixed
// position in the 2-D mesh.
typedef struct
{
  float * w;   // weights in R^n (length = som dim; owned by the neuron)
  float x,y;   // coordinates in the mapping mesh (map)
} neuron_type;

// The self-organizing map: a size_x * size_y mesh of neurons plus the
// parameters shared by the training routines.
typedef struct
{
  int dim;              // dimension of weight vectors (the "n" in R^n)
  float min_w, max_w;   // range of values to initialize weight vector
  int size_x, size_y;   // sizes of the mapping mesh
  neuron_type *** map;  // the mapping mesh, indexed map[y][x]
  float r;              // topological radius of the mesh
  float max_r;          // decay time constant, used in calculation of sigma
  float * dw;           // wi - w, where wi is current training vector and w is the winner (scratch, refreshed each iteration by learn_iter)
} som_type;

// An in-memory training set, loaded from a text file by read_trainset().
typedef struct
{
  int dim;    // dimension of the training vectors
  int size;   // number of training vectors
  float ** c; // coordinates of training vectors, leading dimension is vector index
} trainset_type;

// Uniform pseudo-random float in [min, max], driven by rand().
// Quality is limited by RAND_MAX granularity; fine for weight init.
float my_rand(float min, float max)
{
  float scale = (max - min) / RAND_MAX;
  return scale * rand() + min;
}

int DEBUG = 0;   // global debug flag (set from argv[11] in main); gates the deb() macro

// returns the current time in millisecs
// Returns the current wall-clock time in milliseconds since the epoch.
// NOTE(review): the millisecond epoch value (~1.7e12) overflows a 32-bit
// long — assumes an LP64 target; confirm if porting.
long get_currtime()
{
  struct timespec now;
  clock_gettime(CLOCK_REALTIME, &now);
  long ms = (long) round(now.tv_nsec / 1.0e6);
  return now.tv_sec * 1000 + ms;
}

// Reads a training set of train_size vectors, each of dimension
// train_dim, from the whitespace-separated text file ts_file.
// Exits the program on I/O failure (codes 2/3) or allocation failure.
// Caller owns the returned structure and its nested arrays.
trainset_type * read_trainset(int train_dim, int train_size, char * ts_file)
{
  int i,j;
  trainset_type * ts = malloc(sizeof *ts);   // BUGFIX: allocations were unchecked
  if ( ts == NULL )
  {
    log("Out of memory reading trainset\n");
    exit(2);
  }
  ts->dim  = train_dim;
  ts->size = train_size;
  ts->c    = malloc(sizeof *ts->c * ts->size);
  if ( ts->c == NULL )
  {
    log("Out of memory reading trainset\n");
    exit(2);
  }
  FILE * f = fopen(ts_file,"r");
  if ( f == NULL )
  {
    log("Could not read trainset file %s\n", ts_file);
    exit(2);
  }
  for(i=0; i<ts->size; i++)
  {
    ts->c[i] = malloc(sizeof *ts->c[i] * ts->dim);
    if ( ts->c[i] == NULL )
    {
      log("Out of memory reading trainset\n");
      exit(2);
    }
    for(j=0; j<ts->dim; j++)
      if (fscanf(f,"%f", &(ts->c[i][j])) != 1)
      {
        log("Could not read trainset value (%d,%d)\n", i, j);
        exit(3);
      }
  }
  fclose(f);
  return ts;
}

// creates an initialized neuron, using given range of values for its
// random weight vector in R^dim, and the given coordinates in the map
neuron_type * create_neuron(int dim, float min_w, float max_w, float x, float y)
{
  int j;
  neuron_type * n = (neuron_type *) malloc(sizeof(neuron_type));
  n->w = (float *) malloc(sizeof(float) * dim);
  for(j=0; j<dim; j++)
    n->w[j] = my_rand(min_w, max_w);
  n->x = x;
  n->y = y;
  return n;
}

// create an initialized SOM with given dimension for training
// vectors (per training set), range of values to initialize those weight vectors and
// sizes for the mapping mesh 
// Creates an initialized SOM: weight-vector dimension taken from the
// training set, random weights uniform in [min_w, max_w], a mesh of
// size_x * size_y neurons, and the radius/decay parameters used by
// sigma(). Also seeds rand(). Exits on allocation failure.
som_type * create_som(trainset_type * ts, int size_x, int size_y, float min_w, float max_w)
{
  int i,j;
  som_type * som = malloc(sizeof *som);   // BUGFIX: allocations were unchecked
  if ( som == NULL )
  {
    log("Out of memory creating som\n");
    exit(2);
  }
  som->dim = ts->dim;
  som->min_w = min_w;
  som->max_w = max_w;
  som->size_x = size_x;
  som->size_y = size_y;
  som->map = malloc(sizeof *som->map * som->size_y);
  if ( som->map == NULL )
  {
    log("Out of memory creating som\n");
    exit(2);
  }
  srand(time(NULL));   // seed the generator once, before any neuron draws weights
  for(j=0; j<som->size_y; j++)
  {
    som->map[j] = malloc(sizeof *som->map[j] * som->size_x);
    if ( som->map[j] == NULL )
    {
      log("Out of memory creating som\n");
      exit(2);
    }
    for(i=0; i<som->size_x; i++)
      som->map[j][i] = create_neuron(som->dim, min_w, max_w, (float) i, (float) j);
  }
  som->r = max((float) size_x, (float) size_y) / 2.f;
  // NOTE(review): logf(r) is 0 when r == 1 (i.e. a 2x2-or-smaller mesh),
  // making max_r infinite — confirm meshes are always larger than 2x2
  som->max_r = (float) ts->size / logf(som->r);
  som->dw = calloc(som->dim, sizeof *som->dw);   // zero-initialized scratch (replaces malloc+memset)
  if ( som->dw == NULL )
  {
    log("Out of memory creating som\n");
    exit(2);
  }
  return som;
}

// the learning rate (called alpha in formula); decreases
// exponentially with iteration (t). it also needs to know the
// total number of iterations (max_t)
float alpha(int t, int max_t)
{
  return 0.5f * expf((float) -t / (float) max_t);
}

// the neighbourhood ratio (called sigma in formula); which
// decreases exponentially with iteration (t). it also needs to know
// the r,max_r parameters of the som
float sigma(int t, float r, float max_r)
{
  return r * expf(-t / max_r);
}

// dumps a neuron
void dump_neuron(int dim, neuron_type * n)
{
  int i;
  deb("neuron at [%d,%d]\n", (int) n->x+1, (int) n->y+1);
  for(i=0; i<dim; i++)
    deb("%.6f ", n->w[i]);
  deb("\n");
}

// euclidean distance between the i-th training vector and the given
// neuron's weight vector
// Euclidean distance between the i-th training vector and the given
// neuron's weight vector. Emits a per-component trace when DEBUG is set.
float dist(trainset_type * ts, int i, neuron_type * n)
{
  float acc = 0.f;
  deb("dist: %d\n", i);
  dump_neuron(ts->dim, n);
  for(int j = 0; j < ts->dim; j++)
  {
    float diff = ts->c[i][j] - n->w[j];
    acc += diff * diff;
    deb("dist: (%0.6f - %0.6f)^2 = %0.6f -> %0.6f\n",
          ts->c[i][j], n->w[j], diff * diff, acc);
  }
  deb("dist: sqrt(%0.6f) = %0.6f\n", acc, sqrtf(acc));
  return sqrtf(acc);
}

// euclidean distance in the map (mesh), between given neurons. the
// neuron v is though as the winner of current iteration, so a side
// effect of this function is to update k's dw
// Euclidean distance between two neurons in map (mesh) coordinates.
// Pure function — despite the old comment, it has no side effects on dw.
float dist_map(neuron_type * k, neuron_type * v)
{
  float dx = k->x - v->x;
  float dy = k->y - v->y;
  return sqrtf(dx * dx + dy * dy);
}

// calculates the influence (eta in formula) of a neuron (k)
// respect to the other (v), where the second is thought as the center
// of a neighbourhood. it requires to know also current iteration time
// (t), as well as the som itself (to grab some parameters saved there)
// Neighbourhood influence (eta/theta in the SOM literature) of neuron k
// relative to the winner v at iteration t: a Gaussian over the mesh
// distance d, exp(-d^2 / (2*sigma(t)^2)).
// BUGFIX: the denominator previously used 2*sigma instead of the
// standard 2*sigma^2 of Kohonen's Gaussian neighbourhood function,
// which distorts the neighbourhood width as sigma decays.
float eta(int t, neuron_type * v, neuron_type * k, som_type * som)
{
  float s = sigma(t, som->r, som->max_r);
  return expf( -pow2(dist_map(v,k)) / (2 * pow2(s)) );
}

// given the winning neuron v, updates the weights of neuron k
// needs to know iteration (t), and maximum number of iters (max_t),
// as well as the som itself (to grab r and max_r params, needed for
// calculation)
// Given the winning neuron v, moves neuron k's weights towards the
// current training vector: w += alpha(t) * eta(k,v,t) * dw, where
// som->dw holds (input - winner_weights), precomputed by learn_iter.
void update_weights(int t, neuron_type * v, neuron_type * k, int max_t, som_type * som)
{
  int j;
  // PERF: alpha and eta do not depend on j — compute the scale once
  // instead of once per weight component (was recomputed every j)
  float scale = alpha(t,max_t) * eta(t,v,k,som);
  for(j=0; j<som->dim; j++)
    k->w[j] += scale * som->dw[j];
}

// find closest neuron in som to i-th training vector (one with min distance)
// Finds the neuron whose weight vector is closest (euclidean distance)
// to the i-th training vector. Returns NULL only in the pathological
// case that no distance is below MAXFLOAT (empty mesh or NaN weights).
neuron_type * find_min(int i, trainset_type * ts, som_type * som)
{
  neuron_type * v = NULL;   // BUGFIX: was uninitialized -> UB when no distance beats MAXFLOAT
  int xi,yi;
  float min_d = MAXFLOAT;
  float d;
  for(yi=0; yi<som->size_y; yi++)
    for(xi=0; xi<som->size_x; xi++)
    {
      d = dist(ts, i, som->map[yi][xi]);
      deb("[%d,%d] -> %0.6f\n", xi, yi, d);
      if ( d < min_d )
      {
        min_d = d;
        v = som->map[yi][xi];
      }
    }
  deb("winner dist %0.6f\n", min_d);
  if ( v != NULL )
    dump_neuron(som->dim, v);
  return v;
}

// iteration i-th of the learning algorithm
void learn_iter(int i, trainset_type * ts, som_type * som)
{
  int j, xi, yi;

  // each each time, present input i-th and select he winner neuron
  neuron_type * v = find_min(i, ts, som);
  
  // save the difference of input vs winner, to reuse in calculation
  for(j=0; j<som->dim; j++)
    som->dw[j] = ts->c[i][j] - v->w[j];

  // update the weights of the winner and its neighbours
  for(yi=0; yi<som->size_y; yi++)
    for(xi=0; xi<som->size_x; xi++)
      update_weights(i, v, som->map[yi][xi], ts->size, som);
}

// dumps som state (pretty much, one weight vector per line)
void dump_som(som_type * som, char * fname)
{
  int xi, yi, j;        
  FILE * f = fopen(fname,"w");
  for(xi=0; xi<som->size_x; xi++)
    for(yi=0; yi<som->size_y; yi++)
    {
      for(j=0; j<som->dim; j++)
        fprintf(f,"%.6f ", som->map[yi][xi]->w[j]);
      fprintf(f,"\n");
    }
  fclose(f);  
}

// main program: learning algorithm of the som
// Main program: parses the command line, loads the training set,
// builds the som, runs tsp full passes over the training set, and
// dumps the initial and final som states.
int main(int argc, char * argv[])
{
  int i, k, tsp, ec = 0;
  long start_t, end_t, iter_start_t, iter_end_t, show_progress;
  trainset_type * ts;
  som_type * som;
  if ( argc != 13 )
  {
    fprintf(stderr,
            "Usage: %s <dim> <ts_size> <ts_file> <size_x> <size_y> <min_w> <max_w> <som_file_ini> <som_file_fin> <ts_passes> <debug> <show_progress>\n",
            argv[0]);
    ec = 1;
  }
  else
  {
    DEBUG = s2i(argv[11]);
    show_progress = s2i(argv[12]);
    if ( show_progress <= 0 )   // BUGFIX: i % 0 below would be undefined behavior
      show_progress = 1;
    log("reading training set ... \n");
    ts = read_trainset(s2i(argv[1]), s2i(argv[2]), argv[3]);
    log("creating som ... \n");
    som = create_som(ts, s2i(argv[4]), s2i(argv[5]), s2f(argv[6]), s2f(argv[7]));
    log("dumping initial som ... \n");
    dump_som(som,argv[8]);
    tsp = s2i(argv[10]);
    log("running learning algorithm ... \n");
    // BUGFIX: start_t was reset at every pass, so the final "total
    // training time" only covered the last pass; time the whole loop
    start_t = get_currtime();
    for(k=1; k<=tsp; k++)
    {
      if (tsp<100 || !(k % (tsp/100)))
        log("pass %-3d ...\n", k);
      for(i=0; i<ts->size; i++)
      {
        iter_start_t = get_currtime();
        learn_iter(i, ts, som);
        iter_end_t = get_currtime();
        if ( !(i%show_progress) )
          // BUGFIX: long values were printed with %d (undefined behavior)
          log("pass %-3d iter %-4d  (%ld ms)...\n", k, i, iter_end_t-iter_start_t);
      }
    }
    end_t = get_currtime();
    log("dumping final som ... \n");
    dump_som(som,argv[9]);
    log("total training time = %ld secs \n", (end_t-start_t)/1000);
  }
  return ec;
}
