/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package javaapplication1;

import java.util.ArrayList;
import java.util.List;

/**
 *
 * @author jonathan
 */
public class ProofOfConcept {
    
// Maximum number of generations the genetic algorithm will run for.
static int numGenerations = 50000;
// Population size: number of candidate neural nets per generation.
static int numAnns = 500;
        
    /**
     * Entry point: loads the configuration, builds the XOR training set, and
     * hands it to {@link #BPTrain} for backpropagation training.
     *
     * (An earlier genetic-algorithm experiment, {@link #EvolveANN}, verifiably
     * evolved a working XOR net; a sample winning weight vector was
     * [-0.879906733561862, -0.9065901753523702, -0.056300761992946535,
     *  -0.2967616613630272, -0.25637138031381346, 0.8616698752367078,
     *  -0.22866615180254446, 0.6141830839932386, -0.3238834608044727].)
     */
    public static void main(String[] args) {

        // load config file
        Config.load("config.ini");

        ////////////////////////////////////////////
        //                Back Propagation        //
        ////////////////////////////////////////////

        // inputs.get(k)[i] is the value fed to input node k on training
        // round i — one array per input node, all the same length, so that
        // index [i] across the arrays forms one complete training example.
        // The ArrayList allows any number of input nodes, for generality.
        ArrayList<double[]> inputs = new ArrayList<double[]>();
        inputs.add(new double[] {0, 0, 1, 1}); // input node 1
        inputs.add(new double[] {0, 1, 1, 0}); // input node 2

        // expected_outputs is laid out the same way: one array per output
        // node; here a single output node carrying XOR of the two inputs.
        ArrayList<double[]> expected_outputs = new ArrayList<double[]>();
        expected_outputs.add(new double[] {0, 1, 0, 1});

        NeuralNet ANN = new NeuralNet();
        // ANN should be usable after this is run
        ANN = BPTrain(ANN, inputs, expected_outputs, 4);

        // TODO: verify the trained net by running each input pair through it
        // (output > 0.5 reads as 1, < 0.5 reads as 0).
    }
    
    /**
     * Breeds a child net from two parents via crossover plus mutation.
     *
     * The book recommends breaking between nodes rather than inside a node;
     * this instead uses a lazy ("faux multi-point") uniform crossover where
     * each weight independently comes from either parent with equal odds.
     *
     * @param Alpha first parent (also determines the weight-vector length)
     * @param Beta  second parent; assumed to have the same number of weights
     * @return a new net carrying the crossed-over, mutated weight vector
     */
    static NeuralNet Genetic(NeuralNet Alpha, NeuralNet Beta) {
        NeuralNet ret = new NeuralNet();
        int size = Alpha.getNumWeights();

        List<Double> a = Alpha.getWeights();
        List<Double> b = Beta.getWeights();
        List<Double> c = new ArrayList<Double>(size);

        for (int i = 0; i < size; i++) {
            // uniform crossover: coin-flip per weight
            c.add(Config.r.nextBoolean() ? a.get(i) : b.get(i));
            // mutation: with probability mutationRate, replace this weight
            // with a fresh uniform draw from [-1, 1)
            if (Config.r.nextDouble() < Config.mutationRate) {
                c.set(i, Config.r.nextDouble() * 2 - 1);
            }
        }

        // NOTE(review): earlier experimental code used the return value of
        // putWeights (ANN = ANN.putWeights(...)); here it is assumed to
        // mutate ret in place — confirm against NeuralNet.
        ret.putWeights(c);
        return ret;
    }
    
    //The roulette wheel would converge faster if segmented differently, with distinct levels between nets scoring 3 and those scoring 4: there are many 3s (trivial) and few 4s (perfect), so a perfect net occupies only a tiny slice of the wheel and is easily lost. This could be fixed with a simple change to the fitness function — e.g. a logarithmic increase in fitness to give better nets a larger share of the wheel (worth investigating later).
    
    /**
     * Builds a cumulative-probability ("roulette wheel") array from raw
     * fitness scores: rw[i] is the probability that a uniform draw in [0,1)
     * selects an index {@code <= i}. Feed the result to useRouletteWheel.
     *
     * Fixes over the original: a zero total fitness no longer divides by zero
     * (which produced an all-NaN wheel and an out-of-range selection
     * downstream) — it falls back to a uniform wheel; and the final slot is
     * pinned to exactly 1.0 so floating-point accumulation error can never
     * leave a gap at the top of the wheel.
     *
     * @param numAnns number of candidates (entries of fitness to use)
     * @param fitness non-negative fitness score per candidate
     * @return cumulative distribution over the candidates, ending at 1.0
     */
    static double[] makeRouletteWheel(int numAnns, double[] fitness) {
        double[] rw = new double[numAnns];

        // total fitness = size of the pie
        double total = 0;
        for (int i = 0; i < numAnns; i++) total += fitness[i];

        if (total <= 0) {
            // degenerate case: nobody has any fitness — use a uniform wheel
            for (int i = 0; i < numAnns; i++) {
                rw[i] = (i + 1) / (double) numAnns;
            }
        } else {
            // running cumulative share of the pie
            double cumulative = 0;
            for (int i = 0; i < numAnns; i++) {
                cumulative += fitness[i] / total;
                rw[i] = cumulative;
            }
        }

        // pin the top of the wheel so every possible draw lands on a slot
        if (numAnns > 0) rw[numAnns - 1] = 1.0;
        return rw;
    }
    
    /**
     * Spins the roulette wheel: draws a uniform random value in [0,1) and
     * returns the first index whose cumulative slot covers it.
     *
     * Bug fix: the original fell off the end of the loop and returned
     * {@code numAnns} — an out-of-range index that crashes the caller
     * (e.g. {@code oldANNs[x]} in EvolveANN) — whenever rounding error or a
     * NaN wheel left {@code rw[numAnns-1]} below the drawn value. The intent
     * ("default to use last item") is now actually implemented.
     *
     * @param numAnns number of slots on the wheel
     * @param rw      cumulative distribution from makeRouletteWheel
     * @return a valid index in [0, numAnns)
     */
    static int useRouletteWheel(int numAnns, double[] rw) {
        double spin = Config.r.nextDouble(); // valid position on the wheel
        for (int i = 0; i < numAnns; i++) {
            if (spin < rw[i]) return i;
        }
        // rounding (or a NaN wheel) left every slot below the spin:
        // default to the last candidate rather than an out-of-bounds index
        return numAnns - 1;
        //return 4; //determined by fair dice roll
    }

    /**
     * Evolves a population of NeuralNets toward solving XOR with a simple
     * genetic algorithm: each generation every net is scored on all four XOR
     * cases, then roulette-wheel selection plus crossover/mutation
     * ({@link #Genetic}) builds the next generation. Prints progress and
     * exits the JVM with the winning weight vector once the best net
     * classifies all four cases correctly.
     *
     * Fitness per net = (number of correct cases) + (sum of margins from the
     * 0.5 decision threshold on the correct cases). Output > 0.5 reads as 1,
     * output < 0.5 reads as 0.
     */
    private static void EvolveANN() {

        // XOR truth table used as the fitness test cases
        final double[][] testInputs = { {1, 1}, {0, 1}, {1, 0}, {0, 0} };
        final double[] testTargets = { 0, 1, 1, 0 };

        // initialize the population with randomly weighted nets
        NeuralNet[] ANNs = new NeuralNet[numAnns];
        for (int i = 0; i < numAnns; i++) {
            ANNs[i] = new NeuralNet();
        }

        // reusable buffer for the current test input pair
        List<Double> input = new ArrayList<Double>();

        for (int generation = 0; generation < numGenerations; generation++) {

            double[] fitness = new double[numAnns];
            int[] countright = new int[numAnns];

            // score every net on every XOR test case
            for (int t = 0; t < testInputs.length; t++) {
                input.clear();
                input.add(testInputs[t][0]);
                input.add(testInputs[t][1]);
                for (int i = 0; i < numAnns; i++) {
                    double out = ANNs[i].update(input).get(0);
                    boolean correct =
                            (testTargets[t] > 0.5) ? out > 0.5 : out < 0.5;
                    if (correct) {
                        // reward the margin from the 0.5 threshold...
                        fitness[i] += Math.abs(out - 0.5);
                        countright[i]++;
                    }
                }
            }
            // ...plus a flat +1 per correct case
            for (int i = 0; i < numAnns; i++) fitness[i] += countright[i];

            // find the fittest net of this generation
            int best = 0;
            for (int i = 1; i < numAnns; i++) {
                if (fitness[i] > fitness[best]) best = i;
            }

            // human-readable per-case score for the best net: one decimal
            // digit per case, so 1.111 means all four cases correct
            double bestf = 0;
            double digit = 1.0;
            int bestRight = 0;
            for (int t = 0; t < testInputs.length; t++) {
                input.clear();
                input.add(testInputs[t][0]);
                input.add(testInputs[t][1]);
                double out = ANNs[best].update(input).get(0);
                boolean correct =
                        (testTargets[t] > 0.5) ? out > 0.5 : out < 0.5;
                if (correct) {
                    bestf += digit;
                    bestRight++;
                }
                digit /= 10.0;
            }

            System.out.println("Generation " + generation);
            System.out.println("best:" + best + " :: " + fitness[best] + " :: " + bestf);

            // best of the generation gets all four right (heck yes!) —
            // the integer count avoids the float-equality pitfall of the
            // original `bestf == 1.111` comparison
            if (bestRight == testInputs.length) {
                System.out.println("Winner winner chicken dinner");
                System.out.println(ANNs[best].getWeights());
                System.exit(0);
            }

            /////////////
            // Genetic //
            /////////////

            // roulette wheel selection
            double[] rw = makeRouletteWheel(numAnns, fitness);

            // keep the old population for reproduction, breed a new one
            NeuralNet[] oldANNs = ANNs;
            ANNs = new NeuralNet[numAnns];
            for (int i = 0; i < numAnns; i++) {
                int x = useRouletteWheel(numAnns, rw);
                int y = useRouletteWheel(numAnns, rw);
                ANNs[i] = Genetic(oldANNs[x], oldANNs[y]);
            }
        }
    }

    /**
     * Trains the given net by backpropagation over the supplied training set.
     *
     * NOT YET IMPLEMENTED. The original stub returned {@code null}, which
     * would NullPointerException any caller that uses the result (main
     * assigns {@code ANN = BPTrain(...)}); until backprop is written, the
     * net is returned untrained so callers at least receive a usable object.
     *
     * @param ANN              the net to train
     * @param inputs           one array per input node; element [i] of each
     *                         array is that node's value on training round i
     * @param expected_outputs one array per output node, laid out the same way
     * @param uniqueSets       number of training rounds, i.e. the length of
     *                         each sub-array
     * @return the trained net (currently: the net unchanged)
     */
    private static NeuralNet BPTrain(NeuralNet ANN, ArrayList<double[]> inputs,
            ArrayList<double[]> expected_outputs, int uniqueSets) {
        // TODO: get num inputs and num outputs from config, then implement
        // the forward pass / error backpropagation loop over uniqueSets rounds.
        return ANN;
    }
    
}
