from math import *
import copy
import random
import pickle
class regression_backprop(object):
    """Feed-forward, multilayer neural network trained by back-propagation.

    Layers may each have a different node count.  Structures are indexed:
      weights[layer][dest][src] - layer 0 connects the input to the first
          node layer; dest is the node in layer+1, src the node in layer.
      deltas[layer][node]       - layer 0 is the first *node* layer (the
          input layer has no deltas).
      output[layer][node]       - layer 0 IS the input layer (one more list
          than deltas); the input is copied here so the same forward-pass
          code works for every layer.
      biases[layer][dest]       - one bias weight per non-input node.
    """
    def __init__(self,node_list,alpha=.1,bias_enabled=True,rand_scale=100,use_tanh=False):
        """node_list: node count per layer (input first); needs >= 2 layers.
        alpha: learning rate.  bias_enabled: include bias terms.
        rand_scale: initial weights are uniform in +/-0.5/rand_scale.
        use_tanh: tanh activation instead of the logistic sigmoid.
        """
        assert(len(node_list) > 1)
        self.rand_scale=rand_scale
        self.alpha=alpha
        # Stored as 0/1 so the bias term can be multiplied in unconditionally.
        self.bias_enabled=1 if bias_enabled else 0
        if use_tanh:
            self.sig=tanh
            self.dsig=self.dtanh
        else:
            self.sig=self.sigmoid
            self.dsig=self.dsigmoid

        # Weight structure: weights[layer][dest][src].
        self.weights=[[[0.0]*node_list[i] for _ in range(node_list[i+1])]
                      for i in range(len(node_list)-1)]
        # Delta structure: deltas[layer][node] (no entry for the input layer).
        self.deltas=[[0.0]*node_list[i+1] for i in range(len(node_list)-1)]
        # Output structure: output[layer][node], layer 0 is the input itself.
        self.output=[[0.0]*n for n in node_list]
        # Bias weights, one per non-input node (treated as a special case).
        self.biases=[[0.0]*node_list[i+1] for i in range(len(node_list)-1)]
        self._randomize_weights()

    def _randomize_weights(self):
        """Initialize all weights and biases uniformly in +/-0.5/rand_scale."""
        for layer in range(len(self.weights)):
            for dest in range(len(self.weights[layer])):
                self.biases[layer][dest]=(.5-random.random())/self.rand_scale
                for src in range(len(self.weights[layer][dest])):
                    self.weights[layer][dest][src]=(.5-random.random())/self.rand_scale

    def _compute_output(self,input):
        """Forward pass: fill self.output for every layer from *input*."""
        assert(len(input)==len(self.weights[0][0]))  # Input must fit our dimensionality
        # Copy (rather than alias) so later mutation of the caller's list
        # cannot silently change our recorded activations.
        self.output[0]=list(input)
        for layer in range(len(self.weights)):
            for dest in range(len(self.weights[layer])):
                # Weighted sum of the previous layer minus the (optional) bias.
                total=sum(o*w for o,w in zip(self.output[layer],self.weights[layer][dest]))
                total-=self.biases[layer][dest]*self.bias_enabled
                self.output[layer+1][dest]=self.sig(total)

    def _compute_deltas(self,expected_out):
        """Backward pass: fill self.deltas given the target output.

        Requires a prior _compute_output() call for the same sample.
        """
        assert(len(expected_out)==len(self.weights[-1]))  # Output must fit our dimensionality
        # Output layer delta: error scaled by the activation derivative.
        for out in range(len(self.deltas[-1])):
            self.deltas[-1][out]=(expected_out[out]-self.output[-1][out])*self.dsig(self.output[-1][out])
        # Hidden layer delta(s), if any, propagated back-to-front.
        # (Was range(...).reverse(), which raises AttributeError on Python 3.)
        for layer in reversed(range(len(self.deltas)-1)):
            for out in range(len(self.deltas[layer])):
                back_err=sum(w[out]*d for w,d in zip(self.weights[layer+1],self.deltas[layer+1]))
                self.deltas[layer][out]=self.dsig(self.output[layer+1][out])*back_err

    def _update_weights(self):
        """Apply one gradient step using the current deltas and outputs."""
        for layer in range(len(self.weights)):
            for dest in range(len(self.weights[layer])):
                # Bias enters the net negatively, hence the opposite sign here.
                self.biases[layer][dest]-=self.alpha*self.deltas[layer][dest]
                for src in range(len(self.weights[layer][dest])):
                    self.weights[layer][dest][src]+=self.alpha*self.deltas[layer][dest]*self.output[layer][src]

    def save(self,file_name):
        """Pickle (weights, biases) to *file_name*."""
        # Binary mode + context manager (file() and text-mode pickling are
        # Python 2 only and leak the handle on error).
        with open(file_name,'wb') as f:
            pickle.dump((self.weights,self.biases),f)

    def load(self,file_name):
        """Restore (weights, biases) from a file written by save().

        NOTE: pickle.load executes arbitrary code - only load trusted files.
        """
        with open(file_name,'rb') as f:
            self.weights,self.biases=pickle.load(f)

    @staticmethod
    def dtanh(x):
        """Derivative of tanh expressed in terms of the tanh OUTPUT x."""
        return 1-x*x
    @staticmethod
    def sigmoid(x):
        """Logistic sigmoid 1/(1+e^-x)."""
        return 1.0/(1.0+exp(-x))
    @staticmethod
    def dsigmoid(x):
        """Derivative of the sigmoid expressed in terms of its OUTPUT x."""
        return x*(1-x)

    def train(self,input,expected_output,num_iters=10000):
        """Stochastic training: num_iters random samples from the dataset.

        input/expected_output are parallel lists of input/target vectors.
        Prints a running average / max squared error every 100 iterations.
        """
        assert(len(input)==len(expected_output))
        max_error=0
        avg_error_sum=0
        sample_size=100.0
        for i in range(num_iters):
            j=random.randint(0,len(input)-1)
            self._compute_output(input[j])
            self._compute_deltas(expected_output[j])
            self._update_weights()
            if i%sample_size==0 and i>0:
                # Was print('...') % (...), a TypeError on Python 3.
                print('T[%d] AvgError:%f MaxError:%f' % (i,avg_error_sum/sample_size,max_error))
                avg_error_sum=0
                max_error=0
            # Sum of squared differences for this sample.
            tmp_ssd=sum((x-y)**2 for x,y in zip(expected_output[j],self.output[-1]))
            avg_error_sum+=tmp_ssd
            max_error=max(max_error,tmp_ssd)

    def test(self,input):
        """Run a forward pass and return the output layer's activations."""
        self._compute_output(input)
        return self.output[-1]

def make_function_dataset(fun,lower_bound=-1,higher_bound=1,iters=1000):
    """Build a regression dataset by sampling a 1-D function.

    fun: callable mapping a float to a float.
    lower_bound/higher_bound: uniform sampling range for the inputs.
    iters: number of samples to generate.
    Returns (inputs, outputs): parallel lists of one-element vectors,
    matching the shape train()/test() expect.
    """
    # Renamed from 'input'/'output' to avoid shadowing the builtin.
    inputs=[]
    outputs=[]
    for _ in range(iters):
        x=random.uniform(lower_bound,higher_bound)
        inputs.append([x])
        outputs.append([fun(x)])
    return inputs,outputs

xor_prob=[[[0,0],[0,1],[1,0],[1,1]],[[0],[1],[1],[0]]]

def load_all():
    """Unpickle and return the contents of 'all_runs.txt'.

    NOTE: pickle.load executes arbitrary code - only load trusted files.
    """
    # open() in binary mode with a context manager replaces the Python 2-only
    # file() builtin and text-mode pickle read, and closes the handle on error.
    with open('all_runs.txt','rb') as f:
        return pickle.load(f)
