from math import *
import copy
import random
import pickle
class regression_backprop(object):
    """Back-propagation training for a feed-forward, fully connected,
    multilayer neural network.

    Each layer may have a different number of nodes; tanh is used as the
    activation function throughout.  Weights are updated after every single
    training sample (stochastic gradient descent).
    """

    def __init__(self, node_list, alpha=.1):
        """node_list gives the node count per layer, input layer first;
        alpha is the learning rate applied during weight updates."""
        assert len(node_list) > 1  # need at least an input and an output layer
        self.alpha = alpha
        # Weight structure, indexed by [layer][dest node in layer+1][src node
        # in layer]; layer 0 is connected directly to the input.
        self.weights = [[[0.0] * node_list[i]
                         for _dest in range(node_list[i + 1])]
                        for i in range(len(node_list) - 1)]
        self._randomize_weights()
        # Delta structure, indexed by [layer][node].  Layer 0 here is the
        # first layer of computing nodes (the input nodes carry no delta).
        self.deltas = [[0.0] * node_list[i + 1]
                       for i in range(len(node_list) - 1)]
        # Output structure, indexed by [layer][node].  Unlike self.deltas,
        # layer 0 here IS the input layer: the input is copied into that first
        # list so the same forward-pass code handles every layer uniformly.
        self.output = [[0.0] * n for n in node_list]

    def _randomize_weights(self):
        """Initialize every weight to a small random value in [0, 0.01)."""
        for layer in self.weights:
            for dest in layer:
                for src in range(len(dest)):
                    dest[src] = random.random() / 100  # TODO make the scale tunable

    def _compute_output(self, input):
        """Run a forward pass, storing per-layer activations in self.output."""
        assert len(input) == len(self.weights[0][0])  # input must match layer-0 width
        self.output[0] = input  # NOTE: aliases the caller's list, not a copy
        for layer in range(len(self.weights)):
            for node in range(len(self.weights[layer])):
                # Weighted sum of the previous layer's outputs, then squash.
                activation = sum(o * w for o, w in
                                 zip(self.output[layer], self.weights[layer][node]))
                self.output[layer + 1][node] = self.sig(activation)

    def _compute_deltas(self, expected_out):
        """Back-propagate the error, storing per-node deltas in self.deltas."""
        assert len(expected_out) == len(self.weights[-1])  # output width must match
        # Output layer: delta = (target - actual) * sig'(actual).
        for node in range(len(self.deltas[-1])):
            self.deltas[-1][node] = ((expected_out[node] - self.output[-1][node])
                                     * self.dsig(self.output[-1][node]))
        # Hidden layers (if any), walked from the back toward the input.
        # BUGFIX: the old code called .reverse() on a range object, which
        # fails on Python 3; reversed(range(...)) works on both.
        for layer in reversed(range(len(self.deltas) - 1)):
            for node in range(len(self.deltas[layer])):
                downstream = sum(w[node] * d for w, d in
                                 zip(self.weights[layer + 1], self.deltas[layer + 1]))
                self.deltas[layer][node] = (self.dsig(self.output[layer + 1][node])
                                            * downstream)

    def _update_weights(self):
        """Apply one gradient step to every weight using the stored deltas."""
        for layer in range(len(self.weights)):
            for dest in range(len(self.weights[layer])):
                for src in range(len(self.weights[layer][dest])):
                    self.weights[layer][dest][src] += (self.alpha
                                                       * self.deltas[layer][dest]
                                                       * self.output[layer][src])

    def save(self, file_name):
        """Pickle the current weights to file_name.

        BUGFIX: file() no longer exists in Python 3, and pickle requires a
        binary-mode file; 'with' guarantees the handle is closed.
        """
        with open(file_name, 'wb') as f:
            pickle.dump(self.weights, f)

    def load(self, file_name):
        """Restore weights previously written by save().

        WARNING: pickle.load executes arbitrary code from a malicious file --
        only load files from a trusted source.
        """
        with open(file_name, 'rb') as f:
            self.weights = pickle.load(f)

    @staticmethod
    def sig(x):
        """Activation function (tanh)."""
        return tanh(x)
        #return 1/(1+exp(-x*a))

    @staticmethod
    def dsig(sig_out):
        """Derivative of the activation, expressed in terms of its OUTPUT
        (d/dx tanh(x) = 1 - tanh(x)**2), so no extra forward pass is needed."""
        return 1-sig_out*sig_out
        #return sig_out*(1-sig_out)

    def train(self, input, expected_output, num_iters=10000):
        """Train on parallel lists of inputs and expected outputs.

        Performs num_iters passes' worth of randomly selected single-sample
        updates, printing a running error summary every sample_size updates
        and the raw network outputs during the final pass.
        """
        assert len(input) == len(expected_output)
        sample_size = 10  # reporting interval, hoisted out of the loop
        max_error = 0
        avg_error_sum = 0
        for i in range(num_iters * len(input)):
            j = random.randint(0, len(input) - 1)  # pick a training pair at random
            self._compute_output(input[j])
            self._compute_deltas(expected_output[j])
            self._update_weights()
            if i % sample_size == 0 and i > 0:
                # BUGFIX: the '%' must be inside the print() call; the old
                # print('...') % (...) raised TypeError on Python 3.
                print('T[%d] AvgError:%f MaxError:%f'
                      % (i, avg_error_sum / sample_size, max_error))
                avg_error_sum = 0
                max_error = 0
            # Sum-of-squared-differences error for this sample.
            tmp_ssd = sum((x - y) ** 2
                          for x, y in zip(expected_output[j], self.output[-1]))
            avg_error_sum += tmp_ssd
            max_error = max(max_error, tmp_ssd)
            if i > ((num_iters - 1) * len(input)):
                print('Expected Output:' + str(expected_output[j]))
                print('Output:' + str(self.output[-1]))

    def test(self, input):
        """Run a forward pass on one input and return the output layer."""
        self._compute_output(input)
        return self.output[-1]

def make_function_dataset(fun,lower_bound=-1,higher_bound=1,iters=1000):
    """Sample fun at iters uniformly random points in [lower_bound, higher_bound].

    Returns a pair (inputs, outputs) where each element is a single-item list,
    in the shape expected by regression_backprop.train().
    """
    inputs=[]
    outputs=[]
    for _ in range(iters):
        x=random.uniform(lower_bound,higher_bound)
        inputs.append([x])
        outputs.append([fun(x)])
    return inputs,outputs

# XOR truth table as an (inputs, expected_outputs) pair, in the shape taken by
# regression_backprop.train(): four 2-bit input vectors and their 1-bit XOR.
xor_prob=[[[0,0],[0,1],[1,0],[1,1]],[[0],[1],[1],[0]]]

