""" Multilayer Feedforward Neural Networks

This module define network structure and
the learning method

"""

from math import *
import random
import math

def test(cases, network):
    """Run the network forward on each case and print classification accuracy.

    Each case is a sequence whose last element is the integer class label;
    the remaining elements are raw feature values, normalised here with the
    same (x - 8) / 8 mapping used during training — TODO confirm this range
    matches the data set actually fed in.

    Mutates network.inputs/outputs as a side effect of the forward pass.
    """
    if not cases:
        # Avoid ZeroDivisionError on an empty test set.
        print("success rate: ", 0.0)
        return
    correct = 0.0
    for case in cases:
        x = case[:-1]
        # Propagate the inputs forward to compute the outputs.
        for i in range(len(network.inputs[0])):
            network.inputs[0][i] = (x[i] - 8) / 8
            network.outputs[0][i] = (x[i] - 8) / 8
        for l in range(1, len(network.inputs)):
            for j in range(len(network.inputs[l])):
                # 'total' instead of shadowing the builtin `sum`.
                total = 0
                for i in range(len(network.inputs[l - 1])):
                    total += network.weights[l - 1][i][j] * network.outputs[l - 1][i]
                network.inputs[l][j] = total
                network.outputs[l][j] = g(total)
        # Predicted class = index of the most active output unit.
        predicted = network.outputs[-1].index(max(network.outputs[-1]))
        if case[-1] == predicted:
            correct += 1
    print("success rate: ", correct / len(cases))


def _forward(network, x):
    """Forward pass: normalise raw features *x* into layer 0, then propagate
    through every layer, filling network.inputs (pre-activations) and
    network.outputs (post-sigmoid activations) in place."""
    for i in range(len(network.inputs[0])):
        # Same normalisation the tester uses — assumes features lie near
        # [0, 16] so this maps into [-1, 1]; TODO confirm against the data.
        scaled = (x[i] - 8) / 8
        network.inputs[0][i] = scaled
        network.outputs[0][i] = scaled
    for l in range(1, len(network.inputs)):
        for j in range(len(network.inputs[l])):
            # 'activation' instead of shadowing the builtin `sum`.
            activation = 0
            for i in range(len(network.inputs[l - 1])):
                activation += network.weights[l - 1][i][j] * network.outputs[l - 1][i]
            network.inputs[l][j] = activation
            network.outputs[l][j] = g(activation)


def back_prop_learning(examples, network, alpha = 0.78):
    """Train *network* on *examples* with one pass of stochastic backprop.

    Each example is a sequence whose last element is the integer class label
    and whose remaining elements are raw feature values.  Targets are encoded
    as 0.9 for the true class and 0.1 elsewhere, keeping the sigmoid out of
    its flat saturation regions.  alpha is the learning rate.

    Returns the (mutated) network.
    """
    # Randomise weights uniformly within +/- 2.4 / fan-in.
    for l in range(len(network.weights)):
        bound = 2.4 / len(network.inputs[l])  # hoisted loop invariant
        for i in range(len(network.weights[l])):
            for j in range(len(network.weights[l][i])):
                network.weights[l][i][j] = random.uniform(-bound, bound)
    # Training: one online (per-sample) update per example.
    for sample in examples:
        x = sample[:-1]
        # Target vector: 0.9 for the labelled class, 0.1 everywhere else.
        y = [0.1 for _ in network.outputs[-1]]
        y[sample[-1]] = 0.90
        # Propagate the inputs forward to compute the outputs.
        _forward(network, x)
        # Deltas for the output layer.
        for j in range(len(network.outputs[-1])):
            network.deltas[-1][j] = g_d(network.inputs[-1][j]) * \
                    (y[j] - network.outputs[-1][j])
        # Propagate deltas backward from layer L-2 down to layer 0.
        # (deltas[0] is computed but unused by the update below; kept so
        # network.deltas stays fully populated as before.)
        for l in reversed(range(len(network.inputs) - 1)):
            for i in range(len(network.inputs[l])):
                downstream = 0
                for j in range(len(network.inputs[l + 1])):
                    downstream += network.weights[l][i][j] * network.deltas[l + 1][j]
                network.deltas[l][i] = g_d(network.inputs[l][i]) * downstream
        # Weight update: w[l][i][j] += alpha * a_i * delta_j.
        for l in range(len(network.weights)):
            for i in range(len(network.weights[l])):
                for j in range(len(network.weights[l][i])):
                    network.weights[l][i][j] += \
                            alpha * \
                            network.outputs[l][i] * \
                            network.deltas[l + 1][j]
    return network

def euclidian_distance(u, v):
    """Return the Euclidean (L2) distance between vectors u and v."""
    squared = sum((a - b) ** 2 for a, b in zip(u, v))
    return sqrt(squared)

def g(v):
    """Unit activation function; currently delegates to the sigmoid."""
    return sigmoid(v)

def g_d(v):
    """Derivative of the activation function at pre-activation v."""
    return sigmoid_d(v)

def sigmoid(v, a = 1):
    """Logistic sigmoid 1 / (1 + e^(-a*v)); *a* scales the slope."""
    return 1.0 / (1.0 + exp(-a * v))

def sigmoid_d(v, a = 1):
    """Derivative of the logistic sigmoid: s(v) * (1 - s(v)).

    Evaluates the sigmoid once instead of twice (the original called it
    two times per invocation).
    """
    s = sigmoid(v, a)
    return s * (1 - s)
    
def linear(v):
    """Piecewise-linear squashing function: identity clamped to [-1, 1]."""
    if v <= -1:
        return -1
    return 1 if v >= 1 else v

def linear_d(v):
    """Derivative of `linear`.

    0 in the saturated regions (|v| >= 1, where the output is clamped to a
    constant) and 1 on the open interval (-1, 1), where linear(v) == v.
    The previous version returned -1 when saturated and 0 otherwise, which
    is not the derivative of `linear` and would zero out every gradient.
    """
    if v <= -1 or v >= 1:
        return 0
    return 1

class Network:
    """State of a fully-connected feedforward network.

    ``distribution`` lists the layer sizes, input layer first.  Per-layer
    pre-activations (inputs), activations (outputs) and error terms
    (deltas) are zero-filled lists of matching size; weights[l][i][j]
    connects unit i of layer l to unit j of layer l + 1.
    """

    def __init__(self, distribution):
        self.inputs = [[0] * size for size in distribution]
        self.outputs = [[0] * size for size in distribution]
        self.deltas = [[0] * size for size in distribution]
        # One weight matrix per adjacent layer pair: fan_in rows of
        # fan_out zeros each (independent inner lists).
        self.weights = [[[0] * fan_out for _ in range(fan_in)]
                        for fan_in, fan_out in
                        zip(distribution[:-1], distribution[1:])]