import math
import time
import random

### Data Dependent
numInputs = 3      # two XOR inputs plus one constant bias input
numPatterns = 4    # the four XOR training cases

### Network / training hyper-parameters
numHidden = 4      # hidden-layer size
numEpochs = 500    # training passes over the pattern set
LR_IH = 0.7        # learning rate, input -> hidden weights
LR_HO = 0.07       # learning rate, hidden -> output weights

### Training state (updated by calcNet and the weight-change routines)
patNum = 0         # index of the pattern currently presented
errThisPat = 0.0   # signed prediction error for the current pattern
outPred = 0.0      # network output for the current pattern
RMSerror = 0.0     # root-mean-square error over all patterns

### Hidden Neurons
# FIX: xrange is Python 2 only and raises NameError under Python 3;
# range is the equivalent here.
hiddenVal = [1.0 for _ in range(numHidden)]

### WEIGHTS
# weightsIH[input][hidden] and weightsHO[hidden].
weightsIH = [[1.0 for _ in range(numHidden)] for _ in range(numInputs)]
weightsHO = [1.0 for _ in range(numHidden)]

### THE DATA
trainInputs = [[1.0 for _ in range(numInputs)] for _ in range(numPatterns)]
trainOutput = [1.0 for _ in range(numPatterns)]

### RANDOM
RANDMAX = 1  # NOTE(review): appears unused in this file — confirm before removing


###
def calcNet():
	"""Forward pass: compute the network's output for pattern `patNum`.

	Reads the module-level inputs/weights and stores the results in the
	globals `outPred` (prediction) and `errThisPat` (prediction - target).
	"""
	global errThisPat
	global outPred

	for i in range(numHidden):
		hiddenVal[i] = 0.0

		# Weighted sum of the inputs feeding hidden neuron i.
		for j in range(numInputs):
			hiddenVal[i] = hiddenVal[i] + (trainInputs[patNum][j] * weightsIH[j][i])

		# tanh is the activation function: it squashes the weighted sum into
		# (-1, 1) and supplies the non-linearity the network needs.
		# BUG FIX: this line was outside the loop, so only the LAST hidden
		# neuron was ever passed through the activation.
		hiddenVal[i] = math.tanh(hiddenVal[i])

	# The output neuron is linear: a weighted sum of hidden activations.
	outPred = 0.0
	for i in range(numHidden):
		outPred = outPred + hiddenVal[i] * weightsHO[i]

	# Signed error for this pattern (prediction minus target).
	errThisPat = outPred - trainOutput[patNum]



def WeightChangesHO():
	"""Gradient step on the hidden->output weights.

	Each weight moves against the error gradient and is then clamped to
	[-5, 5] to keep it from running away.
	"""
	for k in range(numHidden):
		updated = weightsHO[k] - LR_HO * errThisPat * hiddenVal[k]
		# Clamp into the allowed band.
		weightsHO[k] = max(-5.0, min(5.0, updated))


###
def WeightChangesIH():
	"""Backprop step on the input->hidden weights.

	Uses the tanh derivative (1 - h^2) at each hidden activation to scale
	the error signal propagated back from the output.
	"""
	for h in range(numHidden):
		# Error signal reaching hidden neuron h, scaled by the tanh
		# derivative and the learning rate (invariant over the inputs).
		delta = (1 - (hiddenVal[h] * hiddenVal[h])) * weightsHO[h] * errThisPat * LR_IH
		for inp in range(numInputs):
			weightsIH[inp][h] = weightsIH[inp][h] - delta * trainInputs[patNum][inp]
			

def initWeights():
	"""Seed both weight matrices with small random values around zero."""
	for h in range(numHidden):
		# Hidden -> output weight in (-0.25, 0.25).
		weightsHO[h] = (random.random() - 0.5) / 2
		# Input -> hidden weights in (-0.1, 0.1), one per input line.
		for inp in range(numInputs):
			w = (random.random() - 0.5) / 5
			weightsIH[inp][h] = w
			print("Weight = " + str(w))
	

def displayResults():
	"""Print actual vs. predicted output for every training pattern.

	BUG FIX: `patNum` must be declared global here. Without it the
	assignment created a local, so calcNet() kept reading whatever stale
	global pattern index training left behind and every row showed the
	same prediction.
	"""
	global patNum

	for i in range(numPatterns):
		patNum = i
		calcNet()
		print("pat = "+ str(patNum+1) +" actual = "+str(trainOutput[patNum])+" neural model = "+str(outPred))

def calcOverallError():
	"""Compute the RMS prediction error over all patterns into `RMSerror`.

	Runs the forward pass on every pattern and accumulates the squared
	per-pattern errors before taking the root of the mean.
	"""
	global patNum
	global RMSerror

	squared_sum = 0.0
	for p in range(numPatterns):
		patNum = p
		calcNet()
		squared_sum += errThisPat * errThisPat
	RMSerror = math.sqrt(squared_sum / numPatterns)


def main():
	"""Train the network on the XOR data, then report its predictions."""
	global patNum

	initWeights()
	initData()

	for epoch in range(numEpochs):
		for _ in range(numPatterns):
			# Present patterns in random order.
			# FIX: randrange picks each pattern index uniformly; the old
			# int((random.random()*numPatterns)-0.001) was biased against
			# the last pattern (and the commented-out code already hinted
			# at randrange).
			patNum = random.randrange(numPatterns)
			calcNet()

			# Backpropagate: output layer first, then the hidden layer.
			WeightChangesHO()
			WeightChangesIH()

		calcOverallError()
		#print ("epoch = "+str(epoch)+" RMS Error = " + str(RMSerror))

	### Finished Training
	displayResults()


def initData():
    """Load the XOR training set into the module-level data arrays.

    The data is XOR rescaled to the range [-1, 1]; a third input fixed at 1
    is appended to each pattern to act as the bias. Targets lie in [-1, 1].
    Also fixes the "Initalizing" typo in the progress message.
    """
    print("Initializing Data: ")

    # (input 1, input 2, bias, target) for each XOR pattern.
    xor_patterns = [
        ( 1, -1, 1,  1),
        (-1,  1, 1,  1),
        ( 1,  1, 1, -1),
        (-1, -1, 1, -1),
    ]

    for p, (a, b, bias, target) in enumerate(xor_patterns):
        trainInputs[p][0] = a
        trainInputs[p][1] = b
        trainInputs[p][2] = bias   # constant bias input
        trainOutput[p] = target



### Script entry point: guard so importing this module doesn't
### immediately start training.
if __name__ == "__main__":
	main()