###
import math
import random

### Get prices and info
import ystockquote

stocks = ["TGD", "AAPL"]
start_date = "2012.11.20"
end_date = "2012.11.28"

# Print a one-line quote summary and the raw historical prices for each ticker.
for ticker in stocks:
    summary = "{0}:  Price: {1}  Volume: {2}  Change: {3}  Earnings: {4}  200 Day Avg: {5}".format(
        ticker,
        ystockquote.get_price(ticker),
        ystockquote.get_volume(ticker),
        ystockquote.get_change(ticker),
        ystockquote.get_earnings_per_share(ticker),
        ystockquote.get_200day_moving_avg(ticker),
    )
    history = ystockquote.get_historical_prices(ticker, start_date, end_date)

    print(summary)
    print("     " + str(history))

print(" ")



# Activation function.  Despite the name, this is the hyperbolic tangent:
# a sigmoid-shaped curve that maps any real input into (-1, 1), commonly
# used as a neural-network activation.
def sigmoid(x):
    """Return tanh(x), the network's activation function."""
    return math.tanh(x)

def dsigmoid(y):
    """Derivative of the tanh activation, expressed in terms of the
    activation value y = tanh(x):  d/dx tanh(x) = 1 - tanh(x)**2.
    (Reference: http://www.math10.com/en/algebra/hyperbolic-functions/hyperbolic-functions.html)
    """
    return 1.0 - y * y

def makeMatrix(Y, X, fill=0.0):
    """Return a Y-by-X matrix (list of lists) with every cell set to *fill*.

    Each row is a freshly built list, so mutating one row never affects
    the others.
    """
    return [[fill] * X for _ in range(Y)]


### Placeholder for a future per-neuron abstraction.
class Nauron:
    """Empty stub; carries no state or behavior yet."""

### Neural Net
class NN:
    """Feed-forward neural network with one hidden layer.

    Activation is tanh (via sigmoid()/dsigmoid()); training is plain
    gradient descent with a momentum term (backPropagate / train).
    """

    def __init__(self, numinput, numhidden, numoutput):
        """Create the network and randomize its weights.

        numinput  -- number of input nodes callers supply values for
                     (a bias node is appended internally)
        numhidden -- number of hidden nodes
        numoutput -- number of output nodes
        """
        self.numinput = numinput + 1  # +1 for bias input node
        self.numhidden = numhidden
        self.numoutput = numoutput

        # Activation value of every node, initialised to 1.0
        # (the bias node's activation is never overwritten, so it stays 1.0).
        self.inputact = [1.0] * self.numinput
        self.hiddenact = [1.0] * self.numhidden
        self.outputact = [1.0] * self.numoutput

        # Weight matrices: input->hidden and hidden->output.
        self.inputweights = makeMatrix(self.numinput, self.numhidden)
        self.outpweights = makeMatrix(self.numhidden, self.numoutput)

        # Randomize weights: small for the input layer, larger for the output.
        for i in range(self.numinput):
            for j in range(self.numhidden):
                self.inputweights[i][j] = random.uniform(-0.2, 0.2)
        for j in range(self.numhidden):
            for k in range(self.numoutput):
                self.outpweights[j][k] = random.uniform(-2.0, 2.0)

        # Previous weight changes, kept for the momentum term.
        self.inputchange = makeMatrix(self.numinput, self.numhidden)
        self.outputchange = makeMatrix(self.numhidden, self.numoutput)

    def update(self, inputs):
        """Forward pass: propagate *inputs* through the network.

        Returns a copy of the output-layer activations.
        Raises ValueError if len(inputs) does not match the input layer.
        """
        if len(inputs) != self.numinput - 1:
            # BUG FIX: the expected count is numinput-1 (the bias node is
            # internal); the old message reported self.numinput instead.
            raise ValueError('Wrong number of inputs, should have %i inputs.'
                             % (self.numinput - 1))

        # Activate input layer neurons (-1 skips the internal bias node).
        for i in range(self.numinput - 1):
            self.inputact[i] = inputs[i]

        # Activate hidden layer neurons.
        for h in range(self.numhidden):
            total = 0.0  # renamed from `sum` to avoid shadowing the builtin
            for i in range(self.numinput):
                total += self.inputact[i] * self.inputweights[i][h]
            self.hiddenact[h] = sigmoid(total)

        # Activate output layer neurons.
        for o in range(self.numoutput):
            total = 0.0
            for h in range(self.numhidden):
                total += self.hiddenact[h] * self.outpweights[h][o]
            self.outputact[o] = sigmoid(total)

        return self.outputact[:]

    def backPropagate(self, targets, learningrate, momentum):
        """One gradient-descent step of back-propagation toward *targets*.

        Must be called after update() so the stored activations are current.
        Returns the summed squared error computed from the pre-update
        activations.  Raises ValueError on a target-length mismatch.
        """
        if len(targets) != self.numoutput:
            raise ValueError('Wrong number of target values.')

        # Error terms (deltas) for the output neurons.
        output_deltas = [0.0] * self.numoutput
        for k in range(self.numoutput):
            error = targets[k] - self.outputact[k]
            output_deltas[k] = dsigmoid(self.outputact[k]) * error

        # Error terms for the hidden neurons, back-propagated through
        # the hidden->output weights.
        hidden_deltas = [0.0] * self.numhidden
        for j in range(self.numhidden):
            error = 0.0
            for k in range(self.numoutput):
                error += output_deltas[k] * self.outpweights[j][k]
            hidden_deltas[j] = dsigmoid(self.hiddenact[j]) * error

        # Update hidden->output weights (gradient step + momentum).
        for j in range(self.numhidden):
            for k in range(self.numoutput):
                change = output_deltas[k] * self.hiddenact[j]
                self.outpweights[j][k] += learningrate * change + momentum * self.outputchange[j][k]
                self.outputchange[j][k] = change

        # Update input->hidden weights (gradient step + momentum).
        for i in range(self.numinput):
            for j in range(self.numhidden):
                change = hidden_deltas[j] * self.inputact[i]
                self.inputweights[i][j] += learningrate * change + momentum * self.inputchange[i][j]
                self.inputchange[i][j] = change

        # Total squared error over all output nodes.
        error = 0.0
        for k in range(len(targets)):
            error += 0.5 * (targets[k] - self.outputact[k]) ** 2
        return error

    def train(self, patterns, iterations=1000, learningrate=0.5, momentum=0.1):
        """Train the network on *patterns*.

        patterns -- list of [inputs, targets] pairs (extra elements in a
                    pattern are ignored, as before)
        """
        for i in range(iterations):
            error = 0.0
            for p in patterns:
                inputs = p[0]
                targets = p[1]
                self.update(inputs)
                error = error + self.backPropagate(targets, learningrate, momentum)
            # Uncomment to monitor convergence:
            # if i % 100 == 0:
            #     print('error %-.5f' % error)


### Demo: build patterns from live quote data, train the net, predict a price.
def test():
    """Fetch historical quote rows, convert them into training patterns,
    train the network, then run one hand-made pattern through it."""
    # Expected row layout (per ystockquote historical data):
    # Date, Open, High, Low, Close, Volume, Adj Close — index 4 (Close)
    # is used as the training target.  TODO confirm against the library.

    a_training_patterns = []

    ### AUTOMATED TEACHING:
    for stock in stocks:
        # Current-quote values; fetched but currently unused — kept for the
        # commented-out alternate pattern below.
        price = float(ystockquote.get_price(stock))
        volume = float(ystockquote.get_volume(stock))
        change = float(ystockquote.get_change(stock))
        earnings = float(ystockquote.get_earnings_per_share(stock))
        avg_200 = float(ystockquote.get_200day_moving_avg(stock))

        #a_training_patterns.append([[volume, change, earnings, avg_200],[price]])
        h_prices = ystockquote.get_historical_prices(stock, start_date, end_date)

        for hp in h_prices:
            print("Inputing Pattern:  " + str(hp))

            l = []
            result = []

            if not hp[0] == "Date":  # skip the header row
                # BUG FIX: the old code used hp.index(node), which returns
                # the FIRST occurrence of a value — the wrong column index
                # whenever a row contains duplicate values (e.g. Open ==
                # Close).  enumerate() yields the true position.
                for index, node in enumerate(hp):
                    if index != 4:
                        try:
                            l.append(float(node))
                        except ValueError:
                            # Non-numeric column (the date) — skip it.
                            pass
                    else:
                        result.append(float(node))

            ### Remove empty entries
            if len(l):
                a_training_patterns.append([l, result])

    print(a_training_patterns)

    # Teach network with pattern.  Normalized values (currently unused).
    training_patterns = [
        [[0.1593851, 0.24, 0.59], [0.249]],
        [[0.4291631, 0.24, 0.59], [0.24]],
        [[0.2740925, 0.235, 0.59], [0.24]],
        [[0.10396012, 0.470, 0.59], [0.329]]
    ]

    # Test it with a new pattern
    dream_patterns = [
        [[3.26, 3.34, 3.22, 3.28, 122000, 3.2]],
    ]

    # Build network with (input nodes, hidden nodes, output nodes).
    # NOTE(review): each historical row appears to yield 5 numeric inputs
    # (7 columns minus the date and the index-4 target) while the network
    # expects 6 — verify the column layout before relying on training.
    network = NN(6, 6, 1)

    # Training
    network.train(a_training_patterns)

    # Testing
    for pat in dream_patterns:
        print("Price Prediction:  " + str(network.update(pat[0])[0]))


### RUN: execute the demo only when this file is run as a script.
if __name__ == '__main__':
    test()


"""
TODO:
* Use Back-propagation - FINISHED
* Forward pass to accept float instead of integers
* Add market information
* Tweak weighted sums
"""

"""
    # Teach network with pattern
    training_patterns = [
        [[0,0,0], [0.1]],
        [[1,0,1], [0.5]],
        [[0,1,1], [0.5]],
        [[1,1,1], [1.0]],
    ] """