import Core.Gadget as Gadget
import Core.Algorithm as Algorithm
import Core.MongoDB as MongoDB
import Core.IO as IO
import pandas as pd
import numpy as np
import pymongo
import os
import matplotlib.pyplot as plt
from datetime import *


def CheckMissing(dataFrame, err = "Missing Data"):
    """Print *err* once if any cell in *dataFrame* is null/NaN.

    Parameters
    ----------
    dataFrame : pd.DataFrame
        Frame to scan for missing values.
    err : str
        Message printed (once) when at least one null is found.
    """
    # isnull().any() reduces per column; the second .any() reduces to a
    # single bool — replaces the original manual loop with `b == True`
    # comparisons and an early break.
    if dataFrame.isnull().any().any():
        print(err)


def CalculateIndicator(database,filter,sort):
    """For each index constituent, compute per-day technical indicator flags
    (close above SMA20/SMA60, and 20/60-bar breakout highs) aligned to the
    000300.SH benchmark calendar, then dump one CSV per symbol.

    Parameters
    ----------
    database : Core.MongoDB wrapper used for all quote lookups.
    filter : Mongo query dict restricting the StdDateTime window.
    sort : Mongo sort spec (ascending StdDateTime).

    NOTE(review): relies on module-level globals defined at the bottom of
    this file: Gadget, IO, instrumentList, barsCollectionBySymbol,
    maxReferenceBars. Output path is hard-coded to c://Data//QualityIndex//.
    """
    # ---Benchmark daily bars drive the trading calendar---
    bmBarSeries = database.findWithFilter("Quote", "000300.SH_Time_86400_Bar", filter, sort)

    # ---Build Complete Dates (one row per benchmark trading day)---
    data = []
    for bar in bmBarSeries:
        entry = []
        entry.append(bar["DateTime"])
        data.append(entry)
    dfBMSeries = pd.DataFrame(data, columns=["DateTime"])
    # print(dfBMSeries)


    # ---Main Loop, Loop Date — start once the longest lookback (60) is filled---
    length = len(dfBMSeries)
    for index in range(maxReferenceBars - 1, length):  # 59~1200

        # ---Parse the calendar date and convert to UTC for the universe lookup---
        curDateTime = dfBMSeries.iloc[index]["DateTime"]
        print("Loop Date: " + curDateTime)
        curDateTime = Gadget.ParseDateTime(curDateTime)
        curDateTime = Gadget.ToUTCDateTime(curDateTime)

        # ---Stat counters (incremented below but not stored anywhere here;
        # the aggregation step re-derives them from the per-symbol CSVs)---
        sma20Count = 0
        sma60Count = 0
        breakout20Count = 0
        breakout60Count = 0

        # ---Figure out the instrument universe corresponding to this date---
        instruments = Gadget.Find(instrumentList, curDateTime)

        # ---Loop instruments---
        instrumentCount = 0
        for instrument in instruments["Values"]:
            # instrumentCount += 1
            # if instrumentCount > 10:
            #    continue
            symbol = instrument["Symbol"]
            # print("Loop Instruments : " + symbol)
            dfBarSeries = barsCollectionBySymbol.get(symbol)

            # ---First time this symbol is seen: load its bars, left-join onto
            # the benchmark calendar (so row indices line up across symbols),
            # and create the indicator columns ("sheet" per stock)---
            if dfBarSeries is None:
                # barSeries = database.findWithFilter("Quote", symbol + "_Time_86400_Bar", filter)
                dfBarSeries = IO.LoadBarsAsDataFrame(database, symbol, filter, sort)
                dfBarSeries = pd.merge(dfBMSeries, dfBarSeries, on='DateTime', how='left')
                dfBarSeries["SMA20Value"] = 0.0
                dfBarSeries["SMA20"] = 0.0  # Setting a new column
                dfBarSeries["SMA60"] = 0.0
                dfBarSeries["Breakout20"] = 0.0
                dfBarSeries["Breakout60"] = 0.0

                # ---Checking Missing---
                # dfMissing = dfBarSeries[dfBarSeries.isnull().values == True]
                # if dfMissing.empty != True:
                #    print(dfMissing)
                barsCollectionBySymbol[symbol] = dfBarSeries

            # ---Locate index---
            # barIndex = Gadget.FindIndex(barSeries, curDateTime)

            # ---Calc Indicator---
            # ---Moving Average---
            close = dfBarSeries.at[index,"BClose"]
            # [2:5] Get index 2,3,4(Not include 5), total 3, use "loc[2:5]" include 2,3,4,5
            closes = dfBarSeries.loc[index - 20 + 1: index]["BClose"]
            closes2 = closes.dropna()
            # NOTE(review): if closes contain NaN (gaps from the left join),
            # np.mean below is NaN and the flag stays 0 — only a warning is printed.
            if (len(closes) != len(closes2)):
                print(symbol + " 20 Missing Closes to Compute")

            sma20 = np.mean(closes)
            dfBarSeries.at[index, "SMA20Value"] = sma20
            if close > sma20:
                sma20Count += 1
                dfBarSeries.at[index, "SMA20"] = 1

            # ---Same for the 60-bar average (value itself is not stored)---
            closes = dfBarSeries.loc[index - 60 + 1: index]["BClose"]
            closes2 = closes.dropna()
            if (len(closes) != len(closes2)):
                print(symbol + " 60 Missing Closes to Compute")

            sma60 = np.mean(closes)
            if close > sma60:
                sma60Count += 1
                dfBarSeries.at[index, "SMA60"] = 1

            # ---New high: close exceeds the max of the PRIOR 19/59 bars---
            closes = dfBarSeries.loc[index - 20 + 1: index - 1]["BClose"]
            hi20 = np.max(closes)
            if close > hi20:
                # if hi20 < close:
                breakout20Count += 1
                dfBarSeries.at[index, "Breakout20"] = 1

            closes = dfBarSeries.loc[index - 60 + 1: index - 1]["BClose"]
            hi60 = np.max(closes)
            if close > hi60:
                breakout60Count += 1
                dfBarSeries.at[index, "Breakout60"] = 1

        # NOTE(review): looks like a debugger-breakpoint anchor, not real logic.
        kkwood = 0

    # ---Output: one CSV per symbol, consumed by CalculateAggregateIndicator---
    for symbol, dataFrame in barsCollectionBySymbol.items():
        dataFrame.to_csv("c://Data//QualityIndex//" + symbol + ".csv")


def CalculateAggregateIndicator(database,filter,sort):
    """Aggregate the per-symbol indicator CSVs (written by CalculateIndicator)
    into market-breadth counts on the 000300.SH benchmark series, add a
    binary 20-bar forward-return label, and write the result to BM.csv.

    Parameters
    ----------
    database : Core.MongoDB wrapper used for benchmark quote lookups.
    filter : Mongo query dict restricting the StdDateTime window.
    sort : Mongo sort spec (ascending StdDateTime).

    NOTE(review): relies on module-level globals Gadget, IO, instrumentList,
    barsCollectionBySymbol, maxReferenceBars; paths are hard-coded.
    """
    #---Benchmark Data---
    # BUG FIX: original called a bare LoadBarsAsDataFrame (NameError); use the
    # IO helper with the same signature as in CalculateIndicator.
    dfBMSeries = IO.LoadBarsAsDataFrame(database, "000300.SH", filter, sort)
    dfBMSeries["SMA20"] = 0
    dfBMSeries["SMA60"] = 0
    dfBMSeries["Breakout20"] = 0
    dfBMSeries["Breakout60"] = 0
    dfBMSeries["Forward20"] = 0  # 1 when the return over the next 20 bars beats 6%

    # ---Read per-symbol indicator CSVs produced by CalculateIndicator---
    pathFolderName = "c://Data//QualityIndex//"
    files = os.listdir(pathFolderName)
    count = 0
    for filename in files:
        symbol = filename[:9]  # first 9 chars of "<symbol>.csv", e.g. "600631.SH"
        count += 1
        print("Load " + str(count))
        dfBars = pd.read_csv(pathFolderName + filename)
        # BUG FIX: the loaded frame was never cached (line was commented out),
        # so every lookup below returned None and all counts stayed zero.
        barsCollectionBySymbol[symbol] = dfBars

    # ---Main Loop, Loop Date (stop 20 bars early for the forward return)---
    length = len(dfBMSeries)
    print(length)
    for index in range(maxReferenceBars - 1, length - 20):
        # ---Parse the calendar date and convert to UTC for the universe lookup---
        curDateTime = dfBMSeries.iloc[index]["DateTime"]
        print("Loop Date: " + curDateTime)
        curDateTime = Gadget.ParseDateTime(curDateTime)
        curDateTime = Gadget.ToUTCDateTime(curDateTime)

        # ---Per-date breadth counters---
        sma20Count = 0
        sma60Count = 0
        breakout20Count = 0
        breakout60Count = 0

        # ---Figure out the instrument universe corresponding to this date---
        instruments = Gadget.Find(instrumentList, curDateTime)

        # ---Loop instruments---
        for instrument in instruments["Values"]:
            symbol = instrument["Symbol"]
            dfBarSeries = barsCollectionBySymbol.get(symbol)
            # BUG FIX: `== None` on a DataFrame is elementwise and raises
            # inside `if`; identity test is the correct None check.
            if dfBarSeries is None:
                continue

            # ---Aggregate indicators: count symbols above their SMAs---
            if dfBarSeries.at[index, "SMA20"] == 1:
                sma20Count += 1
            if dfBarSeries.at[index, "SMA60"] == 1:
                sma60Count += 1

            # A breakout any time within the last 5 bars counts once.
            breakout20Within5Days = dfBarSeries.loc[index - 5 + 1: index]["Breakout20"]
            if np.sum(breakout20Within5Days) > 0:
                breakout20Count += 1

            breakout60Within5Days = dfBarSeries.loc[index - 5 + 1: index]["Breakout60"]
            if np.sum(breakout60Within5Days) > 0:
                breakout60Count += 1

        #---Store the aggregate indicators on the benchmark row---
        dfBMSeries.at[index, "SMA20"] = sma20Count
        # BUG FIX: original wrote sma20Count into the SMA60 column.
        dfBMSeries.at[index, "SMA60"] = sma60Count
        dfBMSeries.at[index, "Breakout20"] = breakout20Count
        dfBMSeries.at[index, "Breakout60"] = breakout60Count

        #---Forecasting label: 1 if the 20-bar-ahead return exceeds 6%---
        returnForward20 = dfBMSeries.at[index + 20, "BClose"] / dfBMSeries.at[index, "BClose"] - 1
        if returnForward20 > 0.06:
            dfBMSeries.at[index, "Forward20"] = 1

    # ---Output: consumed by ReadData() for model training---
    dfBMSeries.to_csv("c://Data//QualityIndex//BM.csv")


def ReadData():
    """Load the aggregated benchmark CSV and split it into train/validation
    feature matrices and label vectors.

    Features are the four breadth columns (SMA20, SMA60, Breakout20,
    Breakout60); the label is Forward20. Rows 0..850 form the training set,
    the remainder the validation set.

    Returns
    -------
    (xTrain, yTrain, xValid, yValid) as numpy arrays.
    """
    dfData = pd.read_csv("c://Data//QualityIndex//BM.csv")

    # Pull features and labels column-wise instead of cell-by-cell.
    featureColumns = ["SMA20", "SMA60", "Breakout20", "Breakout60"]
    featureRows = dfData[featureColumns].values.tolist()
    labelValues = dfData["Forward20"].tolist()

    # Rows with index <= 850 train the model; everything after validates it.
    cut = 851
    xTrain = np.array(featureRows[:cut])
    yTrain = np.array(labelValues[:cut])
    xValid = np.array(featureRows[cut:])
    yValid = np.array(labelValues[cut:])

    return xTrain, yTrain, xValid, yValid


def Keras_Model():
    """Train a small dense Keras network (4 -> 8 -> 8 -> 1 sigmoid) on the
    aggregate-indicator features from ReadData() and print the validation
    score.

    Uses plain SGD with L2 regularization on every Dense layer; loss is MSE
    with an accuracy metric. NOTE(review): `optimizers.SGD(lr=...)` is the
    old Keras 1.x/2.x spelling — newer Keras expects `learning_rate=`.
    """
    from keras.models import Sequential
    from keras import optimizers
    from keras import regularizers
    from keras import metrics
    from keras.layers import Dense, Dropout

    xTrain, yTrain, xValid, yValid = ReadData()

    #--Hyper Parameters---
    learningRate = 0.01
    lbd = 0.01  # L2 regularization strength shared by all layers

    # ---Model: two ReLU hidden layers, sigmoid output for the 0/1 label---
    model = Sequential()
    model.add(Dense(8, input_dim=4, activation='relu', kernel_regularizer = regularizers.l2(lbd)))
    #model.add(Dropout(0.5))
    model.add(Dense(8, activation='relu', kernel_regularizer = regularizers.l2(lbd)))
    #model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid', kernel_regularizer = regularizers.l2(lbd)))

    # ---Plain SGD (no momentum/decay), MSE loss, accuracy metric---
    sgd = optimizers.SGD(lr=learningRate, momentum=0.0, decay=0.0, nesterov=False)
    model.compile(loss = 'mean_squared_error', optimizer = sgd, metrics = ['accuracy'])

    # Random-data smoke test kept for reference:
    #xTrain = np.random.random((1000, 4))
    #yTrain = np.random.randint(2, size=(1000, 1))

    model.fit(xTrain, yTrain,epochs=20,batch_size=128)
    # score is [loss, accuracy] on the validation split
    score = model.evaluate(xValid, yValid, batch_size=128)
    print(score)


def TF_Model():
    """Train a two-layer softmax classifier (built via the
    CygnusExample.TF_NeuralNetwork helpers) on the aggregate-indicator
    features from ReadData(), plot the loss history, and print validation
    accuracy.

    Uses the TensorFlow 1.x graph/session API. BUG FIX: the original called
    a nonexistent `sess.optimize()` everywhere — it crashed on the first
    call and never ran init, training, evaluation, or the final test; the
    intended `sess.run(...)` calls (visible in the original's commented-out
    lines) are restored below.
    """
    xTrain, yTrain, xValid, yValid = ReadData()

    import CygnusExample.TF_NeuralNetwork as TF_Network
    import tensorflow as tf

    #---Parameters---
    input_size = 4        # four breadth features per sample
    hidden_units = 50
    num_classes = 2       # Forward20 label is 0/1
    learning_rate = 0.001
    max_steps = 2000
    batch_size = 100
    reg_constant = 0.1

    #---Placeholders for inputs to the network---
    xs = tf.placeholder(tf.float32, [None, input_size])
    yLabels = tf.placeholder(tf.int64, shape=[None], name='image-labels')

    #---Build network: input -> hidden (ReLU) -> logits---
    layer1 = TF_Network.Add_layer("Layer1", xs, input_size, hidden_units,
                       activation_function=tf.nn.relu,
                       reg_constant=reg_constant)
    logits = TF_Network.Add_layer("Layer2", layer1, hidden_units, num_classes,
                       activation_function=None,
                       reg_constant=reg_constant)

    # --- Softmax regression (class probabilities; training uses logits) ---
    prediction = tf.nn.softmax(logits)

    # --- Operation for the loss function (sparse labels) ---
    loss = TF_Network.Loss(logits, yLabels)
    #loss = TF_Network.Loss(logits, yVectors, sparse=False)

    # --- Operation for the training step ---
    train_step = TF_Network.Training(loss, learning_rate)

    # --- Operation calculating the accuracy of our predictions ---
    accuracy = TF_Network.Evaluation(logits, yLabels)
    # accuracy = TF_Network.Evaluation(logits, yVectors, sparse=False)

    # ---Initialize all variables (non-deprecated TF1 initializer) and
    # actually run the init op---
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    loss_history = []
    num_train = len(xTrain)
    # ---Training loop: random mini-batch SGD---
    for step in range(max_steps):
        idx = np.random.choice(range(num_train), batch_size)
        x_batch = xTrain[idx, :]
        y_labels_batch = yTrain[idx]

        feed_dict = {
            xs: x_batch,
            yLabels: y_labels_batch
        }

        # BUG FIX: run the real training op instead of sess.optimize().
        sess.run(train_step, feed_dict=feed_dict)

        if step % 100 == 0:
            # BUG FIX: evaluate the real accuracy/loss tensors on this batch.
            train_accuracy = sess.run(accuracy, feed_dict=feed_dict)
            train_loss = sess.run(loss, feed_dict=feed_dict)
            print('Step {:d}, training loss {:g}, accuracy {:g} '.format(step, train_loss, train_accuracy))
            loss_history.append(train_loss)

    # ---Plot the loss history---
    plt.subplot(2, 1, 1)
    plt.plot(loss_history)
    plt.title('Loss history')
    plt.xlabel('Iteration')
    plt.ylabel('Loss')
    plt.show()

    # ---Evaluate on the held-out validation data (BUG FIX: was
    # sess.optimize(), which produced no accuracy at all)---
    test_accuracy = sess.run(accuracy, feed_dict={xs: xValid, yLabels: yValid})

    print('Test accuracy {:g}'.format(test_accuracy))


#---2017-7-29 v2.0---
# Script entry point: connects to MongoDB, builds the query window and the
# module-level globals that the functions above rely on, then runs one of
# the pipeline stages (indicator calc / aggregation / model training).
print("Chen's Quanlity Index")

# ---Connecting DataBase---
database = MongoDB.MongoDB("192.168.1.100","27017")
sort = [("StdDateTime",pymongo.ASCENDING)]

# ---Instrument list: 000300.SH constituents over time, used by Gadget.Find---
instrumentList = database.findWithFilter("Instruments", "InstrumentList", {"Symbol": "000300.SH"}, sort)

# ---Hyper Parameters---
# Query window: 2010-01-01 15:00 through 2015-01-01, converted to UTC.
datetime1 = datetime(2010, 1, 1, 15, 0, 0)
datetime2 = datetime(2015, 1, 1, 0, 0, 0)
datetime1 = Gadget.ToUTCDateTime(datetime1)
datetime2 = Gadget.ToUTCDateTime(datetime2)
# NOTE(review): `filter` shadows the builtin of the same name.
filter = {}
filter["StdDateTime"] = {"$gt": datetime1, "$lt": datetime2}
maxReferenceBars = 60  # longest lookback (SMA60); loops start at index 59
extremeToleranceDays = 5  # appears unused in this file — verify before removing

# ---Bar cache keyed by symbol, shared by the Calculate* functions---
barsCollectionBySymbol = {}

# --- Calc indicators seperately---
#CalculateIndicator(database,filter,sort)

#--- Calc indicator Aggregately---
#CalculateAggregateIndicator(database,filter,sort)


Keras_Model()
#TF_Model()

# NOTE(review): looks like a debugger-breakpoint anchor, not real logic.
kkwood = 1