import Core.Gadget as Gadget
import datetime
import random
import math
import numpy as np
import pandas as pd
import copy
import matplotlib.pyplot as plt
import Core.DataSeries as DataSeries
from sklearn.model_selection import train_test_split, learning_curve, validation_curve, ShuffleSplit, GridSearchCV
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import make_scorer
from sklearn import metrics
from sklearn.externals import joblib
import Analysis.General as General
import Analysis.PreProcess
import pickle
from collections import Counter
import Factors.FactorContent


def RandomInstruments(database, datetime1, num=100, seed=0):
    """Return `num` randomly chosen instruments listed as of `datetime1`.

    Deterministic for a given `seed`. Raises ValueError (from random.sample)
    when fewer than `num` instruments are listed.
    """
    totalInstruments = Gadget.FindListedInstrument(database, datetime1)

    # Seed so the sample is reproducible across runs.
    random.seed(seed)
    indices = random.sample(range(len(totalInstruments)), num)

    return [totalInstruments[i] for i in indices]


def Performance_Metric(y_true, y_pred):
    """Print binary-classification metrics and the confusion matrix.

    Parameters:
        y_true: ground-truth binary labels (0/1).
        y_pred: predicted binary labels (0/1).

    Returns:
        dict with "Accuracy", "Precision", "Recall", "F1", "MicroPrecision",
        "MacroPrecision", "Confusion". (Backward compatible: the previous
        version returned None and existing callers ignore the return value.)
    """
    acc = metrics.accuracy_score(y_true, y_pred)
    # accuracy_score(y_true, y_pred, normalize=False) would count per-class
    # hits (Hamming-style) and micro-average them.

    precision = metrics.precision_score(y_true, y_pred)
    a1 = metrics.precision_score(y_true, y_pred, average='micro')  # micro-averaged precision
    a2 = metrics.precision_score(y_true, y_pred, average='macro')  # macro-averaged precision
    recall = metrics.recall_score(y_true, y_pred)
    f1 = metrics.f1_score(y_true, y_pred)

    print("Acc", acc, "Precision", precision, "Recall", recall, "F1", f1)

    # Confusion matrix: actual classes as rows, predictions as columns.
    confuse = metrics.confusion_matrix(y_true, y_pred)
    print("        ", "Pred F", "Pred T")
    print("Actual F", confuse[0][0], confuse[0][1], confuse[0][0] + confuse[0][1])
    print("Actual T", confuse[1][0], confuse[1][1], confuse[1][0] + confuse[1][1])
    print("   Total", confuse[0][0] + confuse[1][0], confuse[0][1] + confuse[1][1])

    total = confuse[0][0] + confuse[1][0] + confuse[0][1] + confuse[1][1]
    print("True", (confuse[1][0] + confuse[1][1]) / total, "False",  (confuse[0][0] + confuse[0][1]) / total )

    # Fix: the computed metrics were previously discarded; return them so
    # callers (e.g. commented "score = Performance_Metric(...)") can use them.
    return {"Accuracy": acc, "Precision": precision, "Recall": recall,
            "F1": f1, "MicroPrecision": a1, "MacroPrecision": a2,
            "Confusion": confuse}



# Ad-hoc smoke check for Performance_Metric (kept for manual use):
#y_actual =  [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
#y_predict = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1]
# Performance_Metric(y_actual, y_predict)
# NOTE(review): module-level `k` is never read in this file — candidate for removal.
k = 0


def SymbolsToInstruments(database, symbols):
    """Look up Stock instrument documents for the given ticker symbols.

    Builds a Mongo-style $or filter matching any of `symbols` and returns
    whatever `database.Find` yields for the Instruments/Stock collection.
    """
    # Comprehension replaces the manual append loop; also avoids shadowing
    # the `filter` builtin.
    query = {"$or": [{"Symbol": symbol} for symbol in symbols]}
    return database.Find("Instruments", "Stock", query)


def FindPatialFactorValues(database, instruments, factor, datetime0, datetime1):
    """Fetch `factor` values for the instruments within [datetime0, datetime1].

    Results are requested sorted by StdDateTime (the {"StdDateTime": 1} hint).
    """
    # Date-range condition plus a per-symbol $or condition added in place.
    query = {
        "$and": [
            {"StdDateTime": {"$gte": datetime0}},
            {"StdDateTime": {"$lte": datetime1}},
        ]
    }
    GenarateMongoCondition(instruments, query)
    return database.Find("Factor", factor, query, {"StdDateTime": 1})


def GenarateMongoCondition(instruments, filter, logic="$or"):
    """Add a per-symbol condition list to `filter` in place.

    Parameters:
        instruments: iterable of dicts each carrying a "Symbol" key.
        filter: Mongo-style filter dict, mutated in place.
        logic: combining operator key, typically "$or" or "$and".
    """
    # Comprehension replaces the manual append loop (same resulting list).
    filter[logic] = [{"Symbol": instrument["Symbol"]} for instrument in instruments]


def InstrumentsAvgAsBenchmark(params):
    """Build a wide DataFrame of monthly returns, one column per symbol.

    params keys: "Database", "DateTime1", "DateTime2", "Instruments".
    The symbol with the longest return series becomes the merge base so the
    StdDateTime axis is as complete as possible; the other symbols are
    left-joined onto it.
    """
    database = params["Database"]
    start = params["DateTime1"]
    end = params["DateTime2"]
    instruments = params["Instruments"]

    # Date range plus per-symbol $or filter.
    query = {
        "$and": [
            {"StdDateTime": {"$gte": start}},
            {"StdDateTime": {"$lte": end}},
        ],
        "$or": [{"Symbol": instrument["Symbol"]} for instrument in instruments],
    }
    rows = database.Find("Factor", "MonthlyReturn", query, {"StdDateTime": 1})

    # ---Group returns by symbol---
    print("Format Returns by Symbol", datetime.datetime.now())
    returnsBySymbol = {}
    for row in rows:
        series = returnsBySymbol.setdefault(row["Symbol"], [])
        series.append([row["StdDateTime"], row["Value"]])

    # ---Use the longest series as the merge base---
    baseSymbol = max(returnsBySymbol, key=lambda s: len(returnsBySymbol[s]))
    df = pd.DataFrame(returnsBySymbol[baseSymbol], columns=["StdDateTime", baseSymbol])

    # ---Left-join every other symbol's series onto the base dates---
    for symbol, series in returnsBySymbol.items():
        if symbol == baseSymbol:
            continue
        other = pd.DataFrame(series, columns=["StdDateTime", symbol])
        df = pd.merge(df, other, on='StdDateTime', how='left')

    return df


def SymbolAsBenchmark(params):
    """Fetch the monthly returns of the first instrument as a benchmark series.

    params keys: "Database", "DateTime1", "DateTime2", "Instruments"
    (only Instruments[0] is used).

    Returns:
        DataFrame with columns ["StdDateTime", "BenchmarkReturn"].
    """
    database = params["Database"]
    datetime1 = params["DateTime1"]
    datetime2 = params["DateTime2"]
    instruments = params["Instruments"]

    # Single-symbol filter combined with the date range.
    query = {"Symbol": instruments[0]["Symbol"],
             "$and": [{"StdDateTime": {"$gte": datetime1}},
                      {"StdDateTime": {"$lte": datetime2}}]}
    returns = database.Find("Factor", "MonthlyReturn", query, {"StdDateTime": 1})

    # Comprehension replaces the append loop; the unused `symbol` local is gone.
    data = [[r["StdDateTime"], r["Value"]] for r in returns]

    return pd.DataFrame(data, columns=["StdDateTime", "BenchmarkReturn"])


def ReturntoLabel(df, threshold=0.1, useExcessReturn=False, isRisk=False):
    """Add a binary 'IsSignificant' column derived from a return column.

    Parameters:
        df: frame with a 'Return' column (and 'ExcessReturn' when
            useExcessReturn=True); mutated in place.
        threshold: cut-off for the label.
        useExcessReturn: label on 'ExcessReturn' instead of 'Return'.
        isRisk: if True, label 1 means return BELOW threshold (risk events);
            otherwise label 1 means return ABOVE threshold (alpha events).

    Returns:
        The same df (mutated in place), for chaining.
    """
    source = "ExcessReturn" if useExcessReturn else "Return"

    if isRisk:
        # Risk labelling: flag returns that fall below the threshold.
        def label(x):
            return 1 if x < threshold else 0
    else:
        # Alpha labelling: flag returns that exceed the threshold.
        def label(x):
            return 1 if x > threshold else 0

    # apply(label) replaces the redundant apply(lambda x: label(x)) and the
    # 4-way if/else over (useExcessReturn, isRisk).
    df['IsSignificant'] = df[source].apply(label)

    return df


# ---Back Test---
def MonthBased(database, instruments, factors, benchmarkFunc):
    """Assemble a per-instrument training frame of monthly returns,
    benchmark-relative excess returns, and cached factor values.

    Fixes vs. original: `trainData == None` (ambiguous for DataFrames) is now
    an identity check, the `pd.concat` result is kept instead of discarded,
    and the accumulated frame is returned.

    Parameters:
        database: data access object (project type).
        instruments: list of dicts with a "Symbol" key.
        factors: list of factor collection names.
        benchmarkFunc: callable(params) -> DataFrame with columns
            ["StdDateTime", "BenchmarkReturn"] (e.g. SymbolAsBenchmark).

    Returns:
        Concatenated DataFrame over all instruments, or None if
        `instruments` is empty.
    """
    datetime1 = datetime.datetime(2010, 1, 1)
    datetime2 = datetime.datetime(2019, 4, 1)
    # Look back one extra year so factor history exists before the window.
    datetime0 = datetime1 + datetime.timedelta(days=-365)

    # ---Benchmark series (hard-coded to the 000001.SH index)---
    params = {}
    params["Database"] = database
    params["DateTime1"] = datetime0
    params["DateTime2"] = datetime2
    params["Instruments"] = [{"Symbol": "000001.SH"}]
    dfBmReturns = benchmarkFunc(params)

    print(dfBmReturns.head())

    trainData = None

    # ---Cache factor values once per factor, grouped by symbol---
    factorValues = {}
    for factor in factors:
        if factor not in factorValues:
            factorValues[factor] = {}
        data = FindPatialFactorValues(database, instruments, factor, datetime0, datetime2)
        for d in data:
            symbol = d["Symbol"]
            if symbol not in factorValues[factor]:
                factorValues[factor][symbol] = []
            factorValues[factor][symbol].append([d["StdDateTime"], d["Value"]])

    #
    for instrument in instruments:
        symbol = instrument["Symbol"]
        print(symbol)

        # ---Per-symbol monthly returns in the window---
        query = {"Symbol": symbol,
                 "$and": [{"StdDateTime": {"$gte": datetime0}},
                          {"StdDateTime": {"$lte": datetime2}}]}
        returnFactorValues = database.Find("Factor", "MonthlyReturn", query, {"StdDateTime": 1})

        returns = [[r["StdDateTime"], r["Value"]] for r in returnFactorValues]

        # ---Return matrix with benchmark and excess return---
        df = pd.DataFrame(returns, columns=["StdDateTime", "Return"])
        df = pd.merge(df, dfBmReturns, on='StdDateTime', how='left')
        df["ExcessReturn"] = df["Return"] - df["BenchmarkReturn"]

        # ---Attach cached factor values---
        for factor in factors:
            factorValuesThisSymbol = factorValues[factor][symbol]
            dfFactorValue = pd.DataFrame(factorValuesThisSymbol, columns=["StdDateTime", factor])
            df = pd.merge(df, dfFactorValue, on='StdDateTime', how='left')

        print(df.head(30))

        # ---Concat (bug fix: identity check + keep the concatenated result)---
        if trainData is None:
            trainData = df
        else:
            trainData = pd.concat([trainData, df])

    return trainData


def MonthBased2(database, datetime1, datetime2, instruments, factors):
    """Build month-over-month profile training data.

    Fix vs. original: the accumulated frame is now returned instead of
    being discarded.

    NOTE(review): `GenerateProfileData` is not defined in this file —
    confirm it is supplied by another module at runtime.
    """
    datetimes = Gadget.GenerateMonthDates(datetime1, datetime2)

    trainData = pd.DataFrame()

    # ---Loop over consecutive month boundaries (lastDateTime, dt)---
    i = 0
    for dt in datetimes:
        i += 1
        if i == 1:
            # First date only seeds the window start.
            lastDateTime = dt
            continue

        df = GenerateProfileData(database, lastDateTime, dt, instruments, factors, "000001.SH")

        if trainData.empty:
            trainData = df
        else:
            trainData = pd.concat([trainData, df])

        lastDateTime = dt

    return trainData


def SeasonBasedTest(database, datetime1, datetime2, instruments, factors):
    """Run SinglePeriodTest over consecutive report-release periods.

    Bug fix: the original passed the full (datetime1, datetime2) range into
    every iteration, so the loop re-ran the identical test each time; each
    iteration now tests its own (lastDateTime, dt) window.

    DuPont-analysis factors of interest: profit margin, asset turnover,
    leverage.
    """
    datetimes = Gadget.GenerateReleaseDates(datetime1, datetime2)

    # ---Loop over consecutive release dates---
    i = 0
    lastDateTime = None
    for dt in datetimes:
        i += 1
        if i == 1:
            # First date only seeds the window start.
            lastDateTime = dt
            continue

        print(dt)
        SinglePeriodTest(database, lastDateTime, dt, instruments, factors)

        lastDateTime = dt

# DateTime1 as Profile Data
# DateTime1 ~ DateTime2 is Range Return
# DateTime1 as Profile Data
# DateTime1 ~ DateTime2 is Range Return
def SinglePeriodTest(database,
                     datetime1, datetime2, instruments=None, factors=None,
                     profileFolderName=None, rangeReturnFolderName=None):
    """Train and evaluate on a single period.

    X is the factor profile at `datetime1`; Y is the range return over
    [datetime1, datetime2]. When `database` is None, both are loaded from
    the CSV caches named by `profileFolderName` / `rangeReturnFolderName`.
    """
    if database is not None:
        # ---Prepare X from the database---
        dfFactors = General.Profile(database, datetime1, factors, instruments)

        # ---Prepare Y from the database---
        dfReturns = General.RangeReturn(database, datetime1, datetime2, instruments)

    else:
        # ---Load both from the on-disk CSV caches---
        folderName1 = "d://Data//Profile//" + profileFolderName + "//"
        dfFactors = pd.read_csv(folderName1 + "Profile_" + Gadget.ToDateString(datetime1) + ".csv")
        #
        folderName2 = "d://Data//RangeReturn//" + rangeReturnFolderName + "//"
        dfReturns = pd.read_csv(
            folderName2 + "RangeReturn_" + Gadget.ToDateString(datetime1) + "_" + Gadget.ToDateString(
                datetime2) + ".csv", engine="python")

    # Bug fix: PreProcessData requires a params argument (the original call
    # with two args raised TypeError); pass an empty dict so the defaults in
    # PreprocessData_Y apply.
    df = PreProcessData(dfFactors, dfReturns, {})
    dfX = df.drop(columns=["IsSignificant"])
    dfY = df["IsSignificant"]
    Train(dfX, dfY)

def MultiPeriodData(database, datetime1, datetime2,
                    factors=None,
                    instruments=None,
                    symbols=None,
                    profileFolderName=None,
                    rangeReturnFolderName=None,
                    params=None
                    ):
    """Concatenate per-period (X, Y) training data loaded from CSV caches.

    X comes from d://Data//Profile//<profileFolderName>/, Y from
    d://Data//RangeReturn//<rangeReturnFolderName>/. Each period is cleaned
    independently with PreProcessData2 and the results are stacked row-wise.

    Fix vs. original: mutable default arguments ([] / {}) replaced with
    None sentinels (shared-instance pitfall); `== None` checks replaced
    with identity checks.

    NOTE(review): the `instruments` and `symbols` arguments are immediately
    overwritten from `params` below (pre-existing behavior) — confirm
    whether they should be honored when `params` omits them.
    """
    if factors is None:
        factors = []
    if params is None:
        params = {}

    # ---Period boundaries derived from the return folder's cadence---
    datatimeRanges = []
    if rangeReturnFolderName == "Yearly":
        datatimeRanges = Gadget.GenerateTimeRange_Yearly(datetime1, datetime2)
    if rangeReturnFolderName == "HalfYear":
        datatimeRanges = Gadget.GenerateTimeRange_HalfYear(datetime1, datetime2)

    #
    keepFields = params.get("KeepFields")
    if keepFields is None:
        keepFields = []

    # ---Optional symbol whitelist (taken from params)---
    dfSymbols = pd.DataFrame()
    symbols = params.get("Symbols")
    instruments = params.get("Instruments")
    if symbols is None and instruments is not None:
        symbols = [instrument["Symbol"] for instrument in instruments]
    #
    if symbols is not None and len(symbols) > 0:
        dfSymbols = pd.DataFrame(list(symbols), columns=["Symbol"])

    #
    df = pd.DataFrame()
    for timeRange in datatimeRanges:
        print("")
        print("Time Range", timeRange)
        #
        begin = timeRange[0]
        end = timeRange[1]

        # ---Load the factor profile (X) for this period---
        folderName1 = "d://Data//Profile//" + profileFolderName + "//"
        dfFactors = pd.read_csv(folderName1 + "Profile_" + Gadget.ToDateString(begin) + ".csv")

        # ---Restrict to whitelisted symbols---
        if not dfSymbols.empty:
            dfFactors = pd.merge(dfSymbols, dfFactors, on="Symbol", how="inner")

        # ---Drop columns that are neither keys, kept fields, nor requested factors---
        unrelated = [column for column in dfFactors.columns
                     if column not in ("Symbol", "DateTime")
                     and column not in keepFields
                     and column not in factors]
        if unrelated:
            dfFactors.drop(columns=unrelated, inplace=True)

        # ---Load range returns (Y) for this period---
        folderName2 = "d://Data//RangeReturn//" + rangeReturnFolderName + "//"
        dfReturns = pd.read_csv(
            folderName2 + "RangeReturn_" + Gadget.ToDateString(begin) + "_" + Gadget.ToDateString(
                end) + ".csv", engine="python")

        # ---Per-period independent cleaning/labelling---
        dfTemp = PreProcessData2(dfFactors, dfReturns, params)

        # ---Stack periods row-wise---
        df = pd.concat([df, dfTemp], axis=0)
        print("#Rows", df.shape[0], "#Columns", df.shape[1])

    # ---Summary statistics over all periods---
    print("Data Statistics")
    print("#Data", df.shape[0])
    if "IsSignificant" in df.columns:
        print("#True", df[df["IsSignificant"]>0].shape[0])

    return df


def MultiPeriodTest(database, datetime1, datetime2,
                    factors,
                    profileFolderName=None, rangeReturnFolderName=None):
    """Train and evaluate on stacked multi-period data.

    Bug fix: the folder names were previously passed positionally into
    MultiPeriodData's `instruments`/`symbols` parameters, so
    `rangeReturnFolderName` stayed None inside and no time ranges were
    generated (empty frame). They are now passed as keyword arguments.
    """
    # ---Prepare multi-period data---
    df = MultiPeriodData(database, datetime1, datetime2, factors,
                         profileFolderName=profileFolderName,
                         rangeReturnFolderName=rangeReturnFolderName)

    # ---Split into features and label---
    dfX = df.drop(columns=["IsSignificant"])
    dfY = df["IsSignificant"]

    Train(dfX, dfY)

# Process X and Y independently first, then merge (cf. PreProcessData, which merges first).
def PreProcessData2(dfFactors, dfReturns, transformDataPrarams):
    """Process X and Y independently per period, then merge them on Symbol.

    Recognized params: "Symbols" (whitelist), "ProcessX",
    "ProcessY_Threshold", "DropRedundantFields", "KeepFields".
    Returns the merged, cleaned DataFrame.
    """
    #
    dropColumns = ["Symbol", "DateTime", "DateTime1", "DateTime2", "BenchmarkReturn", "证券简称", "ExcessReturn"]

    # ---Optional symbol whitelist---
    symbols = transformDataPrarams.get("Symbols")
    if symbols is not None:
        dfSymbols = pd.DataFrame(list(symbols), columns=["Symbol"])
        dfFactors = pd.merge(dfFactors, dfSymbols, on="Symbol", how="inner")

    # ---Process X---
    if transformDataPrarams.get("ProcessX"):
        PreprocessData_X(dfFactors, transformDataPrarams)

    # ---Process Y (labels derived from returns, so raw Return is dropped)---
    if transformDataPrarams.get("ProcessY_Threshold"):
        PreprocessData_Y(dfReturns, transformDataPrarams)
        dropColumns.append("Return")

    # ---Merge X and Y---
    df = pd.merge(dfFactors, dfReturns, on="Symbol", how="inner")
    df.dropna(inplace=True)

    # ---Remove redundant fields (minus any explicitly kept ones)---
    if transformDataPrarams.get("DropRedundantFields"):
        keepFields = transformDataPrarams.get("KeepFields")
        if keepFields is None:
            keepFields = []
        # List comprehension replaces deepcopy + remove loop.
        realToDrop = [column for column in dropColumns if column not in keepFields]
        DropRedundantColumns(df, realToDrop)
    else:
        # Still strips "Unnamed" CSV index columns.
        DropRedundantColumns(df, dropColumns=[])
    #
    return df


# Merge first, then process (cf. PreProcessData2, which processes X and Y first).
def PreProcessData(dfFactors, dfReturns, transformDataPrarams):
    """Merge factors (X) with returns (Y), label Y, clean X, then re-join.

    Returns a DataFrame containing the cleaned features plus the binary
    'IsSignificant' label (Symbol column removed).
    """
    # Inner-join X and Y on Symbol, dropping incomplete rows.
    merged = MergeData(dfFactors, dfReturns)

    # Order matters: derive the label from the return columns BEFORE those
    # columns are dropped.
    PreprocessData_Y(merged, transformDataPrarams)

    # Split into features and (Symbol, label), then clean the features.
    DropRedundantColumns(merged)
    features = merged.drop(columns=["IsSignificant"])
    labels = merged[["Symbol", "IsSignificant"]]

    PreprocessData_X(features, transformDataPrarams)

    # Re-join cleaned features with their labels and drop the join key.
    result = pd.merge(features, labels, on="Symbol", how="inner")
    result.drop(columns=["Symbol"], inplace=True)

    # ---Class-balance statistics---
    totalData = result.shape[0]
    trueData = result[result["IsSignificant"] > 0].shape[0]
    print("Prepare X-Y Data, Total Instance#", totalData, "Significant#", trueData)

    return result


def PreprocessData_X(df, params=None):
    """Clean a feature matrix in place: drop dirty rows, then optionally
    de-outlier and normalize.

    Parameters:
        df: feature DataFrame, mutated in place (literal "nan" strings and
            +/-inf become NaN, then rows with NaN are dropped).
        params: optional dict; flags "ProcessX_Outlier" and
            "ProcessX_Normalization" enable the extra steps.

    Returns:
        The (possibly reassigned) frame; previously nothing was returned.
    """
    # Fix: mutable default argument {} replaced with None sentinel.
    if params is None:
        params = {}

    # ---Delete dirty values---
    df.replace(["nan"], np.nan, inplace=True)
    df.replace([np.inf, -np.inf], np.nan, inplace=True)
    df.dropna(inplace=True)

    # Nothing left to process.
    if df.shape[0] == 0:
        return df

    # ---Outlier clipping---
    if params.get("ProcessX_Outlier"):
        # NOTE(review): Outlier_Percentile returns a new frame, so the
        # caller's df is NOT updated by this step (pre-existing behavior);
        # the result is now at least returned — confirm intent.
        df = Analysis.PreProcess.Outlier_Percentile(df, 0.025, 0.975)

    # ---Normalization (in place)---
    if params.get("ProcessX_Normalization"):
        Analysis.PreProcess.Normalization(df)

    return df

def PreprocessData_Y(df, params=None):
    """Derive the binary 'IsSignificant' label from a returns frame, in place.

    Recognized params:
        ProcessY_Threshold: label threshold (default 0.1).
            NOTE(review): PreProcessData2 also reads this key as an on/off
            flag; passing True here would silently act as threshold == 1 —
            confirm callers always supply a numeric value.
        ProcessY_UseExcessReturn: label on 'ExcessReturn' vs 'Return'
            (default True).
        ProcessY_RiskAlpha: "Risk" (label = return below threshold, the
            default) or "Alpha" (label = return above threshold).
    """
    # Fix: mutable default argument {} replaced with None sentinel;
    # `!= None` comparisons replaced with identity checks.
    if params is None:
        params = {}

    # ---Threshold---
    threshold = 0.1
    tmp = params.get("ProcessY_Threshold")
    if tmp is not None:
        threshold = tmp

    # ---Return source---
    useExcessReturn = True
    tmp = params.get("ProcessY_UseExcessReturn")
    if tmp is not None:
        useExcessReturn = tmp

    # ---Risk or Alpha labelling---
    isRisk = True
    tmp = params.get("ProcessY_RiskAlpha")
    if tmp == "Alpha":
        isRisk = False
    elif tmp == "Risk":
        isRisk = True

    # ---Binary label column---
    ReturntoLabel(df, threshold, useExcessReturn=useExcessReturn, isRisk=isRisk)
    # DropRedundantColumns(df)


def DropRedundantColumns(df, dropColumns=None):
    """Drop bookkeeping columns and CSV artifact columns from df, in place.

    Parameters:
        df: DataFrame, mutated in place.
        dropColumns: column names to remove when present; None selects the
            default list below. Any column whose name contains "Unnamed"
            (pandas CSV index artifacts) is always removed.

    Fixes vs. original: mutable default list replaced with a None sentinel,
    and both passes collapsed into one drop (the original iterated a stale
    column snapshot, risking a double drop).
    """
    if dropColumns is None:
        dropColumns = ["DateTime1", "DateTime2", "BenchmarkReturn", "ExcessReturn", "Return", "证券简称"]

    toDrop = [column for column in df.columns
              if column in dropColumns or "Unnamed" in column]
    if toDrop:
        df.drop(columns=toDrop, inplace=True)


def DropRedundantColumns2(df, dropColumns=None):
    """Variant of DropRedundantColumns whose default KEEPS Return/ExcessReturn.

    Fix: mutable default list replaced with a None sentinel (the explicit
    default is reproduced inside so behavior is unchanged).
    """
    if dropColumns is None:
        dropColumns = ["DateTime1", "DateTime2", "BenchmarkReturn", "证券简称"]
    DropRedundantColumns(df, dropColumns)


def MergeData(dfFactors, dfReturns):
    """Inner-join features (X) with returns (Y) on Symbol, dropping
    incomplete rows.
    """
    merged = pd.merge(dfFactors, dfReturns, on="Symbol", how="inner")
    merged.dropna(inplace=True)
    return merged

def Train(dfX, dfY, model=None):
    """Fit a model on the full dataset and report in-sample metrics.

    Note: there is no train/test split here — metrics are in-sample.
    The `model` parameter is currently unused (kept for interface
    compatibility).

    Returns:
        The fitted regressor from TrainModel.
    """
    # Fit on everything.
    regressor = TrainModel(dfX, dfY)

    # Score on the training data itself.
    predictions = regressor.predict(dfX)
    Performance_Metric(dfY, predictions)

    return regressor


def TrainModel(x_train, y_train):
    """Fit a one-vs-rest logistic regression and print its coefficients
    ranked by absolute magnitude, largest first.

    Returns:
        The fitted LogisticRegression.
    """
    # ---Model---
    regressor = LogisticRegression(solver='lbfgs',
                                   multi_class='ovr')  # multi_class='multinomial'
    regressor.fit(x_train, y_train)

    # ---Report coefficients---
    features = list(x_train.columns)
    print(regressor.intercept_)
    print(regressor.coef_)

    # Pair each feature with its weight and |weight|, sort ascending by
    # |weight|, then print in reverse (largest first).
    ranked = [[name, weight, math.fabs(weight)]
              for name, weight in zip(features, regressor.coef_[0])]
    ranked.sort(key=lambda item: item[2])

    print("Intercept :", regressor.intercept_[0])
    for name, weight, _ in reversed(ranked):
        print("{0} : {1:.4f}".format(name, weight))

    return regressor


def Test(x_train, params, y_train=None):
    """Score `x_train` with a coefficient-injected model; print metrics when
    labels are supplied.

    Bug fix: `y_train != None` is elementwise (and raises "truth value is
    ambiguous") for pandas objects — replaced with an identity check.
    """
    # ---Build model from external coefficients---
    regressor = TestModel(params)

    # ---Predict---
    y_predicted = regressor.predict(x_train)

    if y_train is not None:
        Performance_Metric(y_train, y_predicted)


def TestModel(params):
    """Build a LogisticRegression with externally supplied coefficients.

    params keys:
        "Intercept": scalar (or array-like) intercept.
        "Coef": iterable of per-feature weights.

    Bug fix: coef_ was a plain nested Python list, which breaks sklearn's
    predict/decision_function (they use `coef_.T`); it is now an ndarray.

    NOTE(review): predict() also requires `classes_` to be set (e.g.
    np.array([0, 1])) — confirm how callers use this object.
    """
    regressor = LogisticRegression(solver='lbfgs',
                                   multi_class='ovr')
    #
    regressor.intercept_ = np.array(params["Intercept"])
    regressor.coef_ = np.array([list(params["Coef"])])

    print(regressor.intercept_)
    print(regressor.coef_)

    #
    return regressor


def FactorSampleTTest(df):
    """Placeholder for a per-factor sample t-test; not implemented yet."""
    pass
    # for column in df.columns:


if __name__ == '__main__':

    # ---Bootstrap: load config and open database / realtime handles---
    from Core.Config import *
    # NOTE(review): `os` is not imported at the top of this file — it is
    # presumably re-exported by Core.Config's star import; confirm.
    cfgPathFilename = os.getcwd() + "/../config.json"
    config = Config(cfgPathFilename)
    database = config.DataBase("MySQL")
    realtime = config.RealTime()

    #
    # DupontTest(database)

    # ---Prepare Data---
    # ---Factors--- (profitability / DuPont-style factor names)
    # "BookToMarket",
    factors = []
    factors.append("ProfitMargin_OperatingProfit1_LYR")
    factors.append("ProfitMargin_OperatingProfit_LYR")
    factors.append("ProfitMargin_OperatingProfit1")
    factors.append("ProfitMargin_OperatingProfit")
    factors.append("ROE_NetIncome2_TTM")
    factors.append("ROA_NetIncome2_TTM")
    factors.append("ROE_NetIncome2_LYR")
    factors.append("ROA_NetIncome2_LYR")
    #

    # ---DateTime---
    # NOTE(review): this first date pair is immediately overridden below.
    datetime1 = datetime.datetime(2010, 1, 1)
    datetime2 = datetime.datetime(2010, 5, 1)
    #
    datetime1 = datetime.datetime(2008, 1, 1)
    datetime2 = datetime.datetime(2019, 5, 1)

    # ---Instruments---
    # NOTE(review): every instrument-selection path below is commented out,
    # so `instruments` is never defined here; the active loop at the bottom
    # passes instruments=None explicitly.
    symbols = ["000001.SZ", "600399.SH", "000708.SZ"]
    # instruments = SymbolsToInstruments(database, symbols)
    # instruments = RandomInstruments(database, datetime1, num=100)
    # instruments = RandomInstruments(database, datetime2, num=1000)
    # instruments = Gadget.FindListedInstrument(database, datetime1)

    # Volatility-derived significance threshold (20% annualized vol over 242
    # trading days); these locals are unused by the active code path below.
    days = (datetime2 - datetime1).days
    dailyVola = 0.2 / math.sqrt(242)
    monthlyVola = dailyVola * math.sqrt(20)
    significant = 2 * math.sqrt(120) * dailyVola

    #
    datetime1 = datetime.datetime(2018, 11, 1)
    datetime2 = datetime.datetime(2019, 5, 1)
    # SinglePeriodTest(database, datetime1, datetime2, instruments, factors)
    # MonthBased2(database, datetime1, datetime2, instruments, factors)
    # SeasonBased(database, datetime1, datetime2, instruments, factors)

    # ---Active task: dump factor profiles for each year's profile date---
    for year in range(2019, 2019+1):
        datetime2 = datetime.datetime(year, 5, 1)
        print("Profile Date", datetime2)
        General.Profile_to_File(database, datetime2, factors, instruments=None, folderName="")