import pandas as pd
import datetime
import os
import numpy as np
from WindPy import w
import Core.Config as Config
import Core.Gadget as Gadget
import Core.Quote as Quote
import matplotlib.pyplot as plt


# Download macro data via the Wind EDB API.
def EDB(wind_code, datetime1, datetime2, field_name="", dateAsIndex=True, date_field_name="Report_Date"):
    """Download a macro series from Wind EDB as a DataFrame.

    wind_code: Wind code, e.g. 'M0000612'
    datetime1, datetime2: requested date range (datetime1 defaults to 2000-01-01)
    field_name: column name for the values (e.g. CPI_YoY, GDP_YoY);
                defaults to wind_code
    dateAsIndex: if True the (report) date becomes the DataFrame index,
                 otherwise it is kept as a normal column
    date_field_name: name of the date column when dateAsIndex is False
    """
    if datetime1 is None:
        datetime1 = datetime.datetime(2000, 1, 1)
    if field_name == "":
        field_name = wind_code

    mydata = w.edb(wind_code, datetime1, datetime2, 'Fill=Previous')
    mydata_df = pd.DataFrame(data=mydata.Data[0], columns=[field_name], index=mydata.Times)

    if not dateAsIndex:
        # keep the date as an ordinary column
        mydata_df.reset_index(inplace=True)
        mydata_df.rename(columns={"index": date_field_name}, inplace=True)
        # Wind occasionally returns rows dated after datetime2 -- drop them
        mydata_df = mydata_df[mydata_df[date_field_name] <= datetime2.date()]
    else:
        # Wind occasionally returns rows dated after datetime2 -- drop them
        mydata_df = mydata_df[mydata_df.index <= datetime2.date()]
        mydata_df.index = pd.to_datetime(mydata_df.index)

    return mydata_df


def WSD_Quote_Snapshot(symbols, datetime2, param=""):
    """Fetch a single-day close-price snapshot for the given symbols via Wind WSD.

    Returns a DataFrame indexed by symbol with Close and Symbol columns.
    """
    snapshot = w.wsd(symbols, ["Close"], datetime2, datetime2, param)
    df = pd.DataFrame(data=snapshot.Data[0], index=snapshot.Codes, columns=["Close"])
    # duplicate the index as a plain column for downstream merges
    df["Symbol"] = df.index
    return df


def WSD_Quote(symbols, datetime1, datetime2=None, param=""):
    """Fetch daily close prices for one or more symbols via Wind WSD.

    symbols: a single symbol or a list of symbols.
    datetime2 defaults to datetime1 (single-day query).
    Returns a DataFrame indexed by date with one column per symbol.
    """
    fields = ["Close"]
    if not isinstance(symbols, list):
        symbols = [symbols]
    if datetime2 is None:
        datetime2 = datetime1

    data = w.wsd(symbols, fields, datetime1, datetime2, param)
    # data.Data holds one list per symbol; transpose to one row per date
    data_raw = [list(row) for row in zip(*data.Data)]
    df = pd.DataFrame(data=data_raw, index=data.Times, columns=symbols)
    return df


def WSD_Multi_Fields(symbol, datetime1, datetime2, fields=None, param=""):
    """Fetch several WSD fields for one symbol.

    fields: list of Wind field names. (Fix: was a mutable default argument
    `fields=[]`; now None, normalized to a fresh list per call.)
    datetime2 may be None, in which case it defaults to datetime1.
    Returns a DataFrame indexed by date with one column per field.
    """
    if fields is None:
        fields = []
    if datetime2 is None:
        datetime2 = datetime1

    data = w.wsd(symbol, fields, datetime1, datetime2, param)
    # data.Data holds one list per field; transpose to one row per date
    data_raw = [[data.Data[j][i] for j in range(len(fields))]
                for i in range(len(data.Times))]
    df = pd.DataFrame(data=data_raw, index=data.Times, columns=fields)
    return df


def RealTime_Quote(database, realtime, symbols, datetime2):
    """Fetch close quotes for symbols at datetime2 from the realtime source.

    A plain date is interpreted as 15:00 (market close) on that day.
    Returns a DataFrame with Time/Close/Symbol columns; symbols without a
    quote are reported and skipped.
    """
    # Convert a date to a concrete datetime at the market close.
    # NOTE: datetime.datetime is a subclass of datetime.date, so datetime
    # inputs are also re-pinned to 15:00 (matches original behavior).
    tmp_datetime2 = datetime2
    if isinstance(datetime2, datetime.date):
        tmp_datetime2 = datetime.datetime.combine(datetime2, datetime.time(15, 0, 0))

    data = []
    for symbol in symbols:
        quote = Quote.GetQuote(database, symbol, tmp_datetime2, realtime)
        if quote is None:  # fix: identity check instead of "== None"
            print("No Quote", symbol, datetime2)
            continue
        quote["Symbol"] = symbol
        data.append(quote)
    df = Gadget.DocumentsToDataFrame(data, keep=["DateTime", "Close", "Symbol"], rename={"DateTime": "Time"})
    return df


# fieldName: value column name in df
# factorName: factor name stored into the database
# df must contain a Date (or date) column
# 2020-7-18: unified into Save_Systematic_To_Database
def SaveSystematic_MarketFactorToDatabase(database,
                                          df,  # DataFrame to store
                                          factorName,  # factor name to store under
                                          fieldName=""):  # value column in df
    """Save a systematic market factor into Factor.A_sys_factor.

    A market factor's report/release dates are the trading date itself, so
    missing Report_Date/Release_Date columns are filled from the date column.
    fieldName defaults to factorName.
    """
    if fieldName == "":
        fieldName = factorName

    print("Saving Systematic Market Factor", factorName, "#", len(df))

    # locate the date column (either capitalization)
    if "Date" in df.columns:
        dateField = "Date"
    elif "date" in df.columns:
        dateField = "date"
    else:
        print("No Date Field in Dataframe")
        return

    # 2020-7-18 unified: fill report/release dates from the trading date
    if "report_date" not in df.columns and "ReportDate" not in df.columns:
        df["Report_Date"] = df[dateField]
    if "release_date" not in df.columns and "ReleaseDate" not in df.columns:
        df["Release_Date"] = df[dateField]

    # fix: removed the unreachable legacy document-building loop that
    # followed this return in the original
    Save_Systematic_To_Database(database, df, factorName, fieldName, table_name="A_sys_factor")


# Market Factor vs Macro Factor:
# a market factor usually has a single date (that day's trading result);
# a macro factor is official statistics whose release date lags the report date.
def SaveSystematic_MacroFactorToDatabase(database,
                                         df,  # DataFrame to store
                                         factorName,  # factor name to store under
                                         fieldName=""  # value column in df
                                         ):
    """Save a macro factor into Factor.A_sys_factor_macro.

    df must carry ReportDate and ReleaseDate columns; fieldName defaults
    to factorName.
    """
    if fieldName == "":
        fieldName = factorName

    print("Saving Systematic Macro Factor", factorName, "#", len(df))

    # fix: messages now name the columns actually checked
    # (they previously said report_date/release_date)
    if "ReportDate" not in df.columns:
        print("No ReportDate Field in Dataframe")
        return
    if "ReleaseDate" not in df.columns:
        print("No ReleaseDate Field in Dataframe")
        return

    newDocuments = []
    for index, row in df.iterrows():
        value = row[fieldName]
        reportDate = row["ReportDate"]
        releaseDate = row["ReleaseDate"]

        newDocument = {}
        newDocument["Name"] = factorName
        newDocument["Value"] = float(value)
        newDocument["Report_Date"] = reportDate
        newDocument["Release_Date"] = releaseDate
        # alignment stamps use the release date (when the data became known)
        newDocument["Date"] = releaseDate
        newDocument["DateTime"] = releaseDate
        newDocument["Key2"] = factorName + "_" + Gadget.ToDateString(reportDate)
        newDocuments.append(newDocument)

    database.Upsert_Many("Factor", "A_sys_factor_macro", [], newDocuments)
    print("Saved Systematic Macro Factor", factorName, "#", len(newDocuments))


def Save_Systematic_Factor_To_Database(database, df, save_name, field_name="", key_date="", save_null=False):
    """Save a factor series into the A_sys_factor table (thin wrapper)."""
    Save_Systematic_To_Database(database, df, save_name, field_name,
                                table_name="A_sys_factor",
                                key_date=key_date, save_null=save_null)


def Save_Systematic_Raw_To_Database(database, df, saved_name, field_name="", key_date=""):
    """Save a raw factor series into the A_sys_factor_raw table (thin wrapper)."""
    Save_Systematic_To_Database(database, df, saved_name, field_name,
                                table_name="A_sys_factor_raw",
                                key_date=key_date)


def Save_Systematic_To_Database(database, df, save_name, field_name="", table_name="", key_date="", save_null=False):
    """Upsert a factor time series into Factor.<table_name>.

    df should carry report/release date columns in any accepted spelling
    (ReportDate/report_date, ReleaseDate/release_date); otherwise a
    date/Date column, and as a last resort the index, is used for both.
    field_name: value column in df (defaults to save_name).
    key_date: column used to build the unique Key2 (defaults to Report_Date).
    save_null: also store rows whose value is missing.
    """
    print(" *** ")
    print("Saving Systematic Data", save_name)

    if field_name == "":
        field_name = save_name

    # normalize report/release date column names
    if "ReportDate" in df.columns:
        df.rename(columns={"ReportDate": "Report_Date"}, inplace=True)
    elif "report_date" in df.columns:
        df.rename(columns={"report_date": "Report_Date"}, inplace=True)

    if "ReleaseDate" in df.columns:
        df.rename(columns={"ReleaseDate": "Release_Date"}, inplace=True)
    elif "release_date" in df.columns:
        df.rename(columns={"release_date": "Release_Date"}, inplace=True)

    # fallback when no report/release dates exist: use a date column
    if "Report_Date" not in df.columns and "Release_Date" not in df.columns and "date" in df.columns:
        df["Report_Date"] = df["date"]
        df["Release_Date"] = df["date"]

    # same fallback for the capitalized spelling
    if "Report_Date" not in df.columns and "Release_Date" not in df.columns and "Date" in df.columns:
        df["Report_Date"] = df["Date"]
        df["Release_Date"] = df["Date"]

    # last resort: use the index
    if "Release_Date" not in df.columns:
        df["Release_Date"] = df.index
    if "Report_Date" not in df.columns:
        df["Report_Date"] = df.index

    print(df[["Report_Date", "Release_Date", field_name]].tail(10))

    newDocuments = []
    for index, row in df.iterrows():
        # normalize the value: NaN -> None, non-numeric -> None
        value = row[field_name]
        if isinstance(value, float):
            if np.isnan(value):
                value = None
            else:
                value = float(value)
        elif isinstance(value, int):
            pass
        else:
            value = None

        # skip missing values unless explicitly requested
        if not save_null and value is None:  # fix: identity check, not "== None"
            continue
        # skip rows without a usable report date (NaT / nan)
        if str(row["Report_Date"]) == "NaT":
            continue
        if str(row["Report_Date"]) == "nan":
            continue

        reportDate = row["Report_Date"]
        # a missing release date falls back to the report date
        if str(row["Release_Date"]) == "NaT":
            releaseDate = reportDate
        else:
            releaseDate = row["Release_Date"]

        newDocument = {}
        newDocument["Name"] = save_name
        newDocument["Value"] = value

        # logical dates: e.g. April data is published mid-May, so
        # Report_Date is end of April and Release_Date is May 15th
        newDocument["Report_Date"] = reportDate
        newDocument["Release_Date"] = releaseDate

        # date/time stamps used for alignment when the data is consumed
        if "Date" in row:
            newDocument["Date"] = row["Date"]
            newDocument["DateTime"] = row["Date"]
        else:
            newDocument["Date"] = releaseDate
            newDocument["DateTime"] = releaseDate

        # unique upsert key: factor name + date (Report_Date unless key_date given)
        if key_date == "":
            newDocument["Key2"] = save_name + "_" + Gadget.ToDateString(reportDate)
        else:
            keyDateTime = row[key_date]
            newDocument["Key2"] = save_name + "_" + Gadget.ToDateString(keyDateTime)

        newDocuments.append(newDocument)

    database.Upsert_Many("Factor", table_name, [], newDocuments)
    print("Saved Systematic Data", save_name, "#", len(newDocuments))




#
def Load_Systematic_Market_Factor(database, factor_name, datetime1=None, datetime2=None):
    # Load a market factor series by name from Factor.a_sys_factor.
    # Returns a DataFrame with columns [date, <factor_name>].
    #
    # NOTE(review): the filter list below (including the datetime1/datetime2
    # range conditions) is built but never passed to database.Find -- the
    # query always loads the full history. Confirm intent before relying on
    # the date-range arguments.
    filter = []
    filter.append(["Name", factor_name])
    if datetime1 != None:
        filter.append(["Date", ">=", datetime1])
    if datetime2 != None:
        filter.append(["Date", "<=", datetime2])
    #
    factor_series = database.Find("Factor", "a_sys_factor", {"Name": factor_name}, sort=[("date", 1)])
    df = Gadget.DocumentsToDataFrame(factor_series, keep=["date", "value", "modified_time"])

    df = df[["date", 'value']]
    # normalize the date column and name the value column after the factor
    df["date"] = pd.to_datetime(df["date"])
    df.rename(columns={"value": factor_name}, inplace=True)
    #
    return df


# Load a factor series from the database
def Load_Systematic_Factor(database, factor_name, datetime1=None, datetime2=None, indexed=False):
    """Load a systematic factor from factor.a_sys_factor.

    datetime1/datetime2: optional inclusive date range to read.
    indexed: if True, set the date column as the index.
    Returns an empty DataFrame (with a diagnostic print) when the factor
    does not exist.
    """
    filters = []  # fix: renamed from 'filter' to stop shadowing the builtin
    filters.append(["Name", factor_name])
    if datetime1 is not None:
        filters.append(["Date", ">=", datetime1])
    if datetime2 is not None:
        filters.append(["Date", "<=", datetime2])

    df = database.GetDataFrame("Factor", "a_sys_factor", filters,
                               projection=["date", "report_date", "release_date", 'value'],
                               sort=[("date", 1)])
    if df.empty:
        print("No Factor", factor_name)
        return pd.DataFrame()

    # normalize the date columns and name the value column after the factor
    df['report_date'] = pd.to_datetime(df['report_date'])
    df['release_date'] = pd.to_datetime(df['release_date'])
    df.rename(columns={"value": factor_name}, inplace=True)

    if indexed:
        df.set_index("date", inplace=True)

    return df


def Load_Many_Systematic_Factor(database, factor_name_list, merged_date="date", datetime1=None, datetime2=None, indexed=False):
    """Load several systematic factors and outer-merge them on merged_date.

    Each factor becomes one column, named after the factor.
    datetime1/datetime2: optional inclusive date range.
    indexed: if True, set merged_date as the index.
    """
    merged = pd.DataFrame()
    for name in factor_name_list:
        one = database.GetDataFrame("Factor", "a_sys_factor",
                                    filter=[("name", name)],
                                    projection=[merged_date, "value"],
                                    sort=[(merged_date, 1)])
        one.rename(columns={"value": name}, inplace=True)
        merged = one if merged.empty else pd.merge(merged, one, how="outer", on=merged_date)

    # order rows by date
    merged.sort_values(by=merged_date, inplace=True, ascending=True)

    # optional date-range trim
    if datetime1:
        merged = merged[merged[merged_date] >= datetime1].copy()
    if datetime2:
        merged = merged[merged[merged_date] <= datetime2].copy()

    if indexed:
        merged.set_index(merged_date, drop=True, inplace=True)
    return merged

# get_many_index
def load_many_index(database, index_symbol_list, datetime1=None, datetime2=None, indexed=False):
    """Load daily closes for several index symbols and outer-merge them on date.

    Each symbol becomes one column, named after the symbol.
    datetime1/datetime2: optional inclusive date range.
    indexed: if True, set the date column as the index.
    """
    date_col = "date"
    merged = pd.DataFrame()
    for symbol in index_symbol_list:
        bars = database.GetDataFrame("financial_data", "index_daily_bar",
                                     filter=[("symbol", symbol)],
                                     projection=[date_col, "close"],
                                     sort=[(date_col, 1)])
        bars.rename(columns={"close": symbol}, inplace=True)
        merged = bars if merged.empty else pd.merge(merged, bars, how="outer", on=date_col)

    # order rows by date
    merged.sort_values(by="date", inplace=True, ascending=True)

    # optional date-range trim
    if datetime1:
        merged = merged[merged[date_col] >= datetime1].copy()
    if datetime2:
        merged = merged[merged[date_col] <= datetime2].copy()

    if indexed:
        merged.set_index(date_col, drop=True, inplace=True)
    return merged


# Pengyan's counterpart to Load_Systematic_Factor, implemented via EDB
def Query_Data(database, code, datetime1, datetime2):
    """Load a factor time series; currently delegates to the Wind EDB download.

    database is kept for interface compatibility but unused: the legacy
    local-database query (Find on factor.test_table) that used to follow the
    return was unreachable dead code and has been removed.
    """
    return EDB(code, datetime1, datetime2)


# Fill expanding absolute-value (min-max) percentile
def fill_historical_absolute_value_percentile(df, field_name):
    """Expanding min-max percentile of field_name.

    For each row i: (value_i - min(0..i)) / (max(0..i) - min(0..i)).
    NaN when the expanding window has zero spread (always true for the
    first row). Resets df's index in place; returns a new single-column
    DataFrame aligned 0..len-1.
    """
    df.reset_index(inplace=True, drop=True)
    df_percentile = df[[field_name]].copy()
    df_percentile[field_name] = np.nan

    for i in range(len(df)):
        window = df[field_name].iloc[0:i + 1]
        cur_value = window.iloc[-1]
        spread = window.max() - window.min()
        # fix: guard zero/NaN spread explicitly instead of relying on
        # numpy's 0/0 -> nan (which also emits a RuntimeWarning)
        if spread == 0 or np.isnan(spread):
            p = np.nan
        else:
            p = (cur_value - window.min()) / spread
        df_percentile.loc[i, field_name] = p

    return df_percentile


# Fill expanding percentile rank (v1 is faster -- it has no sorting step)
def fill_historical_probability_percentile_v2(df, field_name, reindex=True):
    """Expanding percentile rank of field_name using
    Series.rank(method='average', pct=True) over rows 0..i.

    reindex: reset df's index in place first (keeps the old index as a column).
    Returns a new single-column DataFrame of percentiles.
    """
    df_percentile = df[[field_name]].copy()
    df_percentile[field_name] = np.nan

    if reindex:
        df.reset_index(inplace=True)

    for pos in range(len(df)):
        window = df[0: pos + 1].copy()
        window["rank"] = window[field_name].rank(method="average", pct=True)
        df_percentile.loc[pos, field_name] = window.iloc[-1]["rank"]

    # pandas rank methods, for reference:
    # min   -- ties share the lowest rank (gapped); divide by total count
    # max   -- ties share the highest rank (gapped); divide by total count
    # dense -- consecutive ranks, no gaps from ties; divide by max rank
    # average / first -- mean of tied ranks / order of appearance
    # hand-rolled alternative:
    #   r = s.rank(method='min', ascending=hi_value_better, pct=False)
    #   r = r / np.max(r)   # divide by max_rank
    return df_percentile


# Fill expanding probability percentile (percentile along the time axis)
def fill_historical_probability_percentile(df, field_name, reindex=True, return_raw_data=False):
    """For each row i, the probability that a value in rows 0..i does not
    exceed the current value: p = 1 - greater_count / non_null_count.

    reindex: reset df's index in place first (keeps the old index as a column).
    return_raw_data: return df concatenated with a <field_name>_Rank column
    instead of the single-column percentile frame.
    """
    if reindex:
        df.reset_index(inplace=True)

    df_percentile = df[[field_name]].copy()
    df_percentile[field_name] = np.nan

    for i in range(len(df)):
        df_tmp = df[0: i + 1]
        cur_value = df_tmp[field_name].iloc[-1]
        # fix: removed the unused less_count computation left over from the
        # pre-20210924 "probability of being below" definition
        greater_count = len(df_tmp[df_tmp[field_name] > cur_value])

        # count non-null values only -- more accurate than the slice length (20210924)
        total_count = df_tmp[field_name].count()

        if total_count == 0:
            p = np.nan
        else:
            # "probability of not exceeding the current value" (20210924)
            p = 1 - greater_count / total_count

        df_percentile.loc[i, field_name] = p

    if return_raw_data:  # fix: idiomatic truth test instead of "== True"
        df_percentile.rename(columns={field_name: field_name + "_Rank"}, inplace=True)
        df = pd.concat([df, df_percentile], axis=1)
        return df

    return df_percentile


# Fill the release date from the report date.
# lag_release_month: months after the month in Report_Date
# release_day: day of the release month; negative counts back from month end
# e.g. May CPI released June 10th -> lag_release_month=1, release_day=10
def Fill_ReleaseDate(df, lag_release_month=1, release_day=1):
    """Add a Release_Date column computed from Report_Date (in place).

    release_day > 0 is a day-of-month; release_day <= 0 counts back from the
    end of the release month (-1 = last day).
    """
    def AddReleaseDate(row, lag_release_month, release_day, datetime_field):
        report_date = row[datetime_field]
        # divmod month arithmetic: fixes the original's off-by-one-year when
        # month + lag was an exact multiple of 12 greater than 12
        total = report_date.month - 1 + lag_release_month
        year = report_date.year + total // 12
        month = total % 12 + 1

        if release_day > 0:  # positive: plain day-of-month
            release_date = datetime.datetime(year, month, release_day)
        else:  # non-positive: first day of the next month, minus days
            if month == 12:
                next_first = datetime.datetime(year + 1, 1, 1)
            else:
                next_first = datetime.datetime(year, month + 1, 1)
            release_date = next_first + datetime.timedelta(days=release_day)
        return release_date

    df["Release_Date"] = df.apply(lambda x: AddReleaseDate(x, lag_release_month, release_day, "Report_Date"), axis=1)


def Calc_Period_Return(df, field_name, period="Weekly", is_log=False):
    """Resample a daily series to weekly/monthly closes and compute returns.

    df must be indexed by datetime; field_name is the price column.
    is_log: natural-log return instead of simple return.
    Returns the resampled frame with 'return' and 'Report_Date' columns,
    first (return-less) row dropped. Adds a Release_Date column to df.
    """
    df['Release_Date'] = df.index

    if period.lower() == "monthly":
        trimmed = Fix_Trim_Monthly(df, include_last_day=True)
        df_period = trimmed.resample('M').last()
    else:
        # default: weekly, trimmed to whole Monday-Friday weeks
        trimmed = Fix_Trim_Weekly(df, x_start=1, x_end=5)
        df_period = trimmed.resample('W').last()

    df_period.fillna(method='ffill', inplace=True)

    prev = df_period[field_name].shift(1)
    if is_log:
        df_period["return"] = np.log(df_period[field_name] / prev)
    else:
        df_period["return"] = df_period[field_name] / prev - 1

    df_period['Report_Date'] = df_period.index
    # drop the first row: it has no prior period to compute a return from
    return df_period[1:]


def Calc_Period_Return_EDB(wind_code, datetime1, datetime2, period="Weekly", is_log=False):
    """Download a series via Wind EDB and compute its weekly/monthly returns."""
    series_df = EDB(wind_code, datetime1, datetime2, dateAsIndex=True)
    return Calc_Period_Return(series_df, wind_code, period=period, is_log=is_log)


def Calc_Period_Return_Database(database, symbol, datetime1, datetime2, instrument_type="index", period="Weekly",
                                is_log=False):
    """Load daily closes for symbol from the database and compute
    weekly/monthly returns over [datetime1, datetime2]."""
    # pull extra history so the first period has a prior close to diff against
    if period.lower() == "weekly":
        datetime0 = datetime1 + datetime.timedelta(days=-15)
    else:
        datetime0 = datetime1 + datetime.timedelta(days=-60)

    query_filter = []
    query_filter.append(["symbol", symbol])
    query_filter.append(["date", ">=", datetime0])
    query_filter.append(["date", "<=", datetime2])

    df = database.GetDataFrame("financial_data", instrument_type + "_dailybar", query_filter,
                               projection=["date", "close"])
    df.rename(columns={"close": symbol}, inplace=True)

    # index by datetime so the series can be resampled
    df["date_dt"] = pd.to_datetime(df["date"])
    df.set_index("date_dt", inplace=True)

    df_weekly = Calc_Period_Return(df, symbol, period=period, is_log=is_log)

    # trim back to the requested window
    in_range = (df_weekly["date"] >= datetime1) & (df_weekly["date"] <= datetime2)
    return df_weekly[in_range]


# Some macro series skip months (e.g. no January publication) -- fill them in
def Fix_Missing_Monthly(df):
    """Forward-fill a monthly series over a full calendar-day axis and
    resample back to month-end, so skipped months carry the prior value.

    Adds a 'date' column to df; returns the monthly frame.
    """
    df["date"] = pd.to_datetime(df.index)
    first_date = df.iloc[0]["date"]
    last_date = df.iloc[-1]["date"]

    calendar = Gadget.Generate_Calender_Days_DataFrame(first_date, last_date, date_field_name="date")
    filled = pd.merge(calendar, df, how="left", on="date")
    filled.index = filled["date"]
    filled.fillna(method="ffill", inplace=True)

    return filled.resample("M").last()


def Fix_Nan_DateTime(release_date, report_date):
    """Return release_date, falling back to report_date when it is missing.

    Fix: also treats NaT (pandas' missing datetime) and float NaN as missing
    via pd.isna; the original only caught the string form "nan". The string
    check is kept so a literal "nan" value still falls back.
    """
    if pd.isna(release_date) or str(release_date) == "nan":
        return report_date
    return release_date

# For a sum, the previous month's last day must NOT be included;
# for a return, it MUST be (it is the base of the first month's return).
def Fix_Trim_Monthly(df, include_last_day=False):
    """Trim a datetime-indexed frame to whole calendar months.

    The start advances to the 1st of the following month (or to the
    previous month's last day when include_last_day=True); the end pulls
    back to the most recent month end.
    """
    first_dt = df.index[0]
    last_dt = df.index[-1]

    # advance the start to the next month's 1st
    if first_dt.month == 12:
        start = datetime.datetime(first_dt.year + 1, 1, 1)
    else:
        start = datetime.datetime(first_dt.year, first_dt.month + 1, 1)
    if include_last_day:
        # step back one day so the previous month-end value is included
        start = start - datetime.timedelta(days=1)

    # pull the end back to a month-end
    if (last_dt + datetime.timedelta(days=1)).month != last_dt.month:
        end = last_dt  # already the last day of its month
    else:
        # last day of the previous month = 1st of this month minus one day
        end = datetime.datetime(last_dt.year, last_dt.month, 1) - datetime.timedelta(days=1)

    return df[(df.index >= start) & (df.index <= end)]


# x: weekday number
def Fix_Trim_Weekly(df, x_start=1, x_end=5):
    """Trim a datetime-indexed frame to whole weeks.

    The start advances to the next weekday x_start (default Monday);
    the end pulls back to the previous weekday x_end (default Friday).
    """
    start = Gadget.Find_Recent_Weekday(df.index[0], x_start, previous=False)
    end = Gadget.Find_Recent_Weekday(df.index[-1], x_end, previous=True)
    return df[(df.index >= start) & (df.index <= end)]


def Find_Last_Update_Date(database, factor_name, use_datetime=True):
    """Return the latest stored date for factor_name in factor.a_sys_factor.

    use_datetime: convert the resulting date to a datetime at midnight.
    NOTE(review): factor_name is interpolated into the SQL string directly --
    acceptable for internal factor names, but unsafe for untrusted input;
    prefer a parameterized query if database.ExecuteSQL supports one.
    (fix: removed the unused local database_name.)
    """
    maxDate = database.ExecuteSQL("factor", "select max(date) from factor.a_sys_factor where name = '" + factor_name + "'")
    maxDate = maxDate[0][0]

    if use_datetime:
        maxDate = datetime.datetime(maxDate.year, maxDate.month, maxDate.day)

    return maxDate


def test_rank_method():
    """Smoke-test the two percentile-fill helpers on a small descending series."""
    df = pd.DataFrame([[3], [2], [1]], columns=["x_field"])

    # fix: fill_historical_probability_percentile_v2 has no 'rename'
    # parameter -- the old call passed rename=... and raised TypeError
    fill_historical_probability_percentile_v2(df, "x_field")
    fill_historical_probability_percentile(df, "x_field")
    print(df)

if __name__ == '__main__':
    # ---Connecting Database---
    # fix: raw string avoids the invalid "\.." escape-sequence warning while
    # producing byte-identical path text
    path_filename = os.getcwd() + r"\..\Config\config_local.json"
    database = Config.create_database(database_type="MySQL", config_file=path_filename, config_field="MySQL")

    test_rank_method()
    df = Load_Systematic_Factor(database, "UpDown_Ratio_Gap_Smooth_HS300_Weekly")







