# from sklearn.externals import joblib
import os
import json
from datetime import datetime, timedelta

# import joblib
import joblib as joblib
import pandas as pd

# model_path = r'D:\workspace\TY_LinearModel\Model\FactorDatas'
# strategy_path = r'D:\workspace\TY_LinearModel\Model\StrategyData'
# instrument_id = 'IC9999'
# strategy_id = 'GBDT_IC9999_fwdt15_1mo_20201124152825'
# Analysis window for the factor data.  read_factor_data() additionally pads
# the start back by 30 days so forward-filling has warm-up history.
begin_date = datetime(2020,1,1)
end_date = datetime.today()  # window runs up to "now"

def read_factor_data(path, begin_date, end_date):
    """Load per-year factor CSVs from *path* and return rows inside the window.

    The window start is padded 30 days earlier so downstream forward-fill has
    history to draw on.  Files are named ``<year>.csv``; missing years are
    reported and skipped.  Returns an empty DataFrame when nothing was read.
    """
    begin_date = begin_date - timedelta(days = 30)

    frames = []
    for year in range(begin_date.year, end_date.year + 1):
        file_path = os.path.join(path, '{0}.csv'.format(year))
        if not os.path.exists(file_path):
            print('文件不存在:', file_path)
            continue
        frames.append(pd.read_csv(file_path))

    if not frames:
        return pd.DataFrame()

    df = pd.concat(frames)
    df['TickNow'] = pd.to_datetime(df['TickNow'])
    df.sort_values('TickNow', inplace=True)

    if 'TradingDate' in df.columns:
        # Daily-style factors: filter on the trading date column.
        df['TradingDate'] = pd.to_datetime(df['TradingDate'])
        df['EndTime'] = pd.to_datetime(df['EndTime'])
        mask = (df['TradingDate'] >= begin_date) & (df['TradingDate'] <= end_date)
    else:
        # Tick-style factors: end_date is a midnight timestamp, so extend one
        # day to keep that day's intraday ticks.
        mask = (df['TickNow'] >= begin_date) & (df['TickNow'] <= end_date + timedelta(days = 1))
    return df.loc[mask]


# strategy_file_path = os.path.join(strategy_path, instrument_id)
# strategy_file = os.path.join(strategy_file_path, '{0}.json'.format(strategy_id))
#
# with open(strategy_file, 'r') as fp:
#     strategy_data = json.load(fp)
#
# model_ids = strategy_data['model_ids']
#
# model_id = model_ids[-1]


# Identifier of the trained model whose artifacts are loaded below.
model_id = 'GBDT_IC9999_fwdt15_Bg20160101_Ed20201031_Up20201124192559'

# Directory holding the model's JSON metadata plus the Scaler/Model binaries.
model_file_path = r'D:\WorkSpace\MQResearch\MQPy\ActiveMqDemo\RpcDemo'
model_file = os.path.join(model_file_path, '{0}.json'.format(model_id))

# Load the model description (factor list, per-factor data paths, clip bounds).
with open(model_file, 'r', encoding='utf-8') as fp:
    model_data = json.load(fp)

base_factors = model_data['base_factors']      # ordered factor names
base_factor_paths = model_data['path']         # factor name -> CSV directory
bound_values = model_data['bound_values']      # factor name -> (low, high) clip bounds

# Assemble one column per base factor, indexed by timestamp and clipped to
# the bounds the model was trained with.
factor_values = {}
for base_factor in base_factors:
    bound_value = bound_values[base_factor]
    factor_value = read_factor_data(base_factor_paths[base_factor], begin_date, end_date)
    # Daily factors carry an EndTime column; tick factors are keyed by TickNow.
    if 'EndTime' in factor_value.columns:
        factor_value.set_index('EndTime', inplace=True)
    else:
        factor_value.set_index('TickNow', inplace=True)

    # Series.clip replaces the old min/max lambda: same clamping for real
    # numbers, but NaN stays NaN (the lambda silently turned NaN into the
    # lower bound) so gaps get forward-filled below instead of fabricated.
    factor_values[base_factor] = factor_value['FactorValue'].clip(bound_value[0], bound_value[1])

df = pd.DataFrame(factor_values)
df = df[base_factors]  # enforce the model's expected column order

# Forward-fill alignment gaps; fillna(method='ffill') is deprecated since
# pandas 2.1, ffill() is the supported spelling.
df = df.ffill()
df = df.loc[df.index >= begin_date]  # drop the 30-day warm-up padding


# Restore features to the scale the model was trained on.
scaler = joblib.load(os.path.join(model_file_path, 'Scaler.model'))
scaled_df = scaler.transform(df)

# Load the fitted estimator and run inference on the scaled features.
model = joblib.load(os.path.join(model_file_path, 'Model.model'))
y_pred = model.predict(scaled_df)
y_pred_s = pd.Series(y_pred, index=df.index)

# Show every row at full float precision when printing.
pd.set_option('display.max_rows', None)
pd.set_option('display.precision', 18)

# Persist features + prediction for offline inspection, then preview.
df['predict'] = y_pred
df.to_csv('D:\\a.csv')
print(y_pred_s.head(1000))