import xgboost as xgb
import pandas as pd

import os
from ..utility import read_raw_data, generate_parameters
from ..factor import all_factor_names

# Directory holding the raw per-security CSV files named "<sec_id>_<...>.csv".
# A raw string avoids the invalid "\d"/"\m" escape sequences of the original
# quoted path (a SyntaxWarning on modern CPython); the resulting value is
# byte-identical.
raw_path = r"D:\daily work\ml\raw"


def xgb_prepare_data(path=None):
    """Load every raw CSV in *path* into one datetime-indexed DataFrame.

    Each file is expected to be named ``<sec_id>_<anything>.csv`` and to
    contain a ``datetime`` column. The security id parsed from the filename
    is attached as a ``sec_id`` column and the concatenated frame is sorted
    by its datetime index.

    Parameters
    ----------
    path : str, optional
        Directory to read from; defaults to the module-level ``raw_path``.

    Returns
    -------
    pd.DataFrame
        All files concatenated, indexed and sorted by ``datetime``.
    """
    data_dir = path if path is not None else raw_path
    frames = []
    for file_name in os.listdir(data_dir):
        # split once so filenames containing extra underscores still parse
        sec_id, _ = file_name.split("_", 1)
        df = pd.read_csv(os.path.join(data_dir, file_name))

        df["datetime"] = pd.to_datetime(df["datetime"])
        df.set_index("datetime", inplace=True)
        df["sec_id"] = sec_id

        frames.append(df)

    train_df = pd.concat(frames)
    train_df.sort_index(inplace=True)
    return train_df


# data = xgb_prepare_data()
# Module-level dataset shared by the forward-test runs; read_raw_data() comes
# from the project's utility module (contents not visible here).
data = read_raw_data()
# The hyper-parameters whose values are joined into a run's "setting name".
key_para = ["n_estimators", "max_depth", "learning_rate"]
# Baseline XGBoost regressor configuration (regression with RMSE metric).
default_setting = {
    'booster': 'gbtree',
    'n_estimators': 300,
    'max_depth': 3,
    "learning_rate": 0.3,
    'objective': 'reg:squarederror',
    'eval_metric': 'rmse'
}


def run_xgb_forward_test(params, raw_data, train_days, test_days, y="y1"):
    """Walk-forward test of an XGBoost regressor.

    Trains on a rolling window of ``train_days`` distinct dates, predicts the
    next ``test_days`` dates, then slides both windows forward by
    ``test_days`` until the data is exhausted.

    Parameters
    ----------
    params : dict
        Keyword arguments for ``xgb.XGBRegressor``; must contain every key
        listed in the module-level ``key_para``.
    raw_data : pd.DataFrame
        Datetime-indexed frame with a "date" column, the factor columns named
        in ``all_factor_names``, the target column *y*, and the
        close/sec_id/symbol columns copied into the result.
    train_days : int
        Number of distinct dates in each training window.
    test_days : int
        Number of distinct dates in each test window (also the step size).
    y : str, optional
        Name of the target column (default "y1").

    Returns
    -------
    (pd.DataFrame, list[dict])
        Concatenated per-fold predictions and a list of per-fold (plus one
        overall, with end_n == 0) correlation records.
    """
    all_corrs = []
    dates = pd.to_datetime(raw_data["date"]).drop_duplicates().tolist()
    step = test_days
    date_num = len(dates)
    # NOTE(fix): was ``start_n = train_days``, which made start_date == end_date
    # on every fold (a zero-length training window). Starting at 0 gives each
    # fold a window spanning train_days dates, matching the parameter's intent.
    start_n = 0
    # str() is required: the key_para values (n_estimators, max_depth,
    # learning_rate) are numeric, and str.join rejects non-strings.
    setting_name = "_".join(str(params[para_name]) for para_name in key_para)

    model = xgb.XGBRegressor(**params)
    results = []
    for end_n in range(train_days, date_num, step):
        print(setting_name, end_n, date_num)
        # Stop when fewer than a full test window of dates remains.
        if date_num - end_n - 1 < step:
            break

        start_date = dates[start_n]
        end_date = dates[end_n]
        print(setting_name, start_date, end_date)

        # Slice the DataFrame that was passed in, not the module-level
        # ``data`` global the original referenced by mistake.
        train_data = raw_data[start_date:end_date]
        test_data = raw_data[end_date:dates[end_n + step]]

        train_y = train_data[y]
        train_x = train_data.loc[:, all_factor_names]
        test_x = test_data.loc[:, all_factor_names]

        model.fit(train_x, train_y)
        pred_y = model.predict(test_x)

        # Select the *requested* target column (the original hard-coded "y1",
        # breaking any other y), and .copy() so the assignments below do not
        # write into a view of test_data (SettingWithCopyWarning).
        result_df = test_data.loc[:, ["close", y, "sec_id", "symbol"]].copy()
        result_df["pred_y"] = pred_y
        result_df["model"] = "xgb"
        result_df["para"] = setting_name
        results.append(result_df)
        corr = result_df[y].corr(result_df["pred_y"])
        print(corr)
        all_corrs.append(
            {"para": setting_name, "model": "xgb", "end_n": end_n, "start_dt": start_date, "end_date": end_date,
             "corr": corr})
        start_n += step

    df = pd.concat(results)
    corr = df[y].corr(df["pred_y"])
    print("final", setting_name, corr)
    all_corrs.append({"para": setting_name, "model": "xgb", "end_n": 0, "start_dt": "", "end_date": "", "corr": corr})
    return df, all_corrs


def run_optimize():
    """Build the XGBoost hyper-parameter grid for optimization.

    Expands the start/end/step ranges below via the project's
    ``generate_parameters`` helper, merges the fixed booster / objective /
    eval_metric settings into each candidate, and returns the resulting list
    of parameter dicts (ready to feed to ``run_xgb_forward_test``).

    Returns
    -------
    list[dict]
        One fully-populated parameter dict per grid point.
    """
    parameter_optimize_setting = {
        "n_estimators": {
            "start": 100,
            "end": 600,
            "step": 100
        },
        "max_depth": {
            "start": 2,
            "end": 10,
            "step": 1
        },
        "learning_rate": {
            "start": 0.1,
            "end": 0.6,
            "step": 0.1
        }
    }

    optimize_parameters = generate_parameters(parameter_optimize_setting)
    for para in optimize_parameters:
        para["booster"] = "gbtree"
        # NOTE(fix): the original line ended with a stray comma, which stored
        # the 1-tuple ('reg:squarederror',) instead of the string and would
        # have broken XGBRegressor.
        para['objective'] = 'reg:squarederror'
        para['eval_metric'] = 'rmse'
    # The original built this grid and silently discarded it; return it so
    # callers can actually use the candidates.
    return optimize_parameters

# Script entry point — intentionally a no-op for now.
if __name__ == "__main__":
    pass