import os.path

from lightgbm.sklearn import LGBMClassifier
import numpy as np
import pandas as pd
from hyperopt import fmin, hp, tpe, Trials
import torch.nn as nn
from train_gan import load_checkpoint_attr, load_stock_data
from src.EnvironmentVariables import BASE_PATH
from tqdm import tqdm
import torch
from sklearn.svm import SVC
import optuna
from sklearn.preprocessing import MinMaxScaler


def split_index_data():
    """Split the raw multi-level market workbook into per-index CSVs.

    Reads the two-level-header Excel sheet, extracts each target index,
    binarizes the return column (涨跌幅) into a 0/1 label, and writes one
    feature CSV plus one label CSV per index under data/preProcessedIndexData.
    """

    def get_single_data(origin: pd.DataFrame, name: str) -> pd.DataFrame:
        # Keep only the columns whose second header level equals `name`,
        # then drop that level so a flat, single-level frame remains.
        new_df = origin.loc[:, origin.columns.get_level_values(1) == name]
        new_df.columns = new_df.columns.droplevel(level=1)
        return new_df

    origin_dfs = pd.read_excel(os.path.join(BASE_PATH, 'data', '所有指数行情序列.xlsx'), header=[0, 1], parse_dates=[0],
                               index_col=[0])
    # Round-trip through CSV (kept from the original pipeline) before re-reading.
    origin_dfs.to_csv(os.path.join(BASE_PATH, 'data', '所有指数行情序列.csv'))
    file_name = '所有指数行情序列.csv'
    index_name = ['中证500', '上证指数']
    label_name = '涨跌幅'
    origin_dfs = pd.read_csv(os.path.join(BASE_PATH, 'data', file_name), header=[0, 1], parse_dates=[0],
                             index_col=[0])
    origin_dfs.index = pd.to_datetime(origin_dfs.index)
    origin_dfs = origin_dfs[origin_dfs.index > pd.to_datetime("1999-2-1")]
    for tar_index in index_name:
        res = get_single_data(origin_dfs, tar_index).dropna()

        # Explicit copy: the original chained assignment
        # (label['label'][cond] = ...) wrote through a view of `res`,
        # triggering SettingWithCopyWarning and risking a silent no-op.
        label = res.filter(like=label_name).copy()
        label.columns = ['label']
        # Vectorized binarization: return >= 0.5 -> 1.0, else 0.0
        # (floats, matching the original column dtype).
        label['label'] = (label['label'] >= 0.5).astype(float)
        res.index.name = 'date'
        res.to_csv(os.path.join(BASE_PATH, 'data/preProcessedIndexData', tar_index + '.csv'))
        label.to_csv(os.path.join(BASE_PATH, 'data/preProcessedIndexData', tar_index + '_label.csv'))


def clsBySVM():
    """Tune an RBF-kernel SVM on the GAN discriminator features with Optuna
    and print the best trial's test accuracy and hyper-parameters.
    """
    X_train, y_train, X_test, y_test = build_data()

    def objective(_trial):
        # RBF-kernel SVM.  C and gamma act multiplicatively, so they are
        # searched on a log scale.  `degree` is not tuned: it only affects
        # the 'poly' kernel and was silently ignored by the fixed 'rbf'.
        svm = SVC(C=_trial.suggest_float('C', 1e-5, 1e3, log=True),
                  kernel='rbf',
                  gamma=_trial.suggest_float('gamma', 1e-5, 1e2, log=True))
        svm.fit(X_train, y_train)
        # Return accuracy directly: the study maximizes, and the previous
        # `1 - score` objective made Optuna maximize the *error* rate.
        return svm.score(X_test, y_test)

    study = optuna.create_study(direction='maximize')
    study.optimize(objective, n_trials=100)
    print("Best trial:")
    trial = study.best_trial
    print("  Score: {}".format(trial.value))
    print("  Params: ")
    for key, value in trial.params.items():
        print("    {}: {}".format(key, value))


def clsByGbm():
    """Tune a LightGBM classifier on the GAN discriminator features with
    hyperopt/TPE, then refit the best configuration on the training split.
    """

    def train_lgbm(x, y, *args):
        """Search LightGBM hyper-parameters with TPE and fit the best model.

        x, y: training features/labels.  args[0], args[1]: validation
        features/labels.  Returns the fitted LGBMClassifier.
        """
        from hyperopt import space_eval  # maps hp.choice indices back to values

        # Hyper-parameter search space.
        space = {
            'num_leaves': hp.choice('num_leaves', range(5, 30)),
            'max_depth': hp.choice('max_depth', range(2, 5)),
            'learning_rate': hp.loguniform('learning_rate', -5, 0),
            'n_estimators': hp.choice('n_estimators', range(5, 20)),
            'subsample': hp.uniform('subsample', 0.6, 1),
            'colsample_bytree': hp.uniform('colsample_bytree', 0.6, 1)
        }

        x_train, y_train = x, y
        x_val, y_val = args[0], args[1]

        def objective(params):
            model = LGBMClassifier(**params)
            model.fit(x_train, y_train)
            # fmin MINIMIZES its objective, so return the validation error;
            # returning raw accuracy (as before) made TPE hunt for the
            # *worst* model.
            return 1 - model.score(x_val, y_val)

        trials = Trials()
        best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=100, trials=trials)

        # fmin returns *indices* for hp.choice parameters (num_leaves,
        # max_depth, n_estimators), not the chosen values; space_eval
        # resolves them before the final refit.
        params = space_eval(space, best)
        model = LGBMClassifier(**params)
        model.fit(x_train, y_train)

        return model

    train_lgbm(*build_data())


def build_data():
    """Build train/test splits of discriminator features for 上证指数.

    A frozen GAN discriminator (input + feature layers) encodes each
    50-day window; pre-computed 0/1 labels are shifted back by 51 rows so
    every window is paired with a future outcome.  NOTE(review): the code
    assumes the feature rows and shifted labels stay aligned one-to-one —
    confirm against load_stock_data.

    Returns (X_train, y_train, X_test, y_test), split at row 4500, with
    features min-max scaled using training statistics only.
    """
    discriminator = load_checkpoint_attr('discriminator', "GANTrainer_checkpoint_20.pt")
    data_array = load_stock_data(os.path.join(BASE_PATH, 'data/preProcessedIndexData'), ['上证指数'],
                                 is_index=True,
                                 return_x=False,
                                 shuffle=False,
                                 num_historical_days=50)
    label = pd.read_csv(os.path.join(BASE_PATH, 'data/preProcessedIndexData', '上证指数_label.csv'), parse_dates=[0],
                        index_col=[0])
    # Shift so row t carries the outcome 51 steps ahead, then drop the
    # trailing rows that no longer have a label.
    label = label.shift(-51, axis=0)
    label.dropna(inplace=True)

    # Reuse the discriminator's input + feature layers as a frozen encoder.
    extractor = nn.Sequential(discriminator.input, discriminator.feature_model)
    extractor.cuda()
    extractor.eval()

    with torch.no_grad():
        chunks = [extractor(batch_x).cpu().numpy() for batch_x, _ in tqdm(data_array)]
    features = np.squeeze(np.concatenate(chunks, axis=0))

    split = 4500
    scaler = MinMaxScaler()
    # Fit scaling on the training span only; apply the same transform to test.
    X_train = scaler.fit_transform(features[:split])
    y_train = np.array(label[:split]).ravel()
    X_test = scaler.transform(features[split:])
    y_test = np.array(label[split:]).ravel()
    return X_train, y_train, X_test, y_test


if __name__ == '__main__':
    # Entry point: the other pipeline stages are kept commented out here
    # for manual runs (data preparation, then either classifier).
    # split_index_data()
    # clsBySVM()
    clsByGbm()
