# sklearn provides a complete pipeline.
'''
Data loading, data wrangling, preprocessing, feature engineering,
model selection, result analysis and hyper-parameter tuning.
'''

from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import r2_score,accuracy_score
from sklearn.ensemble import RandomForestRegressor,RandomForestClassifier
import xgboost as xgb
from scipy.stats import randint
import numpy as np

class SKlearnModel:
    """Small sklearn-style modelling pipeline.

    Wraps data splitting, model selection (randomized / grid search),
    fitting, persistence and evaluation.  The base class has no concrete
    estimator; subclasses supply one by overriding :meth:`get_model`.
    """

    def __init__(self, df=None, feature_names=None, label_name='label'):
        # df: the full dataset (a pandas DataFrame is assumed — TODO confirm)
        # feature_names: columns used as model input; None -> use all columns
        # label_name: column holding the prediction target
        self.df = df
        self.feature_names = feature_names
        self.label_name = label_name

    def RF(self, X, y):
        """Tune a RandomForestClassifier with RandomizedSearchCV.

        Splits X/y, searches a small hyper-parameter space, reports
        train/test scores, round-trips the fitted search through joblib
        ('model.job') and returns the fitted search object.

        :param X: feature matrix
        :param y: target vector (class labels)
        :return: the fitted RandomizedSearchCV instance
        """
        X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
        param_distributions = {'n_estimators': randint(1, 5),
                               'max_depth': randint(5, 10)}

        model = RandomForestClassifier(random_state=0)
        # model = RandomForestRegressor(random_state=0)  # regression variant

        # Randomized search over the distributions above.
        search = RandomizedSearchCV(estimator=model,
                                    n_iter=5,
                                    param_distributions=param_distributions,
                                    random_state=0)
        search.fit(X_train, y_train)
        print(search.best_params_)
        # For a classifier, .score() is accuracy — the old 'R2' label was wrong.
        print('test score:', search.score(X_test, y_test))
        print('train score:', search.score(X_train, y_train))

        # Persist and reload to verify the fitted model round-trips cleanly.
        import joblib
        joblib.dump(search, 'model.job')

        m = joblib.load('model.job')
        y_pred = m.predict(X_test)

        print('accuracy:', accuracy_score(y_test, y_pred))
        # NOTE: metric signature is (y_true, y_pred) — the order must not be swapped.
        print('r2_score:', r2_score(y_test, y_pred))
        return search

    def XGB(self, X, y):
        """Tune an XGBClassifier with GridSearchCV over its main parameters.

        :param X: feature matrix
        :param y: target vector (class labels)
        :return: the fitted GridSearchCV instance (consistent with :meth:`RF`)
        """
        X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
        clf = xgb.XGBClassifier()

        # Search ranges for the six main XGBoost hyper-parameters.
        param_dist = {
            'n_estimators': range(80, 200, 4),
            'max_depth': range(2, 15, 1),
            'learning_rate': np.linspace(0.01, 2, 20),
            'subsample': np.linspace(0.7, 0.9, 20),
            'colsample_bytree': np.linspace(0.5, 0.98, 10),
            'min_child_weight': range(1, 9, 1)
        }

        # scoring='neg_log_loss' evaluates probabilistic predictions;
        # n_jobs=-1 uses every available CPU core (default is 1).
        grid = GridSearchCV(clf, param_dist, cv=3, scoring='neg_log_loss', n_jobs=-1)

        grid.fit(X_train, y_train)
        print(grid.best_params_)
        print(grid.best_score_)
        # Bug fix: the fitted search was previously discarded.
        return grid

    def split_data(self, df, feature_names=None, label_name='label'):
        """Split df into train/test features and labels (80/20).

        Bug fix: 'self' was missing from the signature, so the instance
        call in fit() shifted every argument by one position.

        :param df: dataset containing features and the label column
        :param feature_names: columns to use as features; falsy -> all columns
        :param label_name: name of the target column
        :return: X_train, X_test, y_train, y_test
        """
        target = df[label_name]
        if feature_names:
            data = df[feature_names]
        else:
            # NOTE(review): with no feature_names the label column stays in
            # the feature matrix — possible label leakage; confirm intent.
            data = df

        X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2)
        return X_train, X_test, y_train, y_test

    def get_model(self, **kwargs):
        """Hook for subclasses: return a fit-able sklearn-style estimator."""
        return None

    def fit(self):
        """End-to-end run: split the stored df, fit and score the model.

        :return: the fitted estimator
        :raises NotImplementedError: if get_model() is not overridden
        """
        X_train, X_test, y_train, y_test = self.split_data(
            self.df, self.feature_names, self.label_name)

        model = self.get_model()
        if model is None:
            # Clearer than the AttributeError the base class used to raise.
            raise NotImplementedError('get_model() must return an estimator')

        # Fit the model
        model.fit(X_train, y_train)
        # Predict on the held-out split
        model.predict(X_test)

        # Inspect the model's parameters
        model.get_params()
        # Score the model: regression -> R^2, classification -> accuracy
        model.score(X_test, y_test)
        return model

if __name__ == '__main__':
    from logic.global_objs import D
    from engine.data.data_handler import DataHandler

    # Pull the k-bar feature expressions and their display names.
    fields, names = DataHandler().get_kbar_fields_names()

    # Alternative: a hand-picked set of return features instead of the
    # full k-bar set.
    # fields = ['Return($close,20)',
    #           'Return($close,10)',
    #           'Return($close,5)',
    #           ]
    # names = fields

    all_fields = list(fields)
    all_names = list(names)

    # Regression label: forward 5-day return.
    all_fields.append('Ref($close,-5)/$close -1')
    all_names.append('label')

    # Classification label: quintile bucket of the regression label.
    all_fields.append('QCut($label,5)')
    all_names.append('label_c')

    df_all = D.load(['000300.SH', '000905.SH', '399006.SZ'],
                    start_time='20100101',
                    fields=all_fields,
                    names=all_names)
    print(df_all)

    model = SKlearnModel()
    X, y = df_all[names], df_all['label_c']
    model.RF(X, y)

    # model.XGB(X, y)