"""
finger temperature prediction by molecular descriptor
f1: finger print Similarity
f2: mordred feature
f3: mol2vec
f4: metal elements features
"""
import pickle
import random

from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression, mutual_info_regression
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
import os
import scipy
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor, AdaBoostRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import Ridge, Lasso, ElasticNet, LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import r2_score
from util.alg import Linear_SVR, RBF_SVR
from config import config

# Shared validation / feature-selection settings for this script.
val_config = {
    "feature_num": 2446,   # number of features kept by SelectKBest (when enabled)
    "cv_fold": 5,          # cross-validation fold count
    "test_ratio": 0.20,    # extra test data ratio
}


def get_train_data():
    """Load the training set and assemble the combined feature matrix.

    Reads the raw fingerprint training CSV plus several pre-computed feature
    files (magpie, mordred, mol2vec, metal-element features), concatenates
    them column-wise, and mean-imputes missing values.

    :return: dict with keys:
        - "dataset": raw training DataFrame
        - "Y_col": name of the target column ('temperature')
        - "features": list of feature column names
        - "X": imputed feature matrix as a DataFrame
        - "Y": target Series
        - "imp": the fitted SimpleImputer (reused to transform test data)
    """
    dataset = pd.read_csv("../data/finger_train.csv")
    Y_col = 'temperature'
    # Metal-element features sit between the first column and the last 5.
    ele_features = dataset.columns[1:-5]
    df_f4 = dataset[ele_features]
    # df_f0 = pd.read_csv("../data/finger_train_f0.csv")
    # NOTE: finger_train_f1.csv (fingerprint similarity) was previously read
    # here but never used in the concatenation below, so the read is dropped.
    df_f2 = pd.read_csv("../data/finger_train_f22.csv")
    df_f3 = pd.read_csv("../data/finger_train_f3.csv")
    df_f5 = pd.read_csv("../data/train_formula_magpie_features.csv")
    df_f5 = df_f5[list(df_f5.columns[2:])]  # drop leading identifier columns
    df_f6 = pd.read_csv("../data/finger_train_m_magpie.csv")
    df_f7 = pd.read_csv("../data/train_f6.csv")
    X = pd.concat([df_f7, df_f6, df_f5, df_f2, df_f3, df_f4], axis=1)
    # Mean-impute NaNs; the fitted imputer is returned so the test set can be
    # transformed with the same statistics.
    imp = SimpleImputer(missing_values=np.nan, strategy='mean')
    features = list(X.columns)
    X = pd.DataFrame(imp.fit_transform(X), columns=features)
    Y = dataset[Y_col]
    return {
        "dataset": dataset,
        "Y_col": Y_col,
        "features": features,
        "X": X,
        "Y": Y,
        "imp": imp
    }


def train_model(model, X, Y, rs, test_size=0.2):
    """Fit *model* on a random train split and evaluate on the held-out split.

    :param model: scikit-learn style regressor exposing ``fit``/``predict``.
    :param X: feature matrix.
    :param Y: target vector.
    :param rs: random seed for the train/test split.
    :param test_size: fraction of samples held out for evaluation.
    :return: tuple ``(Y_test, y_predict)`` in the original target scale.
    """
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=rs)

    # Preprocess the training data (currently an identity transform).
    X_train, Y_train = transform(X_train, Y_train)
    model.fit(X_train, Y_train)

    # Apply the same preprocessing to the test features before predicting so
    # the model sees consistently-scaled inputs if transform() ever becomes
    # non-trivial. (Previously X_test was fed to predict() untransformed.)
    X_test, _ = transform(X_test, Y_test)
    y_predict = model.predict(X_test)

    # Map predictions back to the original target scale.
    X_test, y_predict = inverse_transform(X_test, y_predict)

    return Y_test, y_predict


def transform(X, Y):
    """Preprocessing hook applied to training data before fitting.

    Currently the identity; swap in scaling / target transforms here.

    :return: tuple ``(X, Y)`` unchanged.
    """
    return X, Y


def inverse_transform(X, Y):
    """Inverse of :func:`transform`, applied to predictions.

    Currently the identity; must mirror whatever transform() does.

    :return: tuple ``(X, Y)`` unchanged.
    """
    return X, Y


def main():
    """Train candidate regressors on the assembled feature matrix, keep the
    one with the best mean R² across 10 random splits, persist it with
    pickle, and write predictions for the test set to CSV.

    :return: best mean R² score achieved during model selection.
    """
    # Fix seeds for reproducibility of splits and model randomness.
    random.seed(0)
    np.random.seed(0)

    train_data = get_train_data()
    dataset = train_data["dataset"]
    Y_col = train_data["Y_col"]
    features = train_data["features"]
    X = train_data["X"]
    Y = train_data["Y"]

    # concat ml dataset
    imp = train_data['imp']
    # NOTE(review): X was already imputed inside get_train_data(); this second
    # fit_transform re-fits on the imputed matrix. For the 'mean' strategy the
    # statistics are unchanged, so this is a no-op — confirm before removing.
    X = pd.DataFrame(imp.fit_transform(X), columns=features)
    ml_dataset = pd.concat([X, Y], axis=1)
    ml_dataset.to_csv(f"../data/ml_dataset_{Y_col}.csv", index=False)
    features = list(ml_dataset.columns)[:-1]
    print("features", len(features))

    # scaler = MinMaxScaler()
    # X = scaler.fit_transform(X)
    # assert len(Y) == 560
    # Random-forest hyper-parameters — presumably from an external tuning
    # run (e.g. grid/random search); TODO confirm their provenance.
    param = {'max_depth': 23, 'max_features': 1970, 'min_impurity_decrease': 1, 'min_samples_leaf': 4, 'min_samples_split': 6,
             'n_estimators': 120}
    # Candidate models; all but the tuned random forest are disabled.
    alg_dict = {
        # "Lasso": Lasso(),
        # "Ridge": Ridge(),
        # "LinearRegression": LinearRegression(),
        # 'LinearSVR': Linear_SVR(C=1),
        # 'LinearSVR2': Linear_SVR(C=100),
        # 'LinearSVR3': Linear_SVR(C=10),
        # "GradientBoosting": GradientBoostingRegressor(),
        # "AdaBoost": AdaBoostRegressor(),
        # "ExtraTrees": ExtraTreesRegressor(),
        # "RandomForest": RandomForestRegressor(random_state=1, min_samples_split=5, n_estimators=200),
        "RandomForest": RandomForestRegressor(**param, random_state=1)
        # "RandomForest2": RandomForestRegressor(random_state=2),
        # "RandomForest": RandomForestRegressor(min_samples_split=9, random_state=0),
        # f"Random Forest{i}": RandomForestRegressor(random_state=0,
        #                                            min_samples_split=i,
        #                                            # min_samples_leaf=1,
        #                                            n_estimators=10)
        # for i in range(2, 10)
        # "RandomForest3": RandomForestRegressor(random_state=3),
        # "RandomForest4": RandomForestRegressor(random_state=5),
        # "RandomForest5": RandomForestRegressor(random_state=6),
        # "KNeighbors": KNeighborsRegressor(),
        # "DecisionTree": DecisionTreeRegressor(),
        # 'RbfSVR': RBF_SVR(C=1),
        # 'RbfSVR1': RBF_SVR(C=10, gamma=0.20),
        # 'RbfSVR2': RBF_SVR(C=100, gamma=0.10),
        # 'RbfSVR3': RBF_SVR(C=1000, gamma=0.05),
        # 'RbfSVR4': RBF_SVR(C=0.1, gamma=0.01),
    }
    # Model selection: mean R² over 10 random 80/20 splits per candidate.
    best_model = None
    best_score = - 10 ** 10
    for alg_name in alg_dict.keys():
        model = alg_dict[alg_name]
        score_list = []
        for rs in range(10):
            Y_test, y_predict = train_model(model, X, Y, rs, test_size=0.2)
            score = r2_score(Y_test, y_predict)
            score_list.append(score)
            print(score)
        score = np.array(score_list).mean()
        print(f"{alg_name} {score}")
        if score > best_score:
            best_model = model
            best_score = score
    # save the best model
    print(f"best score {best_score} best model {best_model}")
    model_final = best_model
    # Refit the winning model on ALL training data before persisting.
    model_final.fit(X, Y)
    with open(f'../data/model_{Y_col}.pkl', 'wb') as file:
        pickle.dump(model_final, file)

    # X_train = pd.DataFrame(X_train, columns=features)
    # columns_with_nulls = X_train.columns[X_train.isnull().any()]
    # print(columns_with_nulls)
    # feature_selection = SelectKBest(f_regression, k=val_config['feature_num']).fit(X_train, Y_train)
    #
    # feature_scores = feature_selection.scores_
    # print('feature_scores:', feature_scores)
    # indices = np.argsort(feature_scores)[::-1]
    # best_features = list(X_train.columns.values[indices[0:val_config['feature_num']]])
    #
    # X_train = feature_selection.transform(X_train)
    # X_test = feature_selection.transform(X_test)
    # sc = MinMaxScaler()
    #
    #
    # best_model = None
    # best_score = -10 ** 10
    # for alg_name in alg_dict.keys():
    #     model = alg_dict[alg_name]
    #     model.fit(X_train, Y_train)
    #     y_predict = model.predict(X_test)
    #     score = r2_score(Y_test, y_predict)
    #     # score = - np.mean(cross_val_score(model, X, Y, cv=5))
    #     print(f"{alg_name} {score}")
    #     if score > best_score:
    #         best_model = model
    #         best_score = score
    # # save the best model
    # print(f"best score {best_score} best model {best_model}")
    # model_final = best_model
    # X_df = pd.DataFrame(feature_selection.transform(X), columns=best_features)
    # model_final.fit(X_df, Y)

    # test data prediction
    # Mirrors the feature assembly in get_train_data() for the test CSVs.
    dataset = pd.read_csv(config['test_data_finger'])
    ele_features = dataset.columns[1:-3]
    df_f4 = dataset[ele_features]
    # df_f0 = pd.read_csv("../data/finger_test_f0.csv")
    # NOTE(review): df_f1 is read but never used in the concat below — same
    # as in get_train_data(); candidate for removal.
    df_f1 = pd.read_csv("../data/finger_test_f1.csv")
    df_f2 = pd.read_csv("../data/finger_test_f22.csv")
    df_f3 = pd.read_csv("../data/finger_test_f3.csv")
    df_f5 = pd.read_csv("../data/test_formula_magpie_features.csv")
    df_f5 = df_f5[list(df_f5.columns[2:])]
    df_f6 = pd.read_csv("../data/finger_test_m_magpie.csv")
    df_f7 = pd.read_csv("../data/test_f6.csv")
    # df_f8 = pd.read_csv("../data/finger_test_f8.csv")

    X_t = pd.concat([df_f7, df_f6, df_f5, df_f2, df_f3, df_f4], axis=1)
    # Impute with the statistics fitted on the training matrix.
    X_t = pd.DataFrame(imp.transform(X_t), columns=features)
    X_t.to_csv("../data/test_ml_dataset.csv", index=False)
    # X_t = scaler.transform(X_t)
    # X_t = feature_selection.transform(X_t)
    y_sub = model_final.predict(X_t)

    # save prediction
    predict_df = pd.DataFrame()
    predict_df['mof'] = list(range(1, len(y_sub) + 1))
    predict_df[Y_col] = y_sub
    predict_df.to_csv(f"../data/finger_{Y_col}.csv", index=False)
    print("save prediction", len(predict_df))
    return best_score

if __name__ == '__main__':
    # Script entry point: train, select, persist, and predict.
    main()
