from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
from catboost import CatBoostClassifier
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
import pickle
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from config import config


def main():
    """Train, persist, and apply RandomForest classifiers for the RAC data.

    For each categorical target the training set is SMOTE-oversampled,
    a 1000-tree RandomForest is fit and pickled to ``../data/``, then the
    saved model(s) predict the test set and the predictions are written to
    a CSV file. No arguments; all inputs/outputs are file paths.
    """
    category_targets = ['additive_category']
    dataset = pd.read_csv("../data/RAC_train.csv")
    # Feature columns: skip the first (identifier) column and the last 12
    # columns (targets/labels) of the training file.
    features = list(dataset.columns[1:-12])
    X = dataset[features]
    # Train and persist one model per categorical target (was previously
    # hard-coded to a single target while the prediction step looped).
    for Y_col in category_targets:
        Y = dataset[Y_col]
        # SMOTE oversamples minority classes to balance the training set;
        # resample per target so X stays pristine for the next iteration.
        X_res, Y_res = SMOTE().fit_resample(X, Y)
        model_final = RandomForestClassifier(n_estimators=1000)
        model_final.fit(X_res, Y_res)
        # save model
        with open(f'../data/rac_model_{Y_col}.pkl', 'wb') as file:
            pickle.dump(model_final, file)

    # Predict on the held-out test data.
    dataset = pd.read_csv(config["test_data_rac"])
    predict_df = pd.DataFrame()
    predict_df['mof'] = list(range(1, len(dataset) + 1))
    X = dataset[list(dataset.columns[1:])]
    for Y_col in category_targets:
        # load model
        with open(f'../data/rac_model_{Y_col}.pkl', 'rb') as file:
            model_final = pickle.load(file)
        predict_df[Y_col] = model_final.predict(X)
    predict_df.rename(columns={'additive_category': 'additive'}, inplace=True)
    predict_df.to_csv(f"../data/rac_{Y_col}.csv", index=False)
    print("save prediction", len(predict_df))

def best_rac_additive():
    """Evaluate a RandomForest classifier for ``additive_category``.

    Runs 10 repeated random 80/20 train/test splits; for each split the
    training portion is SMOTE-oversampled, a 1000-tree RandomForest is
    fit, and the weighted F1 score on the untouched test portion is
    recorded. Prints each split's score and the mean.

    f1 score: 0.613
    :return:
    """
    dataset = pd.read_csv("../data/RAC_train.csv")
    # Feature columns: skip the first (identifier) column and the last 12
    # columns (targets/labels) of the training file.
    features = list(dataset.columns[1:-12])
    X = dataset[features]
    Y = dataset['additive_category']

    score_list = []
    for rs in range(10):
        X_train, X_test, Y_train, Y_test = train_test_split(
            X, Y, test_size=0.2, random_state=rs)
        model = RandomForestClassifier(n_estimators=1000)
        # Balance classes in the training split only; the test split is
        # left untouched so the score reflects the real distribution.
        X_train, Y_train = SMOTE().fit_resample(X_train, Y_train)
        model.fit(X_train, Y_train)
        y_predict = model.predict(X_test)
        score = f1_score(Y_test, y_predict, average='weighted')
        print("score:", score)
        score_list.append(score)
    print("===============================")
    print("f1 score:", np.array(score_list).mean())

# Script entry point: run the full train + predict pipeline by default.
if __name__ == '__main__':
    main()
    # best_rac_additive()
