import graphviz
import xgboost as xgb
from bayes_opt import BayesianOptimization
from catboost import CatBoostClassifier
# import sys
from prettytable import PrettyTable
from sklearn import tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.linear_model import LogisticRegression as LR
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import train_test_split as TTS
from sklearn.svm import SVC
# from sko.AFSA import AFSA
from streamlit_extras.colored_header import colored_header

from business.algorithm.utils import *


def run():

    colored_header(label="机器学习：分类", description=" ", color_name="violet-90")
    file = st.file_uploader("Upload `.csv`file", type=['csv'], label_visibility="collapsed")
    if file is None:
        table = PrettyTable(['file name', 'class', 'description'])
        table.add_row(['file_1', 'dataset', 'data file'])
        st.write(table)
    if file is not None:
        df = pd.read_csv(file)
        # check_string(df)
        colored_header(label="数据信息", description=" ", color_name="violet-70")
        nrow = st.slider("rows", 1, len(df), 5)
        df_nrow = df.head(nrow)
        st.write(df_nrow)

        colored_header(label="特征&目标", description=" ", color_name="violet-70")

        target_num = st.number_input('目标数量', min_value=1, max_value=10, value=1)

        col_feature, col_target = st.columns(2)

        # features
        features = df.iloc[:, :-target_num]
        # targets
        targets = df.iloc[:, -target_num:]
        with col_feature:
            st.write(features.head())
        with col_target:
            st.write(targets.head())

        clf = CLASSIFIER(features, targets)

        colored_header(label="Choose Target", description=" ", color_name="violet-30")
        target_selected_option = st.selectbox('target', list(clf.targets)[::-1])

        clf.targets = pd.DataFrame(targets[target_selected_option])

        col_name = list(clf.targets)
        clf.targets[col_name[0]], unique_categories = pd.factorize(clf.targets[col_name[0]])

        colored_header(label="Classifier", description=" ", color_name="violet-30")

        model_path = './models/classifiers'

        template_alg = model_platform(model_path)

        colored_header(label="Training", description=" ", color_name="violet-30")

        inputs, col2 = template_alg.show()

        if inputs['model'] == 'DecisionTreeClassifier':

            with col2:
                with st.expander('Operator'):
                    data_process = st.selectbox('data process',
                                                ('train test split', 'cross val score', 'leave one out'),
                                                label_visibility='collapsed')
                    if data_process == 'train test split':
                        inputs['test size'] = st.slider('test size', 0.1, 0.5, 0.2)
                        clf.Xtrain, clf.Xtest, clf.Ytrain, clf.Ytest = TTS(clf.features, clf.targets,
                                                                           test_size=inputs['test size'],
                                                                           random_state=inputs['random state'])
                    elif data_process == 'cross val score':
                        cv = st.number_input('cv', 1, 20, 5)
                    elif data_process == 'leave one out':
                        loo = LeaveOneOut()

            with st.container():
                button_train = st.button('Train', use_container_width=True)
            if button_train:
                if data_process == 'train test split':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = tree.DecisionTreeClassifier(criterion=inputs['criterion'],
                                                                random_state=inputs['random state'],
                                                                splitter=inputs['splitter'],
                                                                max_depth=inputs['max depth'],
                                                                min_samples_leaf=inputs['min samples leaf'],
                                                                min_samples_split=inputs['min samples split'])

                        clf.DecisionTreeClassifier()
                        plot_and_export_results_clf(clf, 'DTC', col_name, unique_categories)
                        if inputs['tree graph']:
                            # Fix: the original passed feature names as class_names.
                            # export_graphviz indexes class_names by predicted class, so it
                            # needs the actual class labels recovered by pd.factorize.
                            class_names = [str(c) for c in unique_categories]
                            dot_data = tree.export_graphviz(clf.model, out_file=None, feature_names=list(clf.features),
                                                            class_names=class_names, filled=True, rounded=True)
                            graph = graphviz.Source(dot_data)
                            graph.render('Tree graph', view=True)

                    elif inputs['auto hyperparameters']:
                        def DTC_TT(max_depth, min_samples_leaf, min_samples_split):
                            """Bayesian-optimization objective: fit a decision tree with the
                            proposed hyperparameters and return its train/test score."""
                            # The optimizer proposes floats; tree-size parameters must be ints.
                            tuned = {name: int(value) for name, value in [
                                ('max_depth', max_depth),
                                ('min_samples_leaf', min_samples_leaf),
                                ('min_samples_split', min_samples_split),
                            ]}
                            clf.model = tree.DecisionTreeClassifier(criterion=inputs['criterion'],
                                                                    random_state=inputs['random state'],
                                                                    splitter=inputs['splitter'],
                                                                    **tuned)
                            clf.DecisionTreeClassifier()
                            return clf.score


                        DTCbounds = {'max_depth': (1, inputs['max depth']),
                                     'min_samples_leaf': (1, inputs['min samples leaf']),
                                     'min_samples_split': (2, inputs['min samples split'])}

                        with st.expander('hyperparameter opt'):

                            optimizer = BayesianOptimization(f=DTC_TT, pbounds=DTCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])

                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['max_depth'] = int(params_best['max_depth'])
                        params_best['min_samples_leaf'] = int(params_best['min_samples_leaf'])
                        params_best['min_samples_split'] = int(params_best['min_samples_split'])
                        st.write("\n", "\n", "best params: ", params_best)
                        clf.model = tree.DecisionTreeClassifier(criterion=inputs['criterion'],
                                                                random_state=inputs['random state'],
                                                                splitter=inputs['splitter'],
                                                                max_depth=params_best['max_depth'],
                                                                min_samples_leaf=params_best['min_samples_leaf'],
                                                                min_samples_split=params_best['min_samples_split'])
                        clf.DecisionTreeClassifier()
                        plot_and_export_results_clf(clf, 'DTC', col_name, unique_categories)
                        if inputs['tree graph']:
                            # Fix: list(clf.targets) yields the single target COLUMN name,
                            # but export_graphviz indexes class_names per predicted class;
                            # use the real class labels recovered by pd.factorize instead.
                            class_names = [str(c) for c in unique_categories]
                            dot_data = tree.export_graphviz(clf.model, out_file=None, feature_names=list(clf.features),
                                                            class_names=class_names, filled=True, rounded=True)
                            graph = graphviz.Source(dot_data)
                            graph.render('Tree graph', view=True)

                elif data_process == 'cross val score':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = tree.DecisionTreeClassifier(criterion=inputs['criterion'],
                                                                random_state=inputs['random state'],
                                                                splitter=inputs['splitter'],
                                                                max_depth=inputs['max depth'],
                                                                min_samples_leaf=inputs['min samples leaf'],
                                                                min_samples_split=inputs['min samples split'])

                        export_cross_val_results_clf(clf, cv, "DTC_cv", col_name, unique_categories,
                                                     inputs['random state'])
                        if inputs['tree graph']:
                            # Fix: list(clf.targets) yields the single target COLUMN name,
                            # but export_graphviz indexes class_names per predicted class;
                            # use the real class labels recovered by pd.factorize instead.
                            class_names = [str(c) for c in unique_categories]
                            dot_data = tree.export_graphviz(clf.model, out_file=None, feature_names=list(clf.features),
                                                            class_names=class_names, filled=True, rounded=True)
                            graph = graphviz.Source(dot_data)
                            graph.render('Tree graph', view=True)
                    elif inputs['auto hyperparameters']:
                        def DTC_TT(max_depth, min_samples_leaf, min_samples_split):
                            """BO objective: cross-validated score of a decision tree built
                            from the proposed hyperparameters (floats cast to int)."""
                            size_params = dict(max_depth=int(max_depth),
                                               min_samples_leaf=int(min_samples_leaf),
                                               min_samples_split=int(min_samples_split))
                            clf.model = tree.DecisionTreeClassifier(criterion=inputs['criterion'],
                                                                    random_state=inputs['random state'],
                                                                    splitter=inputs['splitter'],
                                                                    **size_params)
                            return cv_cal_clf(clf, cv, inputs['random state'])


                        DTCbounds = {'max_depth': (1, inputs['max depth']),
                                     'min_samples_leaf': (1, inputs['min samples leaf']),
                                     'min_samples_split': (2, inputs['min samples split'])}

                        with st.expander('hyperparameter opt'):

                            optimizer = BayesianOptimization(f=DTC_TT, pbounds=DTCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])

                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['max_depth'] = int(params_best['max_depth'])
                        params_best['min_samples_leaf'] = int(params_best['min_samples_leaf'])
                        params_best['min_samples_split'] = int(params_best['min_samples_split'])
                        st.write("\n", "\n", "best params: ", params_best)
                        clf.model = tree.DecisionTreeClassifier(criterion=inputs['criterion'],
                                                                random_state=inputs['random state'],
                                                                splitter=inputs['splitter'],
                                                                max_depth=params_best['max_depth'],
                                                                min_samples_leaf=params_best['min_samples_leaf'],
                                                                min_samples_split=params_best['min_samples_split'])
                        export_cross_val_results_clf(clf, cv, "DTC_cv", col_name, unique_categories,
                                                     inputs['random state'])
                        if inputs['tree graph']:
                            # Fix: list(clf.targets) yields the single target COLUMN name,
                            # but export_graphviz indexes class_names per predicted class;
                            # use the real class labels recovered by pd.factorize instead.
                            class_names = [str(c) for c in unique_categories]
                            dot_data = tree.export_graphviz(clf.model, out_file=None, feature_names=list(clf.features),
                                                            class_names=class_names, filled=True, rounded=True)
                            graph = graphviz.Source(dot_data)
                            graph.render('Tree graph', view=True)
                elif data_process == 'leave one out':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = tree.DecisionTreeClassifier(criterion=inputs['criterion'],
                                                                random_state=inputs['random state'],
                                                                splitter=inputs['splitter'],
                                                                max_depth=inputs['max depth'],
                                                                min_samples_leaf=inputs['min samples leaf'],
                                                                min_samples_split=inputs['min samples split'])
                        export_loo_results_clf(clf, loo, "DTC_loo", col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        def DTC_TT(max_depth, min_samples_leaf, min_samples_split):
                            """BO objective: leave-one-out score of a decision tree with the
                            proposed hyperparameters (cast from float to int)."""
                            clf.model = tree.DecisionTreeClassifier(
                                criterion=inputs['criterion'],
                                random_state=inputs['random state'],
                                splitter=inputs['splitter'],
                                max_depth=int(max_depth),
                                min_samples_leaf=int(min_samples_leaf),
                                min_samples_split=int(min_samples_split),
                            )
                            return loo_cal_clf(clf, loo)


                        DTCbounds = {'max_depth': (1, inputs['max depth']),
                                     'min_samples_leaf': (1, inputs['min samples leaf']),
                                     'min_samples_split': (2, inputs['min samples split'])}

                        with st.expander('hyperparameter opt'):

                            optimizer = BayesianOptimization(f=DTC_TT, pbounds=DTCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])

                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['max_depth'] = int(params_best['max_depth'])
                        params_best['min_samples_leaf'] = int(params_best['min_samples_leaf'])
                        params_best['min_samples_split'] = int(params_best['min_samples_split'])
                        st.write("\n", "\n", "best params: ", params_best)
                        clf.model = tree.DecisionTreeClassifier(criterion=inputs['criterion'],
                                                                random_state=inputs['random state'],
                                                                splitter=inputs['splitter'],
                                                                max_depth=params_best['max_depth'],
                                                                min_samples_leaf=params_best['min_samples_leaf'],
                                                                min_samples_split=params_best['min_samples_split'])
                        export_loo_results_clf(clf, loo, "DTC_loo", col_name, unique_categories)

        if inputs['model'] == 'RandomForestClassifier':

            with col2:
                with st.expander('Operator'):
                    data_process = st.selectbox('data process',
                                                ('train test split', 'cross val score', 'leave one out'),
                                                label_visibility='collapsed')
                    if data_process == 'train test split':
                        inputs['test size'] = st.slider('test size', 0.1, 0.5, 0.2)
                        clf.Xtrain, clf.Xtest, clf.Ytrain, clf.Ytest = TTS(clf.features, clf.targets,
                                                                           test_size=inputs['test size'],
                                                                           random_state=inputs['random state'])
                    elif data_process == 'cross val score':
                        cv = st.number_input('cv', 1, 20, 5)
                    elif data_process == 'leave one out':
                        loo = LeaveOneOut()

            with st.container():
                button_train = st.button('Train', use_container_width=True)
            if button_train:

                if data_process == 'train test split':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = RFC(criterion=inputs['criterion'], n_estimators=inputs['nestimators'],
                                        random_state=inputs['random state'], max_depth=inputs['max depth'],
                                        min_samples_leaf=inputs['min samples leaf'],
                                        min_samples_split=inputs['min samples split'], warm_start=inputs['warm start'])

                        clf.RandomForestClassifier()
                        plot_and_export_results_clf(clf, 'RFC', col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        def RFC_TT(n_estimators, max_depth, min_samples_leaf, min_samples_split):
                            """BO objective: train/test score of a random forest with the
                            proposed hyperparameters (floats cast to int)."""
                            # Fix: pin random_state so the objective is deterministic and
                            # consistent with the final model refit from the best params.
                            clf.model = RFC(criterion=inputs['criterion'], n_estimators=int(n_estimators),
                                            max_depth=int(max_depth), min_samples_leaf=int(min_samples_leaf),
                                            min_samples_split=int(min_samples_split),
                                            random_state=inputs['random state'], n_jobs=-1)
                            clf.RandomForestClassifier()
                            return clf.score


                        RFCbounds = {'n_estimators': (1, inputs['nestimators']), 'max_depth': (1, inputs['max depth']),
                                     'min_samples_leaf': (1, inputs['min samples leaf']),
                                     'min_samples_split': (2, inputs['min samples split'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=RFC_TT, pbounds=RFCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['max_depth'] = int(params_best['max_depth'])
                        params_best['min_samples_leaf'] = int(params_best['min_samples_leaf'])
                        params_best['min_samples_split'] = int(params_best['min_samples_split'])
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = RFC(criterion=inputs['criterion'], n_estimators=params_best['n_estimators'],
                                        random_state=inputs['random state'], max_depth=params_best['max_depth'],
                                        min_samples_leaf=params_best['min_samples_leaf'],
                                        min_samples_split=params_best['min_samples_split'],
                                        warm_start=inputs['warm start'],
                                        n_jobs=-1)

                        clf.RandomForestClassifier()
                        plot_and_export_results_clf(clf, 'RFC', col_name, unique_categories)

                elif data_process == 'cross val score':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = RFC(criterion=inputs['criterion'], n_estimators=inputs['nestimators'],
                                        random_state=inputs['random state'], max_depth=inputs['max depth'],
                                        min_samples_leaf=inputs['min samples leaf'],
                                        min_samples_split=inputs['min samples split'], warm_start=inputs['warm start'])

                        export_cross_val_results_clf(clf, cv, "RFC_cv", col_name, unique_categories,
                                                     inputs['random state'])
                    elif inputs['auto hyperparameters']:
                        def RFC_TT(n_estimators, max_depth, min_samples_leaf, min_samples_split):
                            """BO objective: cross-validated score of a random forest with the
                            proposed hyperparameters (floats cast to int)."""
                            # Fix: pin random_state so the objective is deterministic and
                            # consistent with the final model refit from the best params.
                            clf.model = RFC(criterion=inputs['criterion'], n_estimators=int(n_estimators),
                                            max_depth=int(max_depth), min_samples_leaf=int(min_samples_leaf),
                                            min_samples_split=int(min_samples_split),
                                            random_state=inputs['random state'], n_jobs=-1)
                            cv_score = cv_cal_clf(clf, cv, inputs['random state'])
                            return cv_score


                        RFCbounds = {'n_estimators': (1, inputs['nestimators']), 'max_depth': (1, inputs['max depth']),
                                     'min_samples_leaf': (1, inputs['min samples leaf']),
                                     'min_samples_split': (2, inputs['min samples split'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=RFC_TT, pbounds=RFCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['max_depth'] = int(params_best['max_depth'])
                        params_best['min_samples_leaf'] = int(params_best['min_samples_leaf'])
                        params_best['min_samples_split'] = int(params_best['min_samples_split'])
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = RFC(criterion=inputs['criterion'], n_estimators=params_best['n_estimators'],
                                        random_state=inputs['random state'], max_depth=params_best['max_depth'],
                                        min_samples_leaf=params_best['min_samples_leaf'],
                                        min_samples_split=params_best['min_samples_split'],
                                        warm_start=inputs['warm start'],
                                        n_jobs=-1)

                        export_cross_val_results_clf(clf, cv, "RFC_cv", col_name, unique_categories,
                                                     inputs['random state'])
                elif data_process == 'leave one out':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = RFC(criterion=inputs['criterion'], n_estimators=inputs['nestimators'],
                                        random_state=inputs['random state'], max_depth=inputs['max depth'],
                                        min_samples_leaf=inputs['min samples leaf'],
                                        min_samples_split=inputs['min samples split'], warm_start=inputs['warm start'])

                        export_loo_results_clf(clf, loo, "RFC_loo", col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        def RFC_TT(n_estimators, max_depth, min_samples_leaf, min_samples_split):
                            """BO objective: leave-one-out score of a random forest with the
                            proposed hyperparameters (floats cast to int)."""
                            # Fix: pin random_state so the objective is deterministic and
                            # consistent with the final model refit from the best params.
                            clf.model = RFC(criterion=inputs['criterion'], n_estimators=int(n_estimators),
                                            max_depth=int(max_depth), min_samples_leaf=int(min_samples_leaf),
                                            min_samples_split=int(min_samples_split),
                                            random_state=inputs['random state'], n_jobs=-1)
                            loo_score = loo_cal_clf(clf, loo)
                            return loo_score


                        RFCbounds = {'n_estimators': (1, inputs['nestimators']), 'max_depth': (1, inputs['max depth']),
                                     'min_samples_leaf': (1, inputs['min samples leaf']),
                                     'min_samples_split': (2, inputs['min samples split'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=RFC_TT, pbounds=RFCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['max_depth'] = int(params_best['max_depth'])
                        params_best['min_samples_leaf'] = int(params_best['min_samples_leaf'])
                        params_best['min_samples_split'] = int(params_best['min_samples_split'])
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = RFC(criterion=inputs['criterion'], n_estimators=params_best['n_estimators'],
                                        random_state=inputs['random state'], max_depth=params_best['max_depth'],
                                        min_samples_leaf=params_best['min_samples_leaf'],
                                        min_samples_split=params_best['min_samples_split'],
                                        warm_start=inputs['warm start'],
                                        n_jobs=-1)
                        export_loo_results_clf(clf, loo, "RFC_loo", col_name, unique_categories)

        if inputs['model'] == 'Logistic回归':

            with col2:
                with st.expander('Operator'):
                    preprocess = st.selectbox('data preprocess', ['StandardScaler', 'MinMaxScaler'])

                    data_process = st.selectbox('data process',
                                                ('train test split', 'cross val score', 'leave one out'),
                                                label_visibility='collapsed')
                    if data_process == 'train test split':
                        inputs['test size'] = st.slider('test size', 0.1, 0.5, 0.2)
                        if preprocess == 'StandardScaler':
                            clf.features = StandardScaler().fit_transform(clf.features)
                        if preprocess == 'MinMaxScaler':
                            clf.features = MinMaxScaler().fit_transform(clf.features)
                        clf.Xtrain, clf.Xtest, clf.Ytrain, clf.Ytest = TTS(clf.features, clf.targets,
                                                                           test_size=inputs['test size'],
                                                                           random_state=inputs['random state'])

                    elif data_process == 'cross val score':

                        if preprocess == 'StandardScaler':
                            clf.features = StandardScaler().fit_transform(clf.features)
                        if preprocess == 'MinMaxScaler':
                            clf.features = MinMaxScaler().fit_transform(clf.features)
                        cv = st.number_input('cv', 1, 20, 5)

                    elif data_process == 'leave one out':

                        if preprocess == 'StandardScaler':
                            clf.features = StandardScaler().fit_transform(clf.features)
                        if preprocess == 'MinMaxScaler':
                            clf.features = MinMaxScaler().fit_transform(clf.features)
                        loo = LeaveOneOut()

            with st.container():
                button_train = st.button('Train', use_container_width=True)

            if button_train:
                if data_process == 'train test split':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = LR(penalty=inputs['penalty'], C=inputs['C'], solver=inputs['solver'],
                                       max_iter=inputs['max iter'], multi_class=inputs['multi class'],
                                       random_state=inputs['random state'], l1_ratio=inputs['l1 ratio'])
                        clf.LogisticRegreesion()
                        plot_and_export_results_clf(clf, 'LRC', col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        def LRC_TT(C):
                            """BO objective: train/test score of a logistic-regression
                            classifier with the proposed regularization strength C."""
                            # Fix: pass random_state (as the manual path does) so stochastic
                            # solvers give a reproducible objective.
                            clf.model = LR(penalty=inputs['penalty'], C=C, solver=inputs['solver'],
                                           max_iter=inputs['max iter'], multi_class=inputs['multi class'],
                                           random_state=inputs['random state'],
                                           l1_ratio=inputs['l1 ratio'])
                            clf.LogisticRegreesion()
                            return clf.score


                        LRCbounds = {'C': (1, inputs['C'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=LRC_TT, pbounds=LRCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = LR(penalty=inputs['penalty'], C=params_best['C'], solver=inputs['solver'],
                                       max_iter=inputs['max iter'], multi_class=inputs['multi class'],
                                       l1_ratio=inputs['l1 ratio'])

                        clf.LogisticRegreesion()
                        plot_and_export_results_clf(clf, 'LRC', col_name, unique_categories)

                elif data_process == 'cross val score':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = LR(penalty=inputs['penalty'], C=inputs['C'], solver=inputs['solver'],
                                       max_iter=inputs['max iter'], multi_class=inputs['multi class'],
                                       random_state=inputs['random state'], l1_ratio=inputs['l1 ratio'])

                        export_cross_val_results_clf(clf, cv, "LRC_cv", col_name, unique_categories,
                                                     inputs['random state'])
                    elif inputs['auto hyperparameters']:
                        def LRC_TT(C):
                            """Objective for Bayesian optimisation: cross-validated score of
                            logistic regression at this C."""
                            lr_candidate = LR(penalty=inputs['penalty'],
                                              C=C,
                                              solver=inputs['solver'],
                                              max_iter=inputs['max iter'],
                                              multi_class=inputs['multi class'],
                                              l1_ratio=inputs['l1 ratio'])
                            clf.model = lr_candidate
                            return cv_cal_clf(clf, cv, inputs['random state'])


                        LRCbounds = {'C': (1, inputs['C'])}
                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=LRC_TT, pbounds=LRCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = LR(penalty=inputs['penalty'], C=params_best['C'], solver=inputs['solver'],
                                       max_iter=inputs['max iter'], multi_class=inputs['multi class'],
                                       l1_ratio=inputs['l1 ratio'])

                        export_cross_val_results_clf(clf, cv, "LRC_cv", col_name, unique_categories,
                                                     inputs['random state'])

                elif data_process == 'leave one out':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = LR(penalty=inputs['penalty'], C=inputs['C'], solver=inputs['solver'],
                                       max_iter=inputs['max iter'], multi_class=inputs['multi class'],
                                       random_state=inputs['random state'], l1_ratio=inputs['l1 ratio'])

                        export_loo_results_clf(clf, loo, "LRC_loo", col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        def LRC_TT(C):
                            """Objective for Bayesian optimisation: leave-one-out score of
                            logistic regression at this C."""
                            lr_candidate = LR(penalty=inputs['penalty'],
                                              C=C,
                                              solver=inputs['solver'],
                                              max_iter=inputs['max iter'],
                                              multi_class=inputs['multi class'],
                                              l1_ratio=inputs['l1 ratio'])
                            clf.model = lr_candidate
                            return loo_cal_clf(clf, loo)


                        LRCbounds = {'C': (1, inputs['C'])}
                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=LRC_TT, pbounds=LRCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = LR(penalty=inputs['penalty'], C=params_best['C'], solver=inputs['solver'],
                                       max_iter=inputs['max iter'], multi_class=inputs['multi class'],
                                       l1_ratio=inputs['l1 ratio'])

                        export_loo_results_clf(clf, loo, "LRC_loo", col_name, unique_categories)

        if inputs['model'] == 'SupportVector':
            # SVC section. The operator panel chooses a feature scaler (SVC is
            # distance-based) and the evaluation scheme; training runs on button press.
            with col2:
                with st.expander('Operator'):
                    preprocess = st.selectbox('data preprocess', ['StandardScaler', 'MinMaxScaler'])

                    data_process = st.selectbox('data process',
                                                ('train test split', 'cross val score', 'leave one out'),
                                                label_visibility='collapsed')
                    if data_process == 'train test split':
                        inputs['test size'] = st.slider('test size', 0.1, 0.5, 0.2)
                        if preprocess == 'StandardScaler':
                            clf.features = StandardScaler().fit_transform(clf.features)
                        if preprocess == 'MinMaxScaler':
                            clf.features = MinMaxScaler().fit_transform(clf.features)
                        clf.Xtrain, clf.Xtest, clf.Ytrain, clf.Ytest = TTS(clf.features, clf.targets,
                                                                           test_size=inputs['test size'],
                                                                           random_state=inputs['random state'])

                    elif data_process == 'cross val score':
                        if preprocess == 'StandardScaler':
                            clf.features = StandardScaler().fit_transform(clf.features)
                        if preprocess == 'MinMaxScaler':
                            clf.features = MinMaxScaler().fit_transform(clf.features)
                        cv = st.number_input('cv', 1, 20, 5)

                    elif data_process == 'leave one out':
                        if preprocess == 'StandardScaler':
                            clf.features = StandardScaler().fit_transform(clf.features)
                        if preprocess == 'MinMaxScaler':
                            clf.features = MinMaxScaler().fit_transform(clf.features)
                        loo = LeaveOneOut()

            with st.container():
                button_train = st.button('Train', use_container_width=True)
            if button_train:
                if data_process == 'train test split':
                    if inputs['auto hyperparameters'] == False:
                        # Fixed hyperparameters: train once on the split and report.
                        clf.model = SVC(C=inputs['C'], kernel=inputs['kernel'], class_weight=inputs['class weight'])

                        clf.SupportVector()
                        plot_and_export_results_clf(clf, 'SVC', col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        def SVC_TT(C):
                            # Objective for Bayesian optimisation: train/test score at this C.
                            clf.model = SVC(C=C, kernel=inputs['kernel'], class_weight=inputs['class weight'])
                            clf.SupportVector()
                            return clf.score

                        SVCbounds = {'C': (1, inputs['C'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=SVC_TT, pbounds=SVCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        st.write("\n", "\n", "best params: ", params_best)

                        # BUGFIX: refit with the optimised C. The original used `C=C`,
                        # but `C` is only a parameter of SVC_TT and is unbound here
                        # (NameError); every other model section uses params_best.
                        clf.model = SVC(C=params_best['C'], kernel=inputs['kernel'],
                                        class_weight=inputs['class weight'])

                        clf.SupportVector()
                        plot_and_export_results_clf(clf, 'SVC', col_name, unique_categories)

                elif data_process == 'cross val score':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = SVC(C=inputs['C'], kernel=inputs['kernel'], class_weight=inputs['class weight'])
                        export_cross_val_results_clf(clf, cv, "SVC_cv", col_name, unique_categories,
                                                     inputs['random state'])
                    elif inputs['auto hyperparameters']:
                        def SVC_TT(C):
                            # Objective for Bayesian optimisation: cross-validated score at this C.
                            clf.model = SVC(C=C, kernel=inputs['kernel'], class_weight=inputs['class weight'])
                            cv_score = cv_cal_clf(clf, cv, inputs['random state'])
                            return cv_score

                        SVCbounds = {'C': (1, inputs['C'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=SVC_TT, pbounds=SVCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        st.write("\n", "\n", "best params: ", params_best)

                        # BUGFIX: was `C=C` (unbound name) — use the optimised value.
                        clf.model = SVC(C=params_best['C'], kernel=inputs['kernel'],
                                        class_weight=inputs['class weight'])
                        export_cross_val_results_clf(clf, cv, "SVC_cv", col_name, unique_categories,
                                                     inputs['random state'])

                elif data_process == 'leave one out':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = SVC(C=inputs['C'], kernel=inputs['kernel'], class_weight=inputs['class weight'])

                        export_loo_results_clf(clf, loo, "SVC_loo", col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        def SVC_TT(C):
                            # Objective for Bayesian optimisation: leave-one-out score at this C.
                            clf.model = SVC(C=C, kernel=inputs['kernel'], class_weight=inputs['class weight'])
                            loo_score = loo_cal_clf(clf, loo)
                            return loo_score

                        SVCbounds = {'C': (1, inputs['C'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=SVC_TT, pbounds=SVCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        st.write("\n", "\n", "best params: ", params_best)

                        # BUGFIX: was `C=C` (unbound name) — use the optimised value.
                        clf.model = SVC(C=params_best['C'], kernel=inputs['kernel'],
                                        class_weight=inputs['class weight'])
                        export_loo_results_clf(clf, loo, "SVC_loo", col_name, unique_categories)

        if inputs['model'] == 'BaggingClassifier':
            # Bagging of decision trees. The operator panel picks the evaluation
            # scheme; training only runs once the Train button is pressed.

            with col2:
                with st.expander('Operator'):

                    data_process = st.selectbox('data process',
                                                ('train test split', 'cross val score', 'leave one out'),
                                                label_visibility='collapsed')
                    if data_process == 'train test split':
                        inputs['test size'] = st.slider('test size', 0.1, 0.5, 0.2)
                        clf.Xtrain, clf.Xtest, clf.Ytrain, clf.Ytest = TTS(clf.features, clf.targets,
                                                                           test_size=inputs['test size'],
                                                                           random_state=inputs['random state'])

                    elif data_process == 'cross val score':
                        cv = st.number_input('cv', 1, 20, 5)
                    elif data_process == 'leave one out':
                        loo = LeaveOneOut()

            with st.container():
                button_train = st.button('Train', use_container_width=True)

            if button_train:
                if data_process == 'train test split':
                    if inputs['auto hyperparameters'] == False:
                        # Fixed hyperparameters: train once on the split and report.
                        clf.model = BaggingClassifier(estimator=tree.DecisionTreeClassifier(),
                                                      n_estimators=inputs['nestimators'],
                                                      max_samples=inputs['max samples'],
                                                      max_features=inputs['max features'], n_jobs=-1)

                        clf.BaggingClassifier()
                        plot_and_export_results_clf(clf, 'BaggingC', col_name, unique_categories)

                    elif inputs['auto hyperparameters']:
                        # Objective for Bayesian optimisation; bayes_opt proposes floats,
                        # so integer hyperparameters are truncated with int().
                        def BaggingC_TT(n_estimators, max_samples, max_features):
                            clf.model = BaggingClassifier(estimator=tree.DecisionTreeClassifier(),
                                                          n_estimators=int(n_estimators),
                                                          max_samples=int(max_samples), max_features=int(max_features),
                                                          n_jobs=-1)
                            clf.BaggingClassifier()
                            return clf.score


                        BaggingCbounds = {'n_estimators': (1, inputs['nestimators']),
                                          'max_samples': (1, inputs['max samples']),
                                          'max_features': (1, inputs['max features'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=BaggingC_TT, pbounds=BaggingCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        # NOTE(review): score_best is computed but never displayed.
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['max_samples'] = int(params_best['max_samples'])
                        params_best['max_features'] = int(params_best['max_features'])
                        # Added to params_best only so it shows up in the st.write display.
                        params_best['base estimator'] = 'decision tree'
                        st.write("\n", "\n", "best params: ", params_best)

                        # Refit with the optimised (integer-cast) hyperparameters.
                        clf.model = BaggingClassifier(estimator=tree.DecisionTreeClassifier(),
                                                      n_estimators=params_best['n_estimators'],
                                                      max_samples=params_best['max_samples'],
                                                      max_features=params_best['max_features'], n_jobs=-1)

                        clf.BaggingClassifier()

                        plot_and_export_results_clf(clf, 'BaggingC', col_name, unique_categories)

                elif data_process == 'cross val score':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = BaggingClassifier(estimator=tree.DecisionTreeClassifier(),
                                                      n_estimators=inputs['nestimators'],
                                                      max_samples=inputs['max samples'],
                                                      max_features=inputs['max features'], n_jobs=-1)

                        export_cross_val_results_clf(clf, cv, "BaggingC_cv", col_name, unique_categories,
                                                     inputs['random state'])
                    elif inputs['auto hyperparameters']:
                        # Objective: cross-validated score for the proposed hyperparameters.
                        def BaggingC_TT(n_estimators, max_samples, max_features):
                            clf.model = BaggingClassifier(estimator=tree.DecisionTreeClassifier(),
                                                          n_estimators=int(n_estimators),
                                                          max_samples=int(max_samples), max_features=int(max_features),
                                                          n_jobs=-1)
                            cv_score = cv_cal_clf(clf, cv, inputs['random state'])
                            return cv_score


                        BaggingCbounds = {'n_estimators': (1, inputs['nestimators']),
                                          'max_samples': (1, inputs['max samples']),
                                          'max_features': (1, inputs['max features'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=BaggingC_TT, pbounds=BaggingCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['max_samples'] = int(params_best['max_samples'])
                        params_best['max_features'] = int(params_best['max_features'])
                        params_best['base estimator'] = 'decision tree'
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = BaggingClassifier(estimator=tree.DecisionTreeClassifier(),
                                                      n_estimators=params_best['n_estimators'],
                                                      max_samples=params_best['max_samples'],
                                                      max_features=params_best['max_features'], n_jobs=-1)

                        export_cross_val_results_clf(clf, cv, "BaggingC_cv", col_name, unique_categories,
                                                     inputs['random state'])

                elif data_process == 'leave one out':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = BaggingClassifier(estimator=tree.DecisionTreeClassifier(),
                                                      n_estimators=inputs['nestimators'],
                                                      max_samples=inputs['max samples'],
                                                      max_features=inputs['max features'], n_jobs=-1)

                        export_loo_results_clf(clf, loo, "BaggingC_loo", col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        # Objective: leave-one-out score for the proposed hyperparameters.
                        def BaggingC_TT(n_estimators, max_samples, max_features):
                            clf.model = BaggingClassifier(estimator=tree.DecisionTreeClassifier(),
                                                          n_estimators=int(n_estimators),
                                                          max_samples=int(max_samples), max_features=int(max_features),
                                                          n_jobs=-1)
                            loo_score = loo_cal_clf(clf, loo)
                            return loo_score


                        BaggingCbounds = {'n_estimators': (1, inputs['nestimators']),
                                          'max_samples': (1, inputs['max samples']),
                                          'max_features': (1, inputs['max features'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=BaggingC_TT, pbounds=BaggingCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['max_samples'] = int(params_best['max_samples'])
                        params_best['max_features'] = int(params_best['max_features'])
                        params_best['base estimator'] = 'decision tree'
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = BaggingClassifier(estimator=tree.DecisionTreeClassifier(),
                                                      n_estimators=params_best['n_estimators'],
                                                      max_samples=params_best['max_samples'],
                                                      max_features=params_best['max_features'], n_jobs=-1)

                        export_loo_results_clf(clf, loo, "BaggingC_loo", col_name, unique_categories)

        if inputs['model'] == 'AdaBoostClassifier':
            # AdaBoost over decision-tree base estimators. Same layout as the other
            # model sections: pick an evaluation scheme, then train on button press.

            with col2:
                with st.expander('Operator'):

                    data_process = st.selectbox('data process',
                                                ('train test split', 'cross val score', 'leave one out'),
                                                label_visibility='collapsed')
                    if data_process == 'train test split':
                        inputs['test size'] = st.slider('test size', 0.1, 0.5, 0.2)
                        clf.Xtrain, clf.Xtest, clf.Ytrain, clf.Ytest = TTS(clf.features, clf.targets,
                                                                           test_size=inputs['test size'],
                                                                           random_state=inputs['random state'])

                    elif data_process == 'cross val score':
                        cv = st.number_input('cv', 1, 20, 5)

                    elif data_process == 'leave one out':
                        loo = LeaveOneOut()

            with st.container():
                button_train = st.button('Train', use_container_width=True)

            if button_train:
                if data_process == 'train test split':
                    if inputs['auto hyperparameters'] == False:
                        # Fixed hyperparameters: train once on the split and report.
                        clf.model = AdaBoostClassifier(estimator=tree.DecisionTreeClassifier(),
                                                       n_estimators=inputs['nestimators'],
                                                       learning_rate=inputs['learning rate'],
                                                       random_state=inputs['random state'])

                        clf.AdaBoostClassifier()
                        plot_and_export_results_clf(clf, 'AdaBoostC', col_name, unique_categories)

                    elif inputs['auto hyperparameters']:
                        # Objective for Bayesian optimisation; n_estimators is truncated
                        # to int because bayes_opt proposes floats.
                        def AdaBoostC_TT(n_estimators, learning_rate):
                            clf.model = AdaBoostClassifier(estimator=tree.DecisionTreeClassifier(),
                                                           n_estimators=int(n_estimators), learning_rate=learning_rate)
                            clf.AdaBoostClassifier()
                            return clf.score


                        # NOTE(review): the learning-rate bound (1, inputs['learning rate'])
                        # assumes the user-supplied rate exceeds 1 — confirm, otherwise
                        # the bounds are inverted.
                        AdaBoostCbounds = {'n_estimators': (1, inputs['nestimators']),
                                           'learning_rate': (1, inputs['learning rate'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=AdaBoostC_TT, pbounds=AdaBoostCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        # NOTE(review): score_best is computed but never displayed.
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        # Added to params_best only so it shows up in the st.write display.
                        params_best['base estimator'] = 'decision tree'
                        st.write("\n", "\n", "best params: ", params_best)

                        # Refit with the optimised hyperparameters.
                        clf.model = AdaBoostClassifier(estimator=tree.DecisionTreeClassifier(),
                                                       n_estimators=params_best['n_estimators'],
                                                       learning_rate=params_best['learning_rate'],
                                                       random_state=inputs['random state'])
                        clf.AdaBoostClassifier()
                        plot_and_export_results_clf(clf, 'AdaBoostC', col_name, unique_categories)
                elif data_process == 'cross val score':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = AdaBoostClassifier(estimator=tree.DecisionTreeClassifier(),
                                                       n_estimators=inputs['nestimators'],
                                                       learning_rate=inputs['learning rate'],
                                                       random_state=inputs['random state'])

                        export_cross_val_results_clf(clf, cv, "AdaBoostC_cv", col_name, unique_categories,
                                                     inputs['random state'])
                    elif inputs['auto hyperparameters']:
                        # Objective: cross-validated score for the proposed hyperparameters.
                        def AdaBoostC_TT(n_estimators, learning_rate):
                            clf.model = AdaBoostClassifier(estimator=tree.DecisionTreeClassifier(),
                                                           n_estimators=int(n_estimators), learning_rate=learning_rate)
                            cv_score = cv_cal_clf(clf, cv, inputs['random state'])
                            return cv_score


                        AdaBoostCbounds = {'n_estimators': (1, inputs['nestimators']),
                                           'learning_rate': (1, inputs['learning rate'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=AdaBoostC_TT, pbounds=AdaBoostCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['base estimator'] = 'decision tree'
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = AdaBoostClassifier(estimator=tree.DecisionTreeClassifier(),
                                                       n_estimators=params_best['n_estimators'],
                                                       learning_rate=params_best['learning_rate'],
                                                       random_state=inputs['random state'])

                        export_cross_val_results_clf(clf, cv, "AdaBoostC_cv", col_name, unique_categories,
                                                     inputs['random state'])

                elif data_process == 'leave one out':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = AdaBoostClassifier(estimator=tree.DecisionTreeClassifier(),
                                                       n_estimators=inputs['nestimators'],
                                                       learning_rate=inputs['learning rate'],
                                                       random_state=inputs['random state'])

                        export_loo_results_clf(clf, loo, "AdaBoostC_loo", col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        # Objective: leave-one-out score for the proposed hyperparameters.
                        def AdaBoostC_TT(n_estimators, learning_rate):
                            clf.model = AdaBoostClassifier(estimator=tree.DecisionTreeClassifier(),
                                                           n_estimators=int(n_estimators), learning_rate=learning_rate)
                            loo_score = loo_cal_clf(clf, loo)
                            return loo_score


                        AdaBoostCbounds = {'n_estimators': (1, inputs['nestimators']),
                                           'learning_rate': (1, inputs['learning rate'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=AdaBoostC_TT, pbounds=AdaBoostCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['base estimator'] = 'decision tree'
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = AdaBoostClassifier(estimator=tree.DecisionTreeClassifier(),
                                                       n_estimators=params_best['n_estimators'],
                                                       learning_rate=params_best['learning_rate'],
                                                       random_state=inputs['random state'])

                        export_loo_results_clf(clf, loo, "AdaBoostC_loo", col_name, unique_categories)

        if inputs['model'] == 'GradientBoostingClassifier':
            # Gradient boosting section (continues below this span with the
            # auto-hyperparameter branch). A scaler is offered before splitting/CV.

            with col2:
                with st.expander('Operator'):

                    preprocess = st.selectbox('data preprocess', ['StandardScaler', 'MinMaxScaler'])

                    data_process = st.selectbox('data process',
                                                ('train test split', 'cross val score', 'leave one out'),
                                                label_visibility='collapsed')
                    if data_process == 'train test split':
                        inputs['test size'] = st.slider('test size', 0.1, 0.5, 0.2)
                        if preprocess == 'StandardScaler':
                            clf.features = StandardScaler().fit_transform(clf.features)
                        if preprocess == 'MinMaxScaler':
                            clf.features = MinMaxScaler().fit_transform(clf.features)
                        clf.Xtrain, clf.Xtest, clf.Ytrain, clf.Ytest = TTS(clf.features, clf.targets,
                                                                           test_size=inputs['test size'],
                                                                           random_state=inputs['random state'])

                    elif data_process == 'cross val score':

                        if preprocess == 'StandardScaler':
                            clf.features = StandardScaler().fit_transform(clf.features)
                        if preprocess == 'MinMaxScaler':
                            clf.features = MinMaxScaler().fit_transform(clf.features)
                        cv = st.number_input('cv', 1, 20, 5)

                    elif data_process == 'leave one out':

                        if preprocess == 'StandardScaler':
                            clf.features = StandardScaler().fit_transform(clf.features)
                        if preprocess == 'MinMaxScaler':
                            clf.features = MinMaxScaler().fit_transform(clf.features)
                        loo = LeaveOneOut()

            with st.container():
                button_train = st.button('Train', use_container_width=True)

            if button_train:
                if data_process == 'train test split':
                    if inputs['auto hyperparameters'] == False:
                        # Fixed hyperparameters: train once on the split and report.
                        clf.model = GradientBoostingClassifier(learning_rate=inputs['learning rate'],
                                                               n_estimators=inputs['nestimators'],
                                                               max_depth=inputs['max depth'],
                                                               max_features=inputs['max features'],
                                                               random_state=inputs['random state'])
                        clf.GradientBoostingClassifier()

                        plot_and_export_results_clf(clf, 'GBC', col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        def GBC_TT(learning_rate, n_estimators):
                            clf.model = GradientBoostingClassifier(learning_rate=learning_rate,
                                                                   n_estimators=int(n_estimators),
                                                                   max_features=inputs['max features'])
                            clf.GradientBoostingClassifier()
                            return clf.score


                        GBCbounds = {'learning_rate': (0.001, inputs['learning rate']),
                                     'n_estimators': (1, inputs['nestimators'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=GBC_TT, pbounds=GBCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['max_features'] = inputs['max features']
                        st.write("\n", "\n", "best params: ", params_best)
                        clf.model = GradientBoostingClassifier(learning_rate=params_best['learning_rate'],
                                                               n_estimators=params_best['n_estimators'],
                                                               max_features=params_best['max_features'],
                                                               random_state=inputs['random state'])
                        clf.GradientBoostingClassifier()
                        plot_and_export_results_clf(clf, 'GBC', col_name, unique_categories)

                elif data_process == 'cross val score':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = GradientBoostingClassifier(learning_rate=inputs['learning rate'],
                                                               n_estimators=inputs['nestimators'],
                                                               max_depth=inputs['max depth'],
                                                               max_features=inputs['max features'],
                                                               random_state=inputs['random state'])

                        export_cross_val_results_clf(clf, cv, "GBC_cv", col_name, unique_categories,
                                                     inputs['random state'])
                    elif inputs['auto hyperparameters']:
                        def GBC_TT(learning_rate, n_estimators):
                            clf.model = GradientBoostingClassifier(learning_rate=learning_rate,
                                                                   n_estimators=int(n_estimators),
                                                                   max_features=inputs['max features'])
                            cv_score = cv_cal_clf(clf, cv, inputs['random state'])
                            return cv_score


                        GBCbounds = {'learning_rate': (0.001, inputs['learning rate']),
                                     'n_estimators': (1, inputs['nestimators'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=GBC_TT, pbounds=GBCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['max_features'] = inputs['max features']
                        st.write("\n", "\n", "best params: ", params_best)
                        clf.model = GradientBoostingClassifier(learning_rate=params_best['learning_rate'],
                                                               n_estimators=params_best['n_estimators'],
                                                               max_features=params_best['max_features'],
                                                               random_state=inputs['random state'])

                        export_cross_val_results_clf(clf, cv, "GBC_cv", col_name, unique_categories,
                                                     inputs['random state'])


                elif data_process == 'leave one out':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = GradientBoostingClassifier(learning_rate=inputs['learning rate'],
                                                               n_estimators=inputs['nestimators'],
                                                               max_depth=inputs['max depth'],
                                                               max_features=inputs['max features'],
                                                               random_state=inputs['random state'])
                        export_loo_results_clf(clf, loo, "GBC_loo", col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        def GBC_TT(learning_rate, n_estimators):
                            clf.model = GradientBoostingClassifier(learning_rate=learning_rate,
                                                                   n_estimators=int(n_estimators),
                                                                   max_features=inputs['max features'])
                            loo_score = loo_cal_clf(clf, loo)
                            return loo_score


                        GBCbounds = {'learning_rate': (0.001, inputs['learning rate']),
                                     'n_estimators': (1, inputs['nestimators'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=GBC_TT, pbounds=GBCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['max_features'] = inputs['max features']
                        st.write("\n", "\n", "best params: ", params_best)
                        clf.model = GradientBoostingClassifier(learning_rate=params_best['learning_rate'],
                                                               n_estimators=params_best['n_estimators'],
                                                               max_features=params_best['max_features'],
                                                               random_state=inputs['random state'])

                        export_loo_results_clf(clf, loo, "GBC_loo", col_name, unique_categories)

        # --- XGBClassifier branch -------------------------------------------------
        # Renders the operator panel (evaluation scheme only; no scaler — trees are
        # scale-invariant), then on "Train" fits an XGBoost classifier either with
        # fixed hyperparameters or via Bayesian optimization.
        #
        # Fixes vs. previous revision:
        #   * the 'cross val score' and 'leave one out' manual-parameter models now
        #     pass max_depth=inputs['max depth'], matching the 'train test split'
        #     branch (it was silently dropped, so the UI value was ignored there);
        #   * the colsample_bytree search bound now derives from inputs['subfeature']
        #     (the value the manual path uses for colsample_bytree) instead of the
        #     copy-pasted inputs['subsample'].
        if inputs['model'] == 'XGBClassifier':

            with col2:
                with st.expander('Operator'):

                    data_process = st.selectbox('data process',
                                                ('train test split', 'cross val score', 'leave one out'),
                                                label_visibility='collapsed')
                    if data_process == 'train test split':
                        inputs['test size'] = st.slider('test size', 0.1, 0.5, 0.2)

                        clf.Xtrain, clf.Xtest, clf.Ytrain, clf.Ytest = TTS(clf.features, clf.targets,
                                                                           test_size=inputs['test size'],
                                                                           random_state=inputs['random state'])

                    elif data_process == 'cross val score':
                        cv = st.number_input('cv', 1, 20, 5)
                    elif data_process == 'leave one out':
                        loo = LeaveOneOut()

            with st.container():
                button_train = st.button('Train', use_container_width=True)

            if button_train:
                if data_process == 'train test split':
                    if inputs['auto hyperparameters'] == False:
                        # NOTE(review): no random_state is passed to XGBClassifier here,
                        # unlike the other model branches — results may vary across
                        # runs; consider adding inputs['random state'].
                        clf.model = xgb.XGBClassifier(booster=inputs['base estimator'],
                                                      n_estimators=inputs['nestimators'], max_depth=inputs['max depth'],
                                                      subsample=inputs['subsample'],
                                                      colsample_bytree=inputs['subfeature'],
                                                      learning_rate=inputs['learning rate'])
                        # clf.Ytest = clf.Ytest.reset_index(drop=True)
                        clf.XGBClassifier()
                        plot_and_export_results_clf(clf, 'XGBC', col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        def XGBC_TT(n_estimators, max_depth, subsample, colsample_bytree, learning_rate):
                            """Bayesian-opt objective: hold-out score of XGBoost for the trial params."""
                            clf.model = xgb.XGBClassifier(booster=inputs['base estimator'],
                                                          n_estimators=int(n_estimators),
                                                          max_depth=int(max_depth), subsample=subsample,
                                                          colsample_bytree=colsample_bytree,
                                                          learning_rate=learning_rate)
                            clf.XGBClassifier()
                            return clf.score


                        # Search bounds: sidebar values act as upper limits.
                        XGBCbounds = {'n_estimators': (1, inputs['nestimators']), 'max_depth': (1, inputs['max depth']),
                                      'subsample': (0.5, inputs['subsample']),
                                      'colsample_bytree': (0.5, inputs['subfeature']),
                                      'learning_rate': (0.001, inputs['learning rate'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=XGBC_TT, pbounds=XGBCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        # Integer hyperparameters are searched as floats; cast back.
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['max_depth'] = int(params_best['max_depth'])
                        # Display-only entry: the refit below still reads the booster
                        # from inputs['base estimator'].
                        params_best['base estimator'] = 'gbtree'
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = xgb.XGBClassifier(booster=inputs['base estimator'],
                                                      n_estimators=params_best['n_estimators'],
                                                      max_depth=params_best['max_depth'],
                                                      subsample=params_best['subsample'],
                                                      colsample_bytree=params_best['colsample_bytree'],
                                                      learning_rate=params_best['learning_rate'])
                        clf.XGBClassifier()
                        plot_and_export_results_clf(clf, 'XGBC', col_name, unique_categories)

                elif data_process == 'cross val score':
                    if inputs['auto hyperparameters'] == False:
                        # max_depth was missing here (unlike the train-test-split
                        # branch); pass it so the UI value takes effect.
                        clf.model = xgb.XGBClassifier(booster=inputs['base estimator'],
                                                      n_estimators=inputs['nestimators'],
                                                      max_depth=inputs['max depth'],
                                                      subsample=inputs['subsample'],
                                                      colsample_bytree=inputs['subfeature'],
                                                      learning_rate=inputs['learning rate'])

                        export_cross_val_results_clf(clf, cv, "XGBC_cv", col_name, unique_categories,
                                                     inputs['random state'])
                    elif inputs['auto hyperparameters']:
                        def XGBC_TT(n_estimators, max_depth, subsample, colsample_bytree, learning_rate):
                            """Bayesian-opt objective: k-fold CV score of XGBoost for the trial params."""
                            clf.model = xgb.XGBClassifier(booster=inputs['base estimator'],
                                                          n_estimators=int(n_estimators),
                                                          max_depth=int(max_depth), subsample=subsample,
                                                          colsample_bytree=colsample_bytree,
                                                          learning_rate=learning_rate)
                            cv_score = cv_cal_clf(clf, cv, inputs['random state'])
                            return cv_score


                        XGBCbounds = {'n_estimators': (1, inputs['nestimators']), 'max_depth': (1, inputs['max depth']),
                                      'subsample': (0.5, inputs['subsample']),
                                      'colsample_bytree': (0.5, inputs['subfeature']),
                                      'learning_rate': (0.001, inputs['learning rate'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=XGBC_TT, pbounds=XGBCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['max_depth'] = int(params_best['max_depth'])
                        params_best['base estimator'] = 'gbtree'
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = xgb.XGBClassifier(booster=inputs['base estimator'],
                                                      n_estimators=params_best['n_estimators'],
                                                      max_depth=params_best['max_depth'],
                                                      subsample=params_best['subsample'],
                                                      colsample_bytree=params_best['colsample_bytree'],
                                                      learning_rate=params_best['learning_rate'])

                        export_cross_val_results_clf(clf, cv, "XGBC_cv", col_name, unique_categories,
                                                     inputs['random state'])

                elif data_process == 'leave one out':
                    if inputs['auto hyperparameters'] == False:
                        # max_depth was missing here as well; pass it for consistency
                        # with the train-test-split branch.
                        clf.model = xgb.XGBClassifier(booster=inputs['base estimator'],
                                                      n_estimators=inputs['nestimators'],
                                                      max_depth=inputs['max depth'],
                                                      subsample=inputs['subsample'],
                                                      colsample_bytree=inputs['subfeature'],
                                                      learning_rate=inputs['learning rate'])

                        export_loo_results_clf(clf, loo, "XGBC_loo", col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        def XGBC_TT(n_estimators, max_depth, subsample, colsample_bytree, learning_rate):
                            """Bayesian-opt objective: leave-one-out score of XGBoost for the trial params."""
                            clf.model = xgb.XGBClassifier(booster=inputs['base estimator'],
                                                          n_estimators=int(n_estimators),
                                                          max_depth=int(max_depth), subsample=subsample,
                                                          colsample_bytree=colsample_bytree,
                                                          learning_rate=learning_rate)
                            loo_score = loo_cal_clf(clf, loo)
                            return loo_score


                        XGBCbounds = {'n_estimators': (1, inputs['nestimators']), 'max_depth': (1, inputs['max depth']),
                                      'subsample': (0.5, inputs['subsample']),
                                      'colsample_bytree': (0.5, inputs['subfeature']),
                                      'learning_rate': (0.001, inputs['learning rate'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=XGBC_TT, pbounds=XGBCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['n_estimators'] = int(params_best['n_estimators'])
                        params_best['max_depth'] = int(params_best['max_depth'])
                        params_best['base estimator'] = 'gbtree'
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = xgb.XGBClassifier(booster=inputs['base estimator'],
                                                      n_estimators=params_best['n_estimators'],
                                                      max_depth=params_best['max_depth'],
                                                      subsample=params_best['subsample'],
                                                      colsample_bytree=params_best['colsample_bytree'],
                                                      learning_rate=params_best['learning_rate'])

                        export_loo_results_clf(clf, loo, "XGBC_loo", col_name, unique_categories)
        # --- CatBoostClassifier branch --------------------------------------------
        # Renders the operator panel (scaler + evaluation scheme), then on "Train"
        # fits CatBoost either with the user's fixed hyperparameters or via Bayesian
        # optimization over (iterations, depth, learning_rate).
        if inputs['model'] == 'CatBoostClassifier':

            with col2:
                with st.expander('Operator'):

                    # Feature scaling choice; applied in place to clf.features below,
                    # before any split/CV, so train and test share one fitted scaler.
                    preprocess = st.selectbox('data preprocess', ['StandardScaler', 'MinMaxScaler'])

                    data_process = st.selectbox('data process',
                                                ('train test split', 'cross val score', 'leave one out'),
                                                label_visibility='collapsed')
                    if data_process == 'train test split':
                        inputs['test size'] = st.slider('test size', 0.1, 0.5, 0.2)
                        if preprocess == 'StandardScaler':
                            clf.features = StandardScaler().fit_transform(clf.features)
                        if preprocess == 'MinMaxScaler':
                            clf.features = MinMaxScaler().fit_transform(clf.features)
                        clf.Xtrain, clf.Xtest, clf.Ytrain, clf.Ytest = TTS(clf.features, clf.targets,
                                                                           test_size=inputs['test size'],
                                                                           random_state=inputs['random state'])

                    elif data_process == 'cross val score':

                        if preprocess == 'StandardScaler':
                            clf.features = StandardScaler().fit_transform(clf.features)
                        if preprocess == 'MinMaxScaler':
                            clf.features = MinMaxScaler().fit_transform(clf.features)
                        # NOTE(review): widget allows cv=1, which sklearn's CV rejects
                        # at fit time — confirm whether the minimum should be 2.
                        cv = st.number_input('cv', 1, 20, 5)

                    elif data_process == 'leave one out':

                        if preprocess == 'StandardScaler':
                            clf.features = StandardScaler().fit_transform(clf.features)
                        if preprocess == 'MinMaxScaler':
                            clf.features = MinMaxScaler().fit_transform(clf.features)
                        loo = LeaveOneOut()

            with st.container():
                button_train = st.button('Train', use_container_width=True)

            # Training only runs on the rerun triggered by the button click.
            if button_train:
                if data_process == 'train test split':
                    if inputs['auto hyperparameters'] == False:
                        # Fixed hyperparameters taken straight from the sidebar inputs.
                        clf.model = CatBoostClassifier(iterations=inputs['niteration'],
                                                       learning_rate=inputs['learning rate'], depth=inputs['max depth'],
                                                       random_seed=inputs['random state'])

                        clf.CatBoostClassifier()
                        plot_and_export_results_clf(clf, 'CatBoostC', col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        def CatBC_TT(iterations, depth, learning_rate):
                            """Bayesian-opt objective: hold-out score of CatBoost for the trial params."""
                            # NOTE(review): the objective does not seed random_seed, so
                            # trial scores are not strictly reproducible — confirm intended.
                            clf.model = CatBoostClassifier(iterations=int(iterations), learning_rate=learning_rate,
                                                           depth=int(depth))
                            clf.CatBoostClassifier()
                            return clf.score


                        # Search bounds: sidebar values act as upper limits.
                        CatBCbounds = {'iterations': (1, inputs['niteration']), 'depth': (1, inputs['max depth']),
                                       'learning_rate': (0.001, inputs['learning rate'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=CatBC_TT, pbounds=CatBCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        # Integer hyperparameters are searched as floats; cast back.
                        params_best['iterations'] = int(params_best['iterations'])
                        params_best['depth'] = int(params_best['depth'])
                        st.write("\n", "\n", "best params: ", params_best)

                        # Refit once with the best parameters (seeded this time).
                        clf.model = CatBoostClassifier(iterations=params_best['iterations'],
                                                       learning_rate=params_best['learning_rate'],
                                                       depth=params_best['depth'], random_seed=inputs['random state'])

                        clf.CatBoostClassifier()
                        plot_and_export_results_clf(clf, 'CatBoostC', col_name, unique_categories)

                elif data_process == 'cross val score':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = CatBoostClassifier(iterations=inputs['niteration'],
                                                       learning_rate=inputs['learning rate'], depth=inputs['max depth'],
                                                       random_seed=inputs['random state'])

                        export_cross_val_results_clf(clf, cv, "CatBoostC_cv", col_name, unique_categories,
                                                     inputs['random state'])

                    elif inputs['auto hyperparameters']:
                        def CatBC_TT(iterations, depth, learning_rate):
                            """Bayesian-opt objective: k-fold CV score of CatBoost for the trial params."""
                            clf.model = CatBoostClassifier(iterations=int(iterations), learning_rate=learning_rate,
                                                           depth=int(depth))
                            cv_score = cv_cal_clf(clf, cv, inputs['random state'])
                            return cv_score


                        CatBCbounds = {'iterations': (1, inputs['niteration']), 'depth': (1, inputs['max depth']),
                                       'learning_rate': (0.001, inputs['learning rate'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=CatBC_TT, pbounds=CatBCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['iterations'] = int(params_best['iterations'])
                        params_best['depth'] = int(params_best['depth'])
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = CatBoostClassifier(iterations=params_best['iterations'],
                                                       learning_rate=params_best['learning_rate'],
                                                       depth=params_best['depth'], random_seed=inputs['random state'])
                        export_cross_val_results_clf(clf, cv, "CatBoostC_cv", col_name, unique_categories,
                                                     inputs['random state'])

                elif data_process == 'leave one out':
                    if inputs['auto hyperparameters'] == False:
                        clf.model = CatBoostClassifier(iterations=inputs['niteration'],
                                                       learning_rate=inputs['learning rate'], depth=inputs['max depth'],
                                                       random_seed=inputs['random state'])

                        export_loo_results_clf(clf, loo, "CatBoostC_loo", col_name, unique_categories)
                    elif inputs['auto hyperparameters']:
                        def CatBC_TT(iterations, depth, learning_rate):
                            """Bayesian-opt objective: leave-one-out score of CatBoost for the trial params."""
                            clf.model = CatBoostClassifier(iterations=int(iterations), learning_rate=learning_rate,
                                                           depth=int(depth))
                            loo_score = loo_cal_clf(clf, loo)
                            return loo_score


                        CatBCbounds = {'iterations': (1, inputs['niteration']), 'depth': (1, inputs['max depth']),
                                       'learning_rate': (0.001, inputs['learning rate'])}

                        with st.expander('hyperparameter opt'):
                            optimizer = BayesianOptimization(f=CatBC_TT, pbounds=CatBCbounds,
                                                             random_state=inputs['random state'],
                                                             allow_duplicate_points=True)
                            optimizer.maximize(init_points=inputs['init points'], n_iter=inputs['iteration number'])
                        params_best = optimizer.max["params"]
                        score_best = optimizer.max["target"]
                        params_best['iterations'] = int(params_best['iterations'])
                        params_best['depth'] = int(params_best['depth'])
                        st.write("\n", "\n", "best params: ", params_best)

                        clf.model = CatBoostClassifier(iterations=params_best['iterations'],
                                                       learning_rate=params_best['learning_rate'],
                                                       depth=params_best['depth'], random_seed=inputs['random state'])
                        export_loo_results_clf(clf, loo, "CatBoostC_loo", col_name, unique_categories)
        # Visual separator after the model section.
        st.write('---')