# import sys
from prettytable import PrettyTable
from sklearn.ensemble import RandomForestRegressor as RFR
from sklearn.feature_selection import mutual_info_regression as MIR
from sklearn.impute import SimpleImputer
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression as LinearR
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score as CVS
# from sko.AFSA import AFSA
from streamlit_extras.colored_header import colored_header
from streamlit_option_menu import option_menu

from business.algorithm.utils import *


def run():
    """Streamlit page: interactive data-preprocessing tools for feature engineering.

    Renders a sidebar menu and dispatches to one of seven sub-tools:
    feature transformation ("特征转换"), duplicate-value removal ("重复值"),
    feature-feature correlation ("特征相关性"), feature-target correlation
    ("特征与目标相关性"), one-hot encoding ("独热编码"), feature-importance
    ranking ("特征重要性排序"), and missing-value handling ("缺失值").

    Relies on names star-imported from ``business.algorithm.utils`` (``st``,
    ``pd``, ``np``, ``plt``, ``FeatureSelector``, ``customPlot``,
    ``download_button``, ``check_string_NaN``, ``feature_transform``,
    ``model_platform``) -- assumed API, TODO confirm against that module.
    """
    with st.sidebar:
        # Sidebar navigation; labels are Chinese (see docstring for translations).
        sub_option = option_menu(None, ["特征转换", "重复值", "特征相关性",
                                        "特征与目标相关性", "独热编码", "特征重要性排序",
                                        "缺失值"])

    if sub_option == "缺失值":
        # ---- Missing-value handling: drop columns or impute ----
        colored_header(label="缺失值", description=" ", color_name="violet-90")
        file = st.file_uploader("Upload `.csv`file", type=['csv'], label_visibility="collapsed")
        if file is None:
            # Show an example of the expected upload when no file is given yet.
            table = PrettyTable(['file name', 'class', 'description'])
            table.add_row(['file_1', 'dataset', 'data file'])
            st.write(table)
        if file is not None:
            df = pd.read_csv(file)
            null_columns = df.columns[df.isnull().any()]
            if len(null_columns) == 0:
                # Nothing to do on a complete dataset; halt the script run.
                st.error('No missing features!')
                st.stop()

            colored_header(label="数据信息", description=" ", color_name="violet-70")
            nrow = st.slider("rows", 1, len(df), 5)
            df_nrow = df.head(nrow)
            st.write(df_nrow)

            colored_header(label="特征&目标", description=" ", color_name="violet-70")

            # Convention: the last `target_num` columns are targets, the rest features.
            target_num = st.number_input('目标数量', min_value=1, max_value=10, value=1)

            col_feature, col_target = st.columns(2)
            # features
            features = df.iloc[:, :-target_num]
            # targets
            targets = df.iloc[:, -target_num:]
            with col_feature:
                st.write(features.head())
            with col_target:
                st.write(targets.head())

            colored_header(label="method", description=" ", color_name="violet-70")
            sub_sub_option = option_menu(None, ["drop missing value", "fill missing value"],
                                         icons=['house', "list-task"],
                                         menu_icon="cast", default_index=0, orientation="horizontal")
            if sub_sub_option == "drop missing value":
                # Drop feature columns whose missing fraction exceeds the threshold.
                fs = FeatureSelector(features, targets)
                missing_threshold = st.slider("drop threshold", 0.001, 1.0, 0.5)
                fs.identify_missing(missing_threshold)
                fs.features_dropped_missing = fs.features.drop(columns=fs.ops['missing'])

                data = pd.concat([fs.features_dropped_missing, targets], axis=1)
                st.write(data)
                tmp_download_link = download_button(data, f'dropeddata.csv', button_text='download')
                st.markdown(tmp_download_link, unsafe_allow_html=True)
                # NOTE(review): '\g' is an invalid escape sequence -- should be a raw
                # string (r'...') since $\gt$ is LaTeX, not a Python escape.
                st.write('%d features with $\gt$ %0.2f missing threshold.\n' % (
                    len(fs.ops['missing']), fs.missing_threshold))
                plot = customPlot()

                with st.expander('plot parameters'):
                    col1, col2 = st.columns([1, 3])
                    with col1:
                        # Integer arguments appear to be widget keys -- TODO confirm
                        # against customPlot's API.
                        options_selected = [plot.set_title_fontsize(1), plot.set_label_fontsize(2),
                                            plot.set_tick_fontsize(3), plot.set_legend_fontsize(4),
                                            plot.set_color('柱颜色', 19, 5)]
                    with col2:
                        plot.feature_missing(options_selected, fs.record_missing, fs.missing_stats)
                st.write('---')
            if sub_sub_option == "fill missing value":
                fs = FeatureSelector(features, targets)
                missing_feature_list = fs.features.columns[fs.features.isnull().any()].tolist()
                with st.container():
                    fill_method = st.selectbox('fill method', ('constant', 'random forest'))

                if fill_method == 'constant':

                    # NOTE(review): default `missing_feature_list[-1]` raises IndexError
                    # when only target columns contain NaN -- the earlier st.stop() guard
                    # checks the whole df, not features alone. Verify.
                    missing_feature = st.multiselect('feature of drop value', missing_feature_list,
                                                     missing_feature_list[-1])

                    # Simple column-wise imputation via sklearn's SimpleImputer.
                    option_filled = st.selectbox('mean', ('mean', 'constant', 'median', 'mode'))
                    if option_filled == 'mean':
                        # fs.features[missing_feature] = fs.features[missing_feature].fillna(fs.features[missing_feature].mean())
                        imp = SimpleImputer(missing_values=np.nan, strategy='mean')

                        fs.features[missing_feature] = imp.fit_transform(fs.features[missing_feature])
                    elif option_filled == 'constant':
                        # fs.features[missing_feature] = fs.features[missing_feature].fillna(0)
                        fill_value = st.number_input('value')
                        imp = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=fill_value)

                        fs.features[missing_feature] = imp.fit_transform(fs.features[missing_feature])
                    elif option_filled == 'median':
                        # fs.features[missing_feature] = fs.features[missing_feature].fillna(0)
                        imp = SimpleImputer(missing_values=np.nan, strategy='median')

                        fs.features[missing_feature] = imp.fit_transform(fs.features[missing_feature])
                    elif option_filled == 'mode':

                        imp = SimpleImputer(missing_values=np.nan, strategy='most_frequent')

                        fs.features[missing_feature] = imp.fit_transform(fs.features[missing_feature])

                    data = pd.concat([fs.features, targets], axis=1)
                else:
                    # Random-forest regression imputation: for each column with NaNs
                    # (fewest-missing first), train RFR on the rows where the column
                    # is known and predict the missing entries.
                    with st.expander('hyper parameters'):
                        num_estimators = st.number_input('评估器数量', 1, 10000, 100)  # n_estimators
                        criterion = st.selectbox('criterion',
                                                 ('squared_error', 'absolute_error', 'friedman_mse', 'poisson'))
                        max_depth = st.number_input('max depth', 1, 1000, 5)
                        min_samples_leaf = st.number_input('min samples leaf', 1, 1000, 5)
                        min_samples_split = st.number_input('min samples split', 1, 1000, 5)
                        # NOTE(review): checkbox returns a bool, so RFR below gets
                        # random_state=True/False (i.e. 1/0), NOT 1024 as the label
                        # suggests -- confirm intent.
                        random_state = st.checkbox('random state 1024', True)

                    # NOTE(review): the four branches below are copy-paste identical
                    # except for the pre-fill strategy of the *other* columns;
                    # refactor candidate once the full function is in view.
                    option_filled = st.selectbox('mean', ('mean', 'constant', 'median', 'mode'))
                    if option_filled == 'mean':
                        feature_missing_reg = fs.features.copy()
                        null_columns = feature_missing_reg.columns[feature_missing_reg.isnull().any()]

                        # Impute columns in ascending order of missing count.
                        null_counts = feature_missing_reg.isnull().sum()[null_columns].sort_values()
                        null_columns_ordered = null_counts.index.tolist()

                        for i in null_columns_ordered:
                            df = feature_missing_reg
                            fillc = df[i]  # column being imputed (regression target)

                            # Predictors: all other features plus the real targets.
                            df = pd.concat([df.iloc[:, df.columns != i], pd.DataFrame(targets)], axis=1)

                            # Temporarily mean-fill remaining NaNs so RFR can fit.
                            df_temp_fill = SimpleImputer(missing_values=np.nan, strategy='mean').fit_transform(df)

                            YTrain = fillc[fillc.notnull()]
                            YTest = fillc[fillc.isnull()]
                            # NOTE(review): assumes a default RangeIndex; positional
                            # indexing by label index breaks on reindexed data -- verify.
                            XTrain = df_temp_fill[YTrain.index, :]
                            XTest = df_temp_fill[YTest.index, :]

                            rfc = RFR(n_estimators=num_estimators, criterion=criterion, max_depth=max_depth,
                                      min_samples_leaf=min_samples_leaf,
                                      min_samples_split=min_samples_split, random_state=random_state)

                            rfc = rfc.fit(XTrain, YTrain)
                            YPredict = rfc.predict(XTest)

                            # Write predictions back so later columns can use them.
                            feature_missing_reg.loc[feature_missing_reg[i].isnull(), i] = YPredict

                    elif option_filled == 'constant':

                        fill_value = st.number_input('value')
                        feature_missing_reg = fs.features.copy()

                        null_columns = feature_missing_reg.columns[feature_missing_reg.isnull().any()]

                        null_counts = feature_missing_reg.isnull().sum()[null_columns].sort_values()
                        null_columns_ordered = null_counts.index.tolist()

                        for i in null_columns_ordered:
                            df = feature_missing_reg
                            fillc = df[i]

                            df = pd.concat([df.iloc[:, df.columns != i], pd.DataFrame(targets)], axis=1)

                            # Same pipeline as 'mean', but constant pre-fill.
                            df_temp_fill = SimpleImputer(missing_values=np.nan, strategy='constant',
                                                         fill_value=fill_value).fit_transform(df)

                            YTrain = fillc[fillc.notnull()]
                            YTest = fillc[fillc.isnull()]
                            XTrain = df_temp_fill[YTrain.index, :]
                            XTest = df_temp_fill[YTest.index, :]

                            rfc = RFR(n_estimators=num_estimators, criterion=criterion, max_depth=max_depth,
                                      min_samples_leaf=min_samples_leaf,
                                      min_samples_split=min_samples_split, random_state=random_state)

                            rfc = rfc.fit(XTrain, YTrain)
                            YPredict = rfc.predict(XTest)

                            feature_missing_reg.loc[feature_missing_reg[i].isnull(), i] = YPredict

                    elif option_filled == 'median':
                        feature_missing_reg = fs.features.copy()

                        null_columns = feature_missing_reg.columns[feature_missing_reg.isnull().any()]

                        null_counts = feature_missing_reg.isnull().sum()[null_columns].sort_values()
                        null_columns_ordered = null_counts.index.tolist()

                        for i in null_columns_ordered:
                            df = feature_missing_reg
                            fillc = df[i]

                            df = pd.concat([df.iloc[:, df.columns != i], pd.DataFrame(targets)], axis=1)

                            # Same pipeline as 'mean', but median pre-fill.
                            df_temp_fill = SimpleImputer(missing_values=np.nan, strategy='median').fit_transform(df)

                            YTrain = fillc[fillc.notnull()]
                            YTest = fillc[fillc.isnull()]
                            XTrain = df_temp_fill[YTrain.index, :]
                            XTest = df_temp_fill[YTest.index, :]

                            rfc = RFR(n_estimators=num_estimators, criterion=criterion, max_depth=max_depth,
                                      min_samples_leaf=min_samples_leaf,
                                      min_samples_split=min_samples_split, random_state=random_state)

                            rfc = rfc.fit(XTrain, YTrain)
                            YPredict = rfc.predict(XTest)

                            feature_missing_reg.loc[feature_missing_reg[i].isnull(), i] = YPredict

                    elif option_filled == 'mode':

                        feature_missing_reg = fs.features.copy()

                        null_columns = feature_missing_reg.columns[feature_missing_reg.isnull().any()]

                        null_counts = feature_missing_reg.isnull().sum()[null_columns].sort_values()
                        null_columns_ordered = null_counts.index.tolist()

                        for i in null_columns_ordered:
                            df = feature_missing_reg
                            fillc = df[i]

                            df = pd.concat([df.iloc[:, df.columns != i], pd.DataFrame(targets)], axis=1)

                            # Same pipeline as 'mean', but most-frequent pre-fill.
                            df_temp_fill = SimpleImputer(missing_values=np.nan, strategy='most_frequent').fit_transform(
                                df)

                            YTrain = fillc[fillc.notnull()]
                            YTest = fillc[fillc.isnull()]
                            XTrain = df_temp_fill[YTrain.index, :]
                            XTest = df_temp_fill[YTest.index, :]

                            rfc = RFR(n_estimators=num_estimators, criterion=criterion, max_depth=max_depth,
                                      min_samples_leaf=min_samples_leaf,
                                      min_samples_split=min_samples_split, random_state=random_state)
                            rfc = rfc.fit(XTrain, YTrain)
                            YPredict = rfc.predict(XTest)

                            feature_missing_reg.loc[feature_missing_reg[i].isnull(), i] = YPredict

                    data = pd.concat([feature_missing_reg, targets], axis=1)

                st.write(data)

                tmp_download_link = download_button(data, f'fillmissing.csv', button_text='download')
                st.markdown(tmp_download_link, unsafe_allow_html=True)
                st.write('---')

    elif sub_option == "重复值":
        # ---- Duplicate-value removal: drop near-constant feature columns ----
        colored_header(label="重复值", description=" ", color_name="violet-90")
        file = st.file_uploader("Upload `.csv`file", type=['csv'], label_visibility="collapsed")
        if file is None:
            table = PrettyTable(['file name', 'class', 'description'])
            table.add_row(['file_1', 'dataset', 'data file'])
            st.write(table)
        if file is not None:
            colored_header(label="数据信息", description=" ", color_name="violet-70")
            df = pd.read_csv(file)
            # Abort on non-numeric / NaN contamination -- assumed behavior of the
            # utils helper, TODO confirm.
            check_string_NaN(df)

            nrow = st.slider("rows", 1, len(df), 5)
            df_nrow = df.head(nrow)
            st.write(df_nrow)

            colored_header(label="特征&目标", description=" ", color_name="violet-70")

            target_num = st.number_input('目标数量', min_value=1, max_value=10, value=1)

            col_feature, col_target = st.columns(2)

            # features
            features = df.iloc[:, :-target_num]
            # targets
            targets = df.iloc[:, -target_num:]
            with col_feature:
                st.write(features.head())
            with col_target:
                st.write(targets.head())

            colored_header(label="Feature of drop duplicate value", description=" ", color_name="violet-70")
            fs = FeatureSelector(features, targets)
            plot = customPlot()

            col1, col2 = st.columns([1, 3])
            with col1:
                fs.identify_nunique()
                option_counts = st.slider('number of drop duplicate value', 0, int(fs.unique_stats.max()) - 1, 1)
                st.write(fs.unique_stats)
            with col2:
                # Drop columns whose unique-value count is <= option_counts.
                fs.identify_nunique(option_counts)
                fs.features_dropped_single = fs.features.drop(columns=fs.ops['single_unique'])
                data = pd.concat([fs.features_dropped_single, targets], axis=1)
                st.write(fs.features_dropped_single)

                tmp_download_link = download_button(data, f'dropduplicate.csv', button_text='download')
                st.markdown(tmp_download_link, unsafe_allow_html=True)
                # NOTE(review): '\l' is an invalid escape sequence -- should be a
                # raw string since $\leq$ is LaTeX.
                st.write('%d features $\leq$  %d unique value.\n' % (len(fs.ops['single_unique']), option_counts))

            with st.expander('plot parameters'):
                col1, col2 = st.columns([1, 3])
                with col1:
                    options_selected = [plot.set_title_fontsize(6), plot.set_label_fontsize(7),
                                        plot.set_tick_fontsize(8), plot.set_legend_fontsize(9),
                                        plot.set_color('柱颜色', 19, 10)]
                with col2:
                    plot.feature_nunique(options_selected, fs.record_single_unique, fs.unique_stats)

            st.write('---')

    elif sub_option == "特征转换":
        # ---- Feature transformation: composition strings -> numeric features ----
        colored_header(label="特征转换", description=" ", color_name="violet-90")
        file = st.file_uploader("Upload `.csv`file", type=['csv'], label_visibility="collapsed")
        if file is None:
            # Example input: an alloy composition string.
            table = PrettyTable(['成分'])
            table.add_row(['Ti50Cu42.5Ni7.5'])
            st.write(table)
        if file is not None:
            colored_header(label="数据信息", description=" ", color_name="violet-70")

            df = pd.read_csv(file)
            df_nrow = df.head()
            st.write(df_nrow)
            # '合金' = alloy, '无机' = inorganic compound.
            option = st.selectbox('option', ['合金', '无机'])
            button = st.button('转换', use_container_width=True)
            if button:
                df = feature_transform(df, option)
                st.write(df.head())
                tmp_download_link = download_button(df, f'trans_data.csv', button_text='download')
                st.markdown(tmp_download_link, unsafe_allow_html=True)

    elif sub_option == "特征相关性":
        # ---- Feature-feature correlation: drop collinear features ----
        colored_header(label="特征相关性", description=" ", color_name="violet-90")
        file = st.file_uploader("Upload `.csv`file", type=['csv'], label_visibility="collapsed")
        if file is None:
            table = PrettyTable(['file name', 'class', 'description'])
            table.add_row(['file_1', 'dataset', 'data file'])
            st.write(table)
        if file is not None:
            df = pd.read_csv(file)
            check_string_NaN(df)
            colored_header(label="数据信息", description=" ", color_name="violet-70")
            nrow = st.slider("rows", 1, len(df), 5)
            df_nrow = df.head(nrow)
            st.write(df_nrow)

            colored_header(label="特征&目标", description=" ", color_name="violet-70")

            target_num = st.number_input('目标数量', min_value=1, max_value=10, value=1)

            col_feature, col_target = st.columns(2)
            # features
            features = df.iloc[:, :-target_num]
            # targets
            targets = df.iloc[:, -target_num:]
            with col_feature:
                st.write(features.head())
            with col_target:
                st.write(targets.head())

            colored_header(label="Drop collinear feature", description=" ", color_name="violet-30")
            fs = FeatureSelector(features, targets)
            plot = customPlot()

            target_selected_option = st.selectbox('target', list(fs.targets))
            target_selected = fs.targets[target_selected_option]

            col1, col2 = st.columns([1, 3])
            with col1:
                corr_method = st.selectbox("correlation analysis method", ["pearson", "spearman", "kendall"])
                correlation_threshold = st.slider("correlation threshold", 0.001, 1.0, 0.9)
                corr_matrix = pd.concat([fs.features, target_selected], axis=1).corr(corr_method)
                fs.identify_collinear(corr_matrix, correlation_threshold)
                # Of each collinear pair, keep the feature more correlated with the
                # target -- assumed semantics of this helper, TODO confirm.
                fs.judge_drop_f_t_after_f_f([target_selected_option], corr_matrix)

                is_mask = st.selectbox('mask', ('Yes', 'No'))
                with st.expander('plot parameters'):
                    options_selected = [plot.set_tick_fontsize(21), plot.set_tick_fontsize(22)]
                with st.expander('collinear feature'):
                    st.write(fs.record_collinear)
            with col2:
                fs.features_dropped_collinear = fs.features.drop(columns=fs.ops['collinear'])
                # NOTE(review): assert is stripped under -O; consider st.error + st.stop.
                assert fs.features_dropped_collinear.size != 0, 'zero feature !'
                corr_matrix_drop_collinear = fs.features_dropped_collinear.corr(corr_method)
                plot.corr_cofficient(options_selected, is_mask, corr_matrix_drop_collinear)
                with st.expander('dropped data'):
                    data = pd.concat([fs.features_dropped_collinear, targets], axis=1)
                    st.write(data)
                    tmp_download_link = download_button(data, f'droppedcollinear.csv', button_text='download')
                    st.markdown(tmp_download_link, unsafe_allow_html=True)

    elif sub_option == "特征与目标相关性":
        # ---- Feature-target correlation: drop weakly correlated features ----
        colored_header(label="特征与目标相关性", description=" ", color_name="violet-90")
        file = st.file_uploader("Upload `.csv`file", type=['csv'], label_visibility="collapsed")
        if file is None:
            table = PrettyTable(['file name', 'class', 'description'])
            table.add_row(['file_1', 'dataset', 'data file'])
            st.write(table)
        if file is not None:
            df = pd.read_csv(file)
            # Detect missing values / invalid cells.
            check_string_NaN(df)
            colored_header(label="数据信息", description=" ", color_name="violet-70")
            nrow = st.slider("rows", 1, len(df), 5)
            df_nrow = df.head(nrow)
            st.write(df_nrow)

            colored_header(label="特征&目标", description=" ", color_name="violet-70")

            target_num = st.number_input('目标数量', min_value=1, max_value=10, value=1)

            col_feature, col_target = st.columns(2)

            # features
            features = df.iloc[:, :-target_num]
            # targets
            targets = df.iloc[:, -target_num:]
            with col_feature:
                st.write(features.head())
            with col_target:
                st.write(targets.head())

            colored_header(label="Drop low correlation feature", description=" ", color_name="violet-70")
            fs = FeatureSelector(features, targets)
            plot = customPlot()
            target_selected_option = st.selectbox('特征', list(fs.targets))
            col1, col2 = st.columns([1, 3])

            with col1:
                # "MIR" = mutual information regression (sklearn), in addition to
                # the three pandas corr methods.
                corr_method = st.selectbox("correlation analysis method", ["pearson", "spearman", "kendall", "MIR"],
                                           key=15)
                if corr_method != "MIR":
                    option_dropped_threshold = st.slider('correlation threshold', 0.0, 1.0, 0.0)
                if corr_method == 'MIR':
                    options_seed = st.checkbox('random state 1024', True)
                with st.expander('plot parameters'):
                    options_selected = [plot.set_title_fontsize(11), plot.set_label_fontsize(12),
                                        plot.set_tick_fontsize(13), plot.set_legend_fontsize(14),
                                        plot.set_color('柱颜色', 19, 16)]

            with col2:
                target_selected = fs.targets[target_selected_option]
                if corr_method != "MIR":
                    # Absolute correlation: drop features below the threshold.
                    corr_matrix = pd.concat([fs.features, target_selected], axis=1).corr(corr_method).abs()

                    fs.judge_drop_f_t([target_selected_option], corr_matrix, option_dropped_threshold)

                    fs.features_dropped_f_t = fs.features.drop(columns=fs.ops['f_t_low_corr'])
                    # Signed correlation of surviving features vs the target
                    # ([:-1] removes the target's self-correlation row).
                    corr_f_t = pd.concat([fs.features_dropped_f_t, target_selected], axis=1).corr(corr_method)[
                                   target_selected_option][:-1]

                    plot.corr_feature_target(options_selected, corr_f_t)
                    with st.expander('dropped data'):
                        data = pd.concat([fs.features_dropped_f_t, targets], axis=1)
                        st.write(data)
                        tmp_download_link = download_button(data, f'droplowcorr.csv', button_text='download')
                        st.markdown(tmp_download_link, unsafe_allow_html=True)
                else:
                    # Mutual information: ranking only, no dropping here.
                    if options_seed:
                        corr_mir = MIR(fs.features, target_selected, random_state=1024)
                    else:
                        corr_mir = MIR(fs.features, target_selected)
                    corr_mir = pd.DataFrame(corr_mir).set_index(pd.Index(list(fs.features.columns)))
                    corr_mir.rename(columns={0: 'mutual info'}, inplace=True)
                    plot.corr_feature_target_mir(options_selected, corr_mir)
            st.write('---')

    elif sub_option == "独热编码":
        # ---- One-hot encoding of string-valued feature columns ----
        colored_header(label="独热编码", description=" ", color_name="violet-90")
        file = st.file_uploader("Upload `.csv`file", type=['csv'], label_visibility="collapsed")
        if file is None:
            table = PrettyTable(['file name', 'class', 'description'])
            table.add_row(['file_1', 'dataset', 'data file'])
            st.write(table)
        if file is not None:
            df = pd.read_csv(file)
            check_string_NaN(df)
            colored_header(label="数据信息", description=" ", color_name="violet-70")
            nrow = st.slider("rows", 1, len(df), 5)
            df_nrow = df.head(nrow)
            st.write(df_nrow)

            colored_header(label="特征&目标", description=" ", color_name="violet-70")

            target_num = st.number_input('目标数量', min_value=1, max_value=10, value=1)

            col_feature, col_target = st.columns(2)

            # features
            features = df.iloc[:, :-target_num]
            # targets
            targets = df.iloc[:, -target_num:]
            with col_feature:
                st.write(features.head())
            with col_target:
                st.write(targets.head())

            fs = FeatureSelector(features, targets)
            plot = customPlot()
            # Remember original object-dtype columns so they can be removed after
            # the encoder appends their one-hot expansion.
            str_col_list = fs.features.select_dtypes(include=['object']).columns.tolist()
            fs.one_hot_feature_encoder(True)
            data = pd.concat([fs.features_plus_oneHot, targets], axis=1)
            # delete origin string columns
            data = data.drop(str_col_list, axis=1)
            st.write(data)
            tmp_download_link = download_button(data, f'one-hotcoding.csv', button_text='download')
            st.markdown(tmp_download_link, unsafe_allow_html=True)
            st.write('---')

    elif sub_option == "特征重要性排序":
        # ---- Feature-importance ranking via linear models ----
        colored_header(label="特征重要性排序", description=" ", color_name="violet-90")
        file = st.file_uploader("Upload `.csv`file", type=['csv'], label_visibility="collapsed")
        if file is None:
            table = PrettyTable(['file name', 'class', 'description'])
            table.add_row(['file_1', 'dataset', 'data file'])
            st.write(table)
        if file is not None:
            df = pd.read_csv(file)
            # Detect missing values / invalid cells.
            check_string_NaN(df)
            colored_header(label="数据信息", description=" ", color_name="violet-70")
            nrow = st.slider("rows", 1, len(df), 5)
            df_nrow = df.head(nrow)
            st.write(df_nrow)

            colored_header(label="特征&目标", description=" ", color_name="violet-70")

            target_num = st.number_input('目标数量', min_value=1, max_value=10, value=1)

            col_feature, col_target = st.columns(2)
            # features
            features = df.iloc[:, :-target_num]
            # targets
            targets = df.iloc[:, -target_num:]
            with col_feature:
                st.write(features.head())
            with col_target:
                st.write(targets.head())

            fs = FeatureSelector(features, targets)

            colored_header(label="target", description=" ", color_name="violet-70")

            target_selected_name = st.selectbox('target', list(fs.targets)[::-1])

            # Narrow the selector to a single target series for importance fitting.
            fs.targets = targets[target_selected_name]

            colored_header(label="Selector", description=" ", color_name="violet-70")

            model_path = './models/feature importance'

            # model_platform presumably loads model templates from model_path and
            # returns a UI wrapper -- TODO confirm.
            template_alg = model_platform(model_path=model_path)

            colored_header(label="Training", description=" ", color_name="violet-70")

            inputs, col2 = template_alg.show()

            if inputs['model'] == 'LinearRegressor':

                fs.model = LinearR()

                with col2:
                    option_cumulative_importance = st.slider('cumulative importance threshold', 0.0, 1.0, 0.95)
                    Embedded_method = st.checkbox('Embedded method', False)
                    if Embedded_method:
                        cv = st.number_input('cv', 1, 20, 5)
                with st.container():
                    button_train = st.button('train', use_container_width=True)
                if button_train:
                    fs.LinearRegressor()
                    fs.identify_zero_low_importance(option_cumulative_importance)
                    fs.feature_importance_select_show()
                    if Embedded_method:
                        threshold = fs.cumulative_importance

                        # '特征' = feature (importance-table index column).
                        feature_importances = fs.feature_importances.set_index('特征', drop=False)

                        # Forward selection: grow the feature set by importance rank
                        # and record cross-validated r2 at each size.
                        features = []
                        scores = []
                        cumuImportance = []
                        for i in range(1, len(fs.features.columns) + 1):
                            features.append(feature_importances.iloc[:i, 0].values.tolist())
                            X_selected = fs.features[features[-1]]
                            score = CVS(fs.model, X_selected, fs.targets, cv=cv, scoring='r2').mean()

                            cumuImportance.append(feature_importances.loc[features[-1][-1], 'cumulative_importance'])
                            scores.append(score)
                        cumu_importance = np.array(cumuImportance)
                        scores = np.array(scores)
                        with plt.style.context(['nature', 'no-latex']):
                            fig, ax = plt.subplots()
                            ax = plt.plot(cumu_importance, scores, 'o-')
                            plt.xlabel("cumulative feature importance")
                            plt.ylabel("r2")
                            st.pyplot(fig)
            elif inputs['model'] == 'LassoRegressor':

                fs.model = Lasso(random_state=inputs['random state'])

                with col2:
                    option_cumulative_importance = st.slider('cumulative importance threshold', 0.0, 1.0, 0.95)
                    Embedded_method = st.checkbox('Embedded method', False)
                    if Embedded_method:
                        cv = st.number_input('cv', 1, 20, 5)

                with st.container():
                    button_train = st.button('train', use_container_width=True)
                if button_train:

                    fs.LassoRegressor()

                    fs.identify_zero_low_importance(option_cumulative_importance)
                    fs.feature_importance_select_show()
                    if Embedded_method:

                        threshold = fs.cumulative_importance

                        feature_importances = fs.feature_importances.set_index('特征', drop=False)

                        # Same forward-selection sweep as the LinearRegressor branch.
                        features = []
                        scores = []
                        cumuImportance = []
                        for i in range(1, len(fs.features.columns) + 1):
                            features.append(feature_importances.iloc[:i, 0].values.tolist())
                            X_selected = fs.features[features[-1]]
                            score = CVS(fs.model, X_selected, fs.targets, cv=cv, scoring='r2').mean()

                            cumuImportance.append(feature_importances.loc[features[-1][-1], 'cumulative_importance'])
                            scores.append(score)
                        cumu_importance = np.array(cumuImportance)
                        scores = np.array(scores)
                        with plt.style.context(['nature', 'no-latex']):
                            fig, ax = plt.subplots()
                            ax = plt.plot(cumu_importance, scores, 'o-')
                            plt.xlabel("cumulative feature importance")
                            plt.ylabel("r2")
                            st.pyplot(fig)

            elif inputs['model'] == 'RidgeRegressor':

                fs.model = Ridge(random_state=inputs['random state'])

                with col2:
                    option_cumulative_importance = st.slider('cumulative importance threshold', 0.0, 1.0, 0.95)
                    Embedded_method = st.checkbox('Embedded method', False)
                    if Embedded_method:
                        cv = st.number_input('cv', 1, 20, 5)
                with st.container():
                    button_train = st.button('train', use_container_width=True)
                if button_train:
                    fs.RidgeRegressor()
                    fs.identify_zero_low_importance(option_cumulative_importance)
                    fs.feature_importance_select_show()
                    if Embedded_method:

                        threshold = fs.cumulative_importance

                        feature_importances = fs.feature_importances.set_index('特征', drop=False)

                        # Same forward-selection sweep as the LinearRegressor branch.
                        features = []
                        scores = []
                        cumuImportance = []
                        for i in range(1, len(fs.features.columns) + 1):
                            features.append(feature_importances.iloc[:i, 0].values.tolist())
                            X_selected = fs.features[features[-1]]
                            score = CVS(fs.model, X_selected, fs.targets, cv=cv, scoring='r2').mean()

                            cumuImportance.append(feature_importances.loc[features[-1][-1], 'cumulative_importance'])
                            scores.append(score)
                        cumu_importance = np.array(cumuImportance)
                        scores = np.array(scores)
                        with plt.style.context(['nature', 'no-latex']):
                            fig, ax = plt.subplots()
                            ax = plt.plot(cumu_importance, scores, 'o-')
                            plt.xlabel("cumulative feature importance")
                            plt.ylabel("r2")
                            st.pyplot(fig)
            # NOTE(review): dead code -- this condition duplicates the
            # 'LassoRegressor' elif above, so this branch is unreachable.
            elif inputs['model'] == 'LassoRegressor':

                fs.model = Lasso(random_state=inputs['random state'])

                with col2:
                    option_cumulative_importance = st.slider('cumulative importance threshold', 0.0, 1.0, 0.95)
                    Embedded_method = st.checkbox('Embedded method', False)
                    if Embedded_method:
                        cv = st.number_input('cv', 1, 20, 5)
                with st.container():
                    button_train = st.button('train', use_container_width=True)
                if button_train:

                    fs.LassoRegressor()

                    fs.identify_zero_low_importance(option_cumulative_importance)
                    fs.feature_importance_select_show()
                    if Embedded_method:

                        threshold = fs.cumulative_importance

                        feature_importances = fs.feature_importances.set_index('特征', drop=False)

                        features = []
                        scores = []
                        cumuImportance = []
                        for i in range(1, len(fs.features.columns) + 1):
                            features.append(feature_importances.iloc[:i, 0].values.tolist())
                            X_selected = fs.features[features[-1]]
                            score = CVS(fs.model, X_selected, fs.targets, cv=cv, scoring='r2').mean()

                            cumuImportance.append(feature_importances.loc[features[-1][-1], 'cumulative_importance'])
                            scores.append(score)
                        cumu_importance = np.array(cumuImportance)
                        scores = np.array(scores)
                        with plt.style.context(['nature', 'no-latex']):
                            fig, ax = plt.subplots()
                            ax = plt.plot(cumu_importance, scores, 'o-')
                            plt.xlabel("cumulative feature importance")
                            plt.ylabel("r2")
                            st.pyplot(fig)

            elif inputs['model'] == 'RandomForestRegressor':

                fs.model = RFR(criterion=inputs['criterion'], n_estimators=inputs['nestimators'],
                               random_state=inputs['random state'], max_depth=inputs['max depth'],
                               min_samples_leaf=inputs['min samples leaf'],
                               min_samples_split=inputs['min samples split'], warm_start=inputs['warm start'],
                               n_jobs=inputs['njobs'])
                with col2:
                    option_cumulative_importance = st.slider('cumulative importance threshold', 0.5, 1.0, 0.95)
                    Embedded_method = st.checkbox('Embedded method', False)
                    if Embedded_method:
                        cv = st.number_input('cv', 1, 20, 5)

                with st.container():
                    button_train = st.button('train', use_container_width=True)
                if button_train:

                    fs.RandomForestRegressor()

                    fs.identify_zero_low_importance(option_cumulative_importance)
                    fs.feature_importance_select_show()

                    if Embedded_method:

                        threshold = fs.cumulative_importance

                        feature_importances = fs.feature_importances.set_index('特征', drop=False)

                        features = []
                        scores = []
                        cumuImportance = []
                        for i in range(1, len(fs.features.columns) + 1):
                            features.append(feature_importances.iloc[:i, 0].values.tolist())
                            X_selected = fs.features[features[-1]]
                            score = CVS(fs.model, X_selected, fs.targets, cv=cv, scoring='r2').mean()

                            cumuImportance.append(feature_importances.loc[features[-1][-1], 'cumulative_importance'])
                            scores.append(score)
                        cumu_importance = np.array(cumuImportance)
                        scores = np.array(scores)
                        with plt.style.context(['nature', 'no-latex']):
                            fig, ax = plt.subplots()
                            ax = plt.plot(cumu_importance, scores, 'o-')
                            plt.xlabel("cumulative feature importance")
                            plt.ylabel("r2")
                            st.pyplot(fig)

            st.write('---')