import Bgolearn.BGOsampling as BGOS
# import sys
from prettytable import PrettyTable
# from sko.AFSA import AFSA
from streamlit_extras.colored_header import colored_header
from streamlit_option_menu import option_menu

from business.algorithm.mobo import Mobo4mat
from business.algorithm.utils import *


def run():
    """Streamlit page for active learning (single- and multi-objective).

    A sidebar menu switches between two sub-pages:

    * "单目标主动学习" — single-objective Bayesian sampling (Bgolearn):
      fits a surrogate on the uploaded dataset and recommends new sample
      points from the uploaded virtual/design space according to the
      chosen acquisition criterion.
    * "多目标主动学习" — multi-objective optimization (MOBO via Mobo4mat):
      plots the Pareto front of the current data and recommends new
      points by hypervolume improvement.

    Each sub-page expects exactly two uploaded CSV files: ``file_1`` the
    training dataset (feature columns first, target column(s) last) and
    ``file_2`` the virtual design-space samples.
    """
    with st.sidebar:
        sub_option = option_menu(None, ["单目标主动学习", "多目标主动学习"])

    if sub_option == "单目标主动学习":

        colored_header(label="单目标主动学习", description=" ", color_name="violet-90")

        file = st.file_uploader("Upload `.csv`file", type=['csv'], label_visibility="collapsed",
                                accept_multiple_files=True)
        if len(file) != 2:
            # Explain the expected upload layout until both files are present.
            table = PrettyTable(['file name', 'class', 'descriptor'])
            table.add_row(['file_1', 'dataset', 'data file'])
            table.add_row(['file_2', 'visual data', 'design space'])
            st.write(table)
        else:

            colored_header(label="数据信息", description=" ", color_name="violet-70")

            df = pd.read_csv(file[0])     # training data
            df_vs = pd.read_csv(file[1])  # virtual (design-space) samples
            check_string_NaN(df)

            nrow = st.slider("rows", 1, len(df), 5)
            df_nrow = df.head(nrow)
            st.write(df_nrow)

            colored_header(label="特征&目标", description=" ", color_name="violet-70")

            target_num = st.number_input('目标数量', min_value=1, max_value=10, value=1)

            col_feature, col_target = st.columns(2)

            # The last `target_num` columns are targets; the rest are features.
            features = df.iloc[:, :-target_num]
            targets = df.iloc[:, -target_num:]
            with col_feature:
                st.write(features.head())
            with col_target:
                st.write(targets.head())

            sp = SAMPLING(features, targets)

            colored_header(label="target", description=" ", color_name="violet-70")

            target_selected_option = st.selectbox('target', list(sp.targets))

            # Reduce to the single selected target column.
            sp.targets = sp.targets[target_selected_option]

            colored_header(label="Sampling", description=" ", color_name="violet-70")

            model_path = './models/active learning'

            template_alg = model_platform(model_path)

            inputs, col2 = template_alg.show()

            if inputs['model'] == 'BayeSampling':

                with col2:

                    sp.vsfeatures = df_vs
                    st.info('You have uploaded the visual sample point file.')
                    feature_name = sp.features.columns.tolist()
                    # Show only the hyperparameter widgets of the chosen
                    # acquisition criterion.
                    if inputs['sample criterion'] == 'Augmented Expected Improvement':
                        with st.expander('EI HyperParamters'):
                            alpha = st.slider('alpha', 0.0, 3.0, 1.0)
                            tao = st.slider('tao', 0.0, 1.0, 0.0)
                    if inputs['sample criterion'] == 'Expected Quantile Improvement':
                        with st.expander('EQI HyperParamters'):
                            beta = st.slider('beta', 0.2, 0.8, 0.5)
                            tao = st.slider('tao_new', 0.0, 1.0, 0.0)
                    if inputs['sample criterion'] == 'Upper confidence bound':
                        with st.expander('UCB HyperParamters'):
                            alpha = st.slider('alpha', 0.0, 3.0, 1.0)
                    if inputs['sample criterion'] == 'Probability of Improvement':
                        with st.expander('PoI HyperParamters'):
                            tao = st.slider('tao', 0.0, 0.3, 0.0)
                    if inputs['sample criterion'] == 'Predictive Entropy Search':
                        with st.expander('PES HyperParamters'):
                            sam_num = st.number_input('sample number', 100, 1000, 500)
                    if inputs['sample criterion'] == 'Knowledge Gradient':
                        with st.expander('Knowldge_G Hyperparameters'):
                            MC_num = st.number_input('MC number', 50, 300, 50)
                with st.expander('visual samples'):
                    st.write(sp.vsfeatures)
                    tmp_download_link = download_button(sp.vsfeatures, 'visual samples.csv', button_text='download')
                    st.markdown(tmp_download_link, unsafe_allow_html=True)
                Bgolearn = BGOS.Bgolearn()

                colored_header(label="Optimize", description=" ", color_name="violet-70")
                with st.container():
                    button_train = st.button('Train', use_container_width=True)
                if button_train:
                    # NOTE(review): 'heteroheneous' looks like a misspelling of
                    # 'heterogeneous', but it must match the option string emitted
                    # by the algorithm template — do not fix it here alone.
                    if inputs['noise std'] != 'heteroheneous':
                        # Homogeneous noise: a single std value from the UI.
                        Mymodel = Bgolearn.fit(data_matrix=sp.features, Measured_response=sp.targets,
                                               virtual_samples=sp.vsfeatures,
                                               opt_num=inputs['opt num'], min_search=inputs['min search'],
                                               noise_std=float(inputs['noise std']))
                    else:
                        # Heterogeneous noise: per-sample stds taken from a
                        # 'noise' column of the dataset.
                        if 'noise' in df.columns:
                            noise_std = df['noise'].values
                            Mymodel = Bgolearn.fit(data_matrix=sp.features, Measured_response=sp.targets,
                                                   virtual_samples=sp.vsfeatures,
                                                   opt_num=inputs['opt num'], min_search=inputs['min search'],
                                                   noise_std=noise_std)
                        else:
                            # Abort this rerun: without a fitted model the
                            # criterion dispatch below would raise NameError.
                            st.error("Column 'noise' does not exist")
                            st.stop()

                    # Dispatch on the selected acquisition criterion.
                    # NOTE(review): 'Expected Improvement algorith' (sic) must
                    # match the label produced by the algorithm template.
                    if inputs['sample criterion'] == 'Expected Improvement algorith':
                        res = Mymodel.EI()

                    elif inputs['sample criterion'] == 'Expected improvement with "plugin"':
                        res = Mymodel.EI_plugin()

                    elif inputs['sample criterion'] == 'Augmented Expected Improvement':
                        res = Mymodel.Augmented_EI(alpha=alpha, tao=tao)

                    elif inputs['sample criterion'] == 'Expected Quantile Improvement':
                        res = Mymodel.EQI(beta=beta, tao_new=tao)

                    elif inputs['sample criterion'] == 'Reinterpolation Expected Improvement':
                        res = Mymodel.Reinterpolation_EI()

                    elif inputs['sample criterion'] == 'Upper confidence bound':
                        res = Mymodel.UCB(alpha=alpha)

                    elif inputs['sample criterion'] == 'Probability of Improvement':
                        res = Mymodel.PoI(tao=tao)

                    elif inputs['sample criterion'] == 'Predictive Entropy Search':
                        res = Mymodel.PES(sam_num=sam_num)

                    elif inputs['sample criterion'] == 'Knowledge Gradient':
                        res = Mymodel.Knowledge_G(MC_num=MC_num)

                    # Classification-based criteria refit the model in
                    # classification mode before querying.
                    elif inputs['sample criterion'] == 'Least Confidence':

                        Mymodel = Bgolearn.fit(Mission='Classification', Classifier=inputs['Classifier'],
                                               data_matrix=sp.features, Measured_response=sp.targets,
                                               virtual_samples=sp.vsfeatures,
                                               opt_num=inputs['opt num'])
                        res = Mymodel.Least_cfd()

                    elif inputs['sample criterion'] == 'Margin Sampling':
                        Mymodel = Bgolearn.fit(Mission='Classification', Classifier=inputs['Classifier'],
                                               data_matrix=sp.features, Measured_response=sp.targets,
                                               virtual_samples=sp.vsfeatures,
                                               opt_num=inputs['opt num'])
                        res = Mymodel.Margin_S()

                    elif inputs['sample criterion'] == 'Entropy-based approach':
                        Mymodel = Bgolearn.fit(Mission='Classification', Classifier=inputs['Classifier'],
                                               data_matrix=sp.features, Measured_response=sp.targets,
                                               virtual_samples=sp.vsfeatures,
                                               opt_num=inputs['opt num'])
                        res = Mymodel.Entropy()

                    st.info('Recommended Sample')
                    # res[1] holds the recommended virtual-sample rows.
                    sp.sample_point = pd.DataFrame(res[1], columns=feature_name)
                    st.write(sp.sample_point)
                    tmp_download_link = download_button(sp.sample_point, 'recommended samples.csv',
                                                        button_text='download')
                    st.markdown(tmp_download_link, unsafe_allow_html=True)

    elif sub_option == "多目标主动学习":

        colored_header(label="多目标主动学习", description=" ", color_name="violet-90")
        file = st.file_uploader("Upload `.csv`file", type=['csv'], label_visibility="collapsed",
                                accept_multiple_files=True)
        if len(file) != 2:
            # Explain the expected upload layout until both files are present.
            table = PrettyTable(['file name', 'class', 'description'])
            table.add_row(['file_1', 'dataset', 'data file'])
            table.add_row(['file_2', 'visual data', 'design space'])
            st.write(table)
        else:
            colored_header(label="数据信息", description=" ", color_name="violet-70")
            df = pd.read_csv(file[0])

            df_vs = pd.read_csv(file[1])
            check_string_NaN(df_vs)
            check_string_NaN(df)

            nrow = st.slider("rows", 1, len(df), 5)
            df_nrow = df.head(nrow)
            st.write(df_nrow)
            colored_header(label="特征&目标", description=" ", color_name="violet-70")

            # Fixed at two objectives for the bi-objective MOBO workflow.
            target_num = st.number_input('目标数量', min_value=2, max_value=2, value=2)

            col_feature, col_target = st.columns(2)
            # The last `target_num` columns are targets; the rest are features.
            features = df.iloc[:, :-target_num]
            targets = df.iloc[:, -target_num:]
            with col_feature:
                st.write(features.head())
            with col_target:
                st.write(targets.head())

            col_feature, col_target = st.columns(2)

            # =================== model ====================================
            reg = REGRESSOR(features, targets)

            colored_header(label="target", description=" ", color_name="violet-70")
            target_selected_option = st.multiselect('target', list(reg.targets)[::-1], default=targets.columns.tolist())
            if len(target_selected_option) < 2:
                # Guard: the Pareto plot below indexes two selected targets.
                st.warning('Please select two targets.')
                st.stop()

            reg.targets = targets[target_selected_option]
            reg.Xtrain = features
            reg.Ytrain = targets
            feature_name = reg.Xtrain.columns
            colored_header(label="Sampling", description=" ", color_name="violet-30")
            model_path = './models/multi-obj'

            template_alg = model_platform(model_path)
            inputs, col2 = template_alg.show()

            if inputs['model'] == 'MOBO':

                mobo = Mobo4mat()

                with col2:

                    vs_features = df_vs

                    reg.Xtest = vs_features
                    st.info('You have uploaded the visual sample point file.')

                    # Scale training and virtual samples together so both live
                    # in the same feature space, then split them back apart.
                    if inputs['normalize'] == 'StandardScaler':
                        reg.X = pd.concat([reg.Xtrain, reg.Xtest])
                        reg.X, scaler = normalize(reg.X, "StandardScaler")
                        reg.X = pd.DataFrame(reg.X, columns=feature_name)
                        reg.Xtrain = reg.X.iloc[:len(reg.Xtrain), :]
                        reg.Xtest = reg.X.iloc[len(reg.Xtrain):, :].reset_index(drop=True)
                    elif inputs['normalize'] == 'MinMaxScaler':
                        reg.X = pd.concat([reg.Xtrain, reg.Xtest])
                        # FIX: this branch previously normalized with
                        # "StandardScaler", which made the MinMaxScaler
                        # inverse-transform below operate on the wrong scale.
                        reg.X, scaler = normalize(reg.X, "MinMaxScaler")
                        reg.X = pd.DataFrame(reg.X, columns=feature_name)
                        reg.Xtrain = reg.X.iloc[:len(reg.Xtrain), :]
                        reg.Xtest = reg.X.iloc[len(reg.Xtrain):, :].reset_index(drop=True)

                pareto_front = find_non_dominated_solutions(reg.targets.values, target_selected_option)
                pareto_front = pd.DataFrame(pareto_front, columns=target_selected_option)

                if inputs['objective'] == 'max':
                    # The Pareto helper assumes minimization: negate targets,
                    # find the front, then negate everything back for display.
                    reg.targets = - reg.targets
                    pareto_front = find_non_dominated_solutions(reg.targets.values, target_selected_option)
                    pareto_front = pd.DataFrame(pareto_front, columns=target_selected_option)
                    pareto_front = -pareto_front
                    reg.targets = -reg.targets

                col1, col2 = st.columns([2, 1])
                with col1:
                    with plt.style.context(['nature', 'no-latex']):
                        fig, ax = plt.subplots()
                        ax.plot(pareto_front[target_selected_option[0]], pareto_front[target_selected_option[1]], 'k--')
                        ax.scatter(reg.targets[target_selected_option[0]], reg.targets[target_selected_option[1]])
                        ax.set_xlabel(target_selected_option[0])
                        ax.set_ylabel(target_selected_option[1])
                        ax.set_title('Pareto front of visual space')
                        st.pyplot(fig)
                with col2:
                    pareto_front = pareto_front.reset_index(drop=True)
                    st.write(pareto_front)
                    tmp_download_link = download_button(pareto_front, 'Pareto_front.csv', button_text='download')
                    st.markdown(tmp_download_link, unsafe_allow_html=True)

                # Reference point for the hypervolume indicator, one
                # coordinate per selected objective.
                ref_point = []
                for i in range(len(target_selected_option)):
                    ref_point_loc = st.number_input(target_selected_option[i] + ' ref location', 0, 10000, 0)
                    ref_point.append(ref_point_loc)
                colored_header(label="Optimize", description=" ", color_name="violet-70")
                with st.container():
                    button_train = st.button('Opt', use_container_width=True)
                if button_train:
                    if reg.Xtrain.columns.tolist() != reg.Xtest.columns.tolist():
                        st.error('the feature number in Visual sample file is wrong')
                        st.stop()
                    HV_value, recommend_point, Ypred_recommend = mobo.fit(X=reg.Xtrain, y=reg.Ytrain,
                                                                          visual_data=reg.Xtest,
                                                                          method=inputs['method'],
                                                                          kernel_option=inputs['kernel'],
                                                                          number=inputs['num'],
                                                                          objective=inputs['objective'],
                                                                          ref_point=ref_point)
                    HV_value = pd.DataFrame(HV_value, columns=["HV value"])

                    recommend_point = pd.DataFrame(recommend_point, columns=feature_name)

                    # Map recommendations back to the original feature scale.
                    if inputs['normalize'] == 'StandardScaler':
                        recommend_point = inverse_normalize(recommend_point, scaler, "StandardScaler")
                    elif inputs['normalize'] == 'MinMaxScaler':
                        recommend_point = inverse_normalize(recommend_point, scaler, "MinMaxScaler")

                    st.write(recommend_point)
                    tmp_download_link = download_button(recommend_point, 'recommended.csv', button_text='download')
                    st.markdown(tmp_download_link, unsafe_allow_html=True)

                with st.expander('visual samples'):
                    # Show the virtual samples in their original (un-scaled) units.
                    if inputs['normalize'] == 'StandardScaler':
                        reg.Xtest = inverse_normalize(reg.Xtest, scaler, "StandardScaler")
                    elif inputs['normalize'] == 'MinMaxScaler':
                        reg.Xtest = inverse_normalize(reg.Xtest, scaler, "MinMaxScaler")
                    st.write(reg.Xtest)
                    tmp_download_link = download_button(reg.Xtest, 'visual samples.csv', button_text='download')
                    st.markdown(tmp_download_link, unsafe_allow_html=True)