# import sys
from prettytable import PrettyTable
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.algorithms.moo.nsga2 import SBX as nsgaSBX
from pymoo.algorithms.moo.sms import SMSEMOA
from pymoo.algorithms.soo.nonconvex.ga import GA
from pymoo.core.problem import ElementwiseProblem
from pymoo.operators.crossover.sbx import SBX
from pymoo.operators.mutation.pm import PM
from pymoo.optimize import minimize
from pymoo.termination import get_termination
from sko.DE import DE
from sko.PSO import PSO
# from sko.AFSA import AFSA
from sko.SA import SABoltzmann
from streamlit_extras.colored_header import colored_header
from streamlit_option_menu import option_menu

from business.algorithm.utils import *


def _inverse_transform(best_x, preprocess, scaler):
    """Undo the feature scaling on a recommended-sample DataFrame, if any was applied.

    ``scaler`` is only meaningful when ``preprocess`` names a scaler; for
    ``preprocess is None`` the frame is returned untouched.
    """
    if preprocess == 'StandardScaler':
        best_x = inverse_normalize(best_x, scaler, 'StandardScaler')
    elif preprocess == 'MinMaxScaler':
        best_x = inverse_normalize(best_x, scaler, 'MinMaxScaler')
    return best_x


def _offer_download(frame, filename):
    """Render a download link for ``frame`` under the given file name."""
    tmp_download_link = download_button(frame, filename, button_text='download')
    st.markdown(tmp_download_link, unsafe_allow_html=True)


def run():
    """Streamlit page: optimize feature vectors against uploaded surrogate models.

    Two sub-pages (chosen in the sidebar):

    * Single-objective — upload dataset CSV, bounds CSV, and one pickled model;
      optimize with PSO / GA / DE / SA.
    * Multi-objective — upload dataset CSV, bounds CSV, and two pickled models
      (one per objective); optimize with NSGA2 / SMSEMOA and show Pareto fronts.
    """
    with st.sidebar:
        sub_option = option_menu(None,
                                 ["Single-objective Surrogate Optimization", "Multi-objective Surrogate Optimization"])
    if sub_option == "Single-objective Surrogate Optimization":

        colored_header(label="Single-objective Surrogate Optimization", description=" ", color_name="violet-90")
        file = st.file_uploader("Upload `.pickle` model and `.csv` file", label_visibility="collapsed",
                                accept_multiple_files=True)
        if len(file) < 3:
            # Show the expected upload layout until all three files are present.
            table = PrettyTable(['file name', 'class', 'description'])
            table.add_row(['file_1', 'dataset', 'data file'])
            table.add_row(['file_2', 'boundary', 'feature design constraint'])
            table.add_row(['file_3', 'model', 'model'])
            st.write(table)
        else:
            df = pd.read_csv(file[0])
            check_string_NaN(df)

            colored_header(label="特征&目标", description=" ", color_name="violet-70")

            target_num = st.number_input('目标数量', min_value=1, max_value=10, value=1)

            col_feature, col_target = st.columns(2)
            # Convention: trailing ``target_num`` columns are targets, the rest features.
            features = df.iloc[:, :-target_num]
            targets = df.iloc[:, -target_num:]
            with col_feature:
                st.write(features.head())
            with col_target:
                st.write(targets.head())

            # File 2 carries the per-feature design bounds; one row of minima and
            # one row of maxima are displayed and used as the search box.
            df_var = pd.read_csv(file[1])
            features_name = df_var.columns.tolist()
            range_var = df_var.values
            vars_min = get_column_min(range_var)
            vars_max = get_column_max(range_var)
            vars_bound = np.concatenate([np.array(vars_min).reshape(1, -1),
                                         np.array(vars_max).reshape(1, -1)], axis=0)
            colored_header(label="feature design constraint", description=" ", color_name="violet-70")
            vars_bound = pd.DataFrame(vars_bound, columns=features_name)
            st.write(vars_bound)

            colored_header(label="Optimize", description=" ", color_name="violet-70")
            # NOTE(review): unpickling an uploaded file executes arbitrary code —
            # only accept models from trusted sources.
            model = pickle.load(file[2])
            model_path = './models/surrogate optimize'
            template_alg = model_platform(model_path)

            inputs, col2 = template_alg.show()
            inputs['lb'] = vars_min
            inputs['ub'] = vars_max
            scaler = None  # defined only when a preprocess is selected
            with col2:
                preprocess = st.selectbox('data preprocess', [None, 'StandardScaler', 'MinMaxScaler'])
                data = pd.concat([features, vars_bound])
                if preprocess in ('StandardScaler', 'MinMaxScaler'):
                    # Scale features and bounds together so the search box lives in
                    # the same (scaled) space as the model inputs; the two bound rows
                    # were appended last, so they are the tail of the scaled frame.
                    features, scaler = normalize(data, preprocess)
                    vars_bound = features.tail(2)
                    inputs['lb'] = vars_bound.iloc[0]
                    inputs['ub'] = vars_bound.iloc[1]
            if not len(inputs['lb']) == len(inputs['ub']) == inputs['n dim']:
                st.warning('the variable number should be %d' % vars_bound.shape[1])
            else:
                st.info("the variable number is correct")

            with st.container():
                button_train = st.button('Opt', use_container_width=True)

            if button_train:
                plot = customPlot()

                def opt_func(x):
                    """Objective wrapper: surrogate prediction, negated for maximization."""
                    x = x.reshape(1, -1)
                    y_pred = model.predict(x)
                    if inputs['objective'] == 'max':
                        y_pred = -y_pred
                    return y_pred

                # Format every recommended value to 4 decimals for display.
                # NOTE(review): this runs before the inverse transform, so the scaler
                # receives formatted strings — presumably coerced downstream; confirm
                # in ``inverse_normalize``.
                truncate_func = np.vectorize(lambda v: '{:,.4f}'.format(v))

                if inputs['model'] == 'PSO':

                    alg = PSO(func=opt_func, dim=inputs['n dim'], pop=inputs['size pop'], max_iter=inputs['max iter'],
                              lb=inputs['lb'], ub=inputs['ub'],
                              w=inputs['w'], c1=inputs['c1'], c2=inputs['c2'])

                    alg.run()
                    best_x = alg.gbest_x
                    best_y = alg.gbest_y

                    loss_history = alg.gbest_y_hist
                    if inputs['objective'] == 'max':
                        # History was recorded in negated (minimization) space.
                        loss_history = -np.array(loss_history)

                    st.info('Recommended Sample')
                    best_x = pd.DataFrame(truncate_func(best_x).reshape(1, -1), columns=features_name)
                    best_x = _inverse_transform(best_x, preprocess, scaler)

                    st.write(best_x)
                    _offer_download(best_x, 'recommended samples.csv')
                    if inputs['objective'] == 'max':
                        best_y = -best_y
                    st.info('PSO best_y: %s' % best_y.item())
                    plot.evolutionary_history(loss_history, 'PSO')
                    _offer_download(pd.DataFrame(loss_history), 'evolutionary history.csv')

                elif inputs['model'] == 'GA':

                    alg = GA(pop_size=inputs['size pop'],
                             crossover=SBX(prob=0.9, eta=15),
                             mutation=PM(eta=20),
                             eliminate_duplicates=True)

                    termination = get_termination("n_gen", inputs['max iter'])

                    class MyProblem(ElementwiseProblem):
                        """Single-objective pymoo problem backed by the surrogate model."""

                        def __init__(self):
                            super().__init__(n_var=inputs['n dim'],
                                             n_obj=1,
                                             xl=np.array(inputs['lb']),
                                             xu=np.array(inputs['ub']))

                        def _evaluate(self, x, out, *args, **kwargs):
                            x = x.reshape(1, -1)
                            y_pred = model.predict(x)
                            if inputs['objective'] == 'max':
                                y_pred = -y_pred
                            out["F"] = y_pred

                    problem = MyProblem()
                    res = minimize(problem,
                                   alg,
                                   termination,
                                   seed=1,
                                   save_history=True,
                                   verbose=False)
                    best_y = -res.F if inputs['objective'] == 'max' else res.F
                    best_x = res.X

                    # Per-generation optimum objective values, feasible solutions only.
                    hist_F = []
                    for algo in res.history:
                        opt = algo.opt
                        feas = np.where(opt.get("feasible"))[0]
                        hist_F.append(opt.get("F")[feas])
                    loss_history = np.array(hist_F).reshape(-1, 1)
                    if inputs['objective'] == 'max':
                        loss_history = -loss_history

                    st.info('Recommended Sample')
                    best_x = pd.DataFrame(truncate_func(best_x).reshape(1, -1), columns=features_name)
                    best_x = _inverse_transform(best_x, preprocess, scaler)

                    st.write(best_x)
                    _offer_download(best_x, 'recommended samples.csv')
                    st.info('GA best_y: %s' % best_y.item())
                    plot.evolutionary_history(loss_history, 'GA')
                    _offer_download(pd.DataFrame(loss_history), 'evolutionary history.csv')

                elif inputs['model'] == 'DE':
                    alg = DE(func=opt_func, n_dim=inputs['n dim'], size_pop=inputs['size pop'],
                             max_iter=inputs['max iter'], lb=inputs['lb'], ub=inputs['ub'],
                             prob_mut=inputs['prob mut'], F=inputs['F'])

                    best_x, best_y = alg.run()
                    loss_history = alg.generation_best_Y
                    if inputs['objective'] == 'max':
                        loss_history = -np.array(loss_history)

                    st.info('Recommended Sample')
                    # BUG FIX: the 1-D best_x must be reshaped to a single row before
                    # building the DataFrame (the original passed the raw vector, which
                    # raises for more than one feature), and the inverse transform must
                    # run on the final frame instead of being discarded.
                    best_x = pd.DataFrame(truncate_func(best_x).reshape(1, -1), columns=features_name)
                    best_x = _inverse_transform(best_x, preprocess, scaler)

                    st.write(best_x)
                    _offer_download(best_x, 'recommended samples.csv')
                    if inputs['objective'] == 'max':
                        best_y = -best_y
                    st.info('DE best_y: %s' % best_y.item())
                    plot.evolutionary_history(loss_history, 'DE')
                    _offer_download(pd.DataFrame(loss_history), 'evolutionary history.csv')

                elif inputs['model'] == 'SA':

                    # Start annealing from the midpoint of the search box.
                    x0 = calculate_mean(inputs['lb'], inputs['ub'])
                    alg = SABoltzmann(func=opt_func, x0=x0, T_max=inputs['T max'], q=inputs['q'], L=inputs['L'],
                                      max_stay_counter=inputs['max stay counter'], lb=inputs['lb'], ub=inputs['ub'])
                    best_x, best_y = alg.run()

                    loss_history = alg.best_y_history
                    if inputs['objective'] == 'max':
                        loss_history = -np.array(loss_history)

                    st.info('Recommended Sample')
                    best_x = pd.DataFrame(truncate_func(best_x).reshape(1, -1), columns=features_name)
                    best_x = _inverse_transform(best_x, preprocess, scaler)

                    st.write(best_x)
                    _offer_download(best_x, 'recommended samples.csv')
                    if inputs['objective'] == 'max':
                        best_y = -best_y
                    st.info('SA best_y: %s' % best_y.item())
                    plot.evolutionary_history(loss_history, 'SA')
                    _offer_download(pd.DataFrame(loss_history), 'evolutionary history.csv')

    elif sub_option == "Multi-objective Surrogate Optimization":

        colored_header(label="Multi-objective surrogate Optimization", description=" ", color_name="violet-90")
        file = st.file_uploader("Upload `.pickle` model and `.csv` file", label_visibility="collapsed",
                                accept_multiple_files=True)
        if len(file) < 4:
            # Show the expected upload layout until all four files are present.
            table = PrettyTable(['file name', 'class', 'description'])
            table.add_row(['file_1', 'dataset', 'data file'])
            table.add_row(['file_2', 'boundary', 'design feature constraint'])
            table.add_row(['file_3', 'model_1', 'obj1 model'])
            table.add_row(['file_4', 'model_2', 'obj2 model'])
            table.add_row(['file_5', '...', '...'])
            st.write(table)
        else:
            df = pd.read_csv(file[0])
            check_string_NaN(df)
            colored_header(label="特征&目标", description=" ", color_name="violet-70")

            # Currently limited to exactly two objectives (one model per objective).
            target_num = st.number_input('目标数量', min_value=2, max_value=2, value=2)

            col_feature, col_target = st.columns(2)
            features = df.iloc[:, :-target_num]
            targets = df.iloc[:, -target_num:]
            with col_feature:
                st.write(features.head())
            with col_target:
                st.write(targets.head())
            colored_header(label="target", description=" ", color_name="violet-70")
            target_selected_option = st.multiselect('target', list(targets)[::-1], default=targets.columns.tolist())

            df_var = pd.read_csv(file[1])
            features_name = df_var.columns.tolist()
            range_var = df_var.values
            vars_min = get_column_min(range_var)
            vars_max = get_column_max(range_var)
            vars_bound = np.concatenate([np.array(vars_min).reshape(1, -1),
                                         np.array(vars_max).reshape(1, -1)], axis=0)
            colored_header(label="Feature design constraint", description=" ", color_name="violet-70")
            vars_bound = pd.DataFrame(vars_bound, columns=features_name)
            st.write(vars_bound)

            colored_header(label="Optimize", description=" ", color_name="violet-70")
            # NOTE(review): unpickling uploaded files executes arbitrary code —
            # only accept models from trusted sources.
            model_1 = pickle.load(file[2])
            model_2 = pickle.load(file[3])
            model_path = './models/moo'
            template_alg = model_platform(model_path)

            inputs, col2 = template_alg.show()
            inputs['lb'] = vars_min
            inputs['ub'] = vars_max
            scaler = None  # defined only when a preprocess is selected

            with col2:
                preprocess = st.selectbox('data preprocess', [None, 'StandardScaler', 'MinMaxScaler'])
                data = pd.concat([features, vars_bound])
                if preprocess in ('StandardScaler', 'MinMaxScaler'):
                    features, scaler = normalize(data, preprocess)
                    vars_bound = features.tail(2)
                    # BUG FIX: keep the search bounds in the same (scaled) space as the
                    # model inputs — the single-objective page already does this, and the
                    # inverse transform of the results below assumes it.
                    inputs['lb'] = vars_bound.iloc[0]
                    inputs['ub'] = vars_bound.iloc[1]

            if not len(inputs['lb']) == len(inputs['ub']) == inputs['n dim']:
                st.warning('the variable number should be %d' % vars_bound.shape[1])
            else:
                st.info("the variable number is correct")

                # Preview the Pareto front of the existing dataset.
                # find_non_dominated_solutions assumes minimization, so for a
                # maximization objective negate before and after.
                if inputs['objective'] == 'max':
                    neg_targets = -targets
                    pareto_front = find_non_dominated_solutions(neg_targets.values, target_selected_option)
                    pareto_front = -pd.DataFrame(pareto_front, columns=target_selected_option)
                else:
                    pareto_front = find_non_dominated_solutions(targets.values, target_selected_option)
                    pareto_front = pd.DataFrame(pareto_front, columns=target_selected_option)

                col1, col2 = st.columns([2, 1])
                with col1:
                    with plt.style.context(['nature', 'no-latex']):
                        fig, ax = plt.subplots()
                        ax.plot(pareto_front[target_selected_option[0]], pareto_front[target_selected_option[1]], 'k--')
                        ax.scatter(targets[target_selected_option[0]], targets[target_selected_option[1]])
                        ax.set_xlabel(target_selected_option[0])
                        ax.set_ylabel(target_selected_option[1])
                        ax.set_title('Pareto front of visual space')
                        st.pyplot(fig)
                with col2:
                    pareto_front = pareto_front.reset_index(drop=True)
                    st.write(pareto_front)
                    _offer_download(pareto_front, 'Pareto_front.csv')

            with st.container():
                button_train = st.button('Opt', use_container_width=True)

            if button_train:
                plot = customPlot()
                alg = NSGA2(
                    pop_size=inputs['size pop'],
                    crossover=nsgaSBX(prob=0.9, eta=15),
                    mutation=PM(eta=20),
                    eliminate_duplicates=True
                )
                if inputs['model'] == 'SMSEMOA':
                    alg = SMSEMOA()

                termination = get_termination("n_gen", inputs['max iter'])

                class MyProblem(ElementwiseProblem):
                    """Two-objective pymoo problem backed by the two surrogate models."""

                    def __init__(self):
                        super().__init__(n_var=inputs['n dim'],
                                         n_obj=2,
                                         xl=np.array(inputs['lb']),
                                         xu=np.array(inputs['ub']))

                    def _evaluate(self, x, out, *args, **kwargs):
                        x = x.reshape(1, -1)
                        y1_pred = model_1.predict(x)
                        y2_pred = model_2.predict(x)
                        if inputs['objective'] == 'max':
                            y1_pred = -y1_pred
                            y2_pred = -y2_pred
                        out["F"] = [y1_pred, y2_pred]

                problem = MyProblem()
                res = minimize(problem,
                               alg,
                               termination,
                               seed=inputs['random state'],
                               save_history=True,
                               verbose=False)
                # NOTE(review): the two objective columns are swapped before plotting —
                # presumably to line up with the displayed target order; confirm this
                # matches how model_1/model_2 map onto the selected targets.
                res.F[:, [0, 1]] = res.F[:, [1, 0]]
                if inputs['objective'] == 'max':
                    # Combine dataset and results in negated (minimization) space,
                    # then flip every frame back to the original maximization space.
                    neg_targets = -targets
                    iter_data = np.concatenate([neg_targets.values, res.F], axis=0)
                    iter_pareto_front = -pd.DataFrame(
                        find_non_dominated_solutions(iter_data, target_selected_option),
                        columns=target_selected_option)
                    pareto_front = -pd.DataFrame(
                        find_non_dominated_solutions(neg_targets.values, target_selected_option),
                        columns=target_selected_option)
                    best_y = -res.F
                else:
                    best_y = res.F
                    iter_data = np.concatenate([targets.values, best_y], axis=0)
                    iter_pareto_front = pd.DataFrame(
                        find_non_dominated_solutions(iter_data, target_selected_option),
                        columns=target_selected_option)

                with plt.style.context(['nature', 'no-latex']):
                    fig, ax = plt.subplots()
                    ax.plot(iter_pareto_front[target_selected_option[0]], iter_pareto_front[target_selected_option[1]],
                            'r--')
                    ax.plot(pareto_front[target_selected_option[0]], pareto_front[target_selected_option[1]], 'k--')
                    ax.scatter(targets[target_selected_option[0]], targets[target_selected_option[1]])
                    ax.scatter(best_y[:, 0], best_y[:, 1])
                    ax.set_xlabel(target_selected_option[0])
                    ax.set_ylabel(target_selected_option[1])
                    ax.set_title('Pareto front of visual space')
                    st.pyplot(fig)
                best_x = res.X

                st.info('Recommended Sample')
                truncate_func = np.vectorize(lambda v: '{:,.4f}'.format(v))
                best_x = pd.DataFrame(truncate_func(best_x), columns=features_name)
                best_x = _inverse_transform(best_x, preprocess, scaler)

                col1, col2 = st.columns([3, 1])
                with col1:
                    best_y = pd.DataFrame(best_y, columns=targets.columns.tolist())
                    data = pd.concat([best_x, best_y], axis=1)
                    st.write(data)
                    _offer_download(data, 'recommended samples.csv')
                with col2:
                    iter_pareto_front = iter_pareto_front.reset_index(drop=True)
                    st.write(iter_pareto_front)
                    _offer_download(iter_pareto_front, 'iter_pareto_front.csv')