import pandas as pd
import numpy as np
from scipy import optimize



def normalization(data: np.ndarray) -> np.ndarray:
    """Min-max normalise each column of ``data`` into [0, 1].

    Columns are treated independently: each value is mapped to
    ``(v - col_min) / (col_max - col_min)``.

    Fix: a constant column (``max == min``) previously produced a
    divide-by-zero (NaN/inf); such columns now normalise to 0.0.
    """
    col_max = np.max(data, axis=0)
    col_min = np.min(data, axis=0)
    span = col_max - col_min
    # Substitute 1.0 for zero spans so the division is always defined,
    # then force those (constant) columns to 0.
    safe_span = np.where(span == 0, 1.0, span)
    return np.where(span == 0, 0.0, (data - col_min) / safe_span)

def entropyWeight(data: np.ndarray) -> np.ndarray:
    """Composite score per row of ``data`` using the entropy weight method.

    Each column is a criterion, each row a sample.  Criteria that vary
    more across samples (lower information entropy) receive larger
    weights; the returned vector is the weighted sum of the shifted,
    normalised data (shape ``(n_samples,)``).

    Fix: the entropy normalisation constant was hard-coded as
    ``np.log(4)``, which is only correct for exactly 4 samples; the
    entropy-weight method divides by ``ln(n_samples)``.
    """
    # Shift by +1 so every entry is strictly positive (log-safe).
    data = normalization(data) + 1
    n_samples = data.shape[0]
    # Proportion of each sample within its criterion column.
    p = data / np.sum(data, axis=0)
    # Information entropy per criterion, normalised by ln(n_samples).
    # (Column-wise sum replaces the original full matrix product, whose
    # diagonal was all that was used.)
    e = -np.sum(p * np.log(p), axis=0) / np.log(n_samples)
    g = 1.0 - e            # degree of divergence per criterion
    w = g / np.sum(g)      # entropy weights, sum to 1
    return np.dot(data, w)

def SBM(x: np.ndarray, y_g: np.ndarray, y_b: np.ndarray) -> tuple[list, np.ndarray]:
    """Slacks-based measure (SBM) DEA model with undesirable outputs.

    Parameters
    ----------
    x   : (m, n) array of inputs, one column per DMU.
    y_g : (s1, n) array of desirable (good) outputs.
    y_b : (s2, n) array of undesirable (bad) outputs.

    Returns
    -------
    theta : list of n efficiency scores (the LP objective values).
    slack : (n, m+s1+s2) array of slack values, de-scaled by the
            Charnes-Cooper multiplier t.

    The LP decision vector is
    [λ_1..λ_n, S^-_1..S^-_m, S^g_1..S^g_s1, S^b_1..S^b_s2, t].
    """
    m, n = x.shape
    s1 = y_g.shape[0]
    s2 = y_b.shape[0]
    theta = []  # efficiency score (objective value) for each DMU
    slack = []  # de-scaled slacks for each DMU
    for i in range(n):
        # Objective: minimise  t - (1/m) Σ_k S^-_k / x_ki  (linearised SBM ratio).
        f = np.concatenate([np.zeros(n), -1 / (m * x[:, i]),
                            np.zeros(s1 + s2), np.array([1])])

        # Input balance: x·λ + S^- = t·x_i
        Aeq1 = np.hstack([x,
                          np.identity(m),
                          np.zeros((m, s1 + s2)),
                          -x[:, i, None]])

        # Good-output balance: y_g·λ - S^g = t·y_g,i
        Aeq2 = np.hstack([y_g,
                          np.zeros((s1, m)),
                          -np.identity(s1),
                          np.zeros((s1, s2)),
                          -y_g[:, i, None]])

        # Bad-output balance: y_b·λ + S^b = t·y_b,i
        Aeq3 = np.hstack([y_b,
                          np.zeros((s2, m)),
                          np.zeros((s2, s1)),
                          np.identity(s2),
                          -y_b[:, i, None]])

        # Charnes-Cooper normalisation: t + (1/(s1+s2)) Σ slack/y = 1
        Aeq4 = np.hstack([np.zeros(n),
                          np.zeros(m),
                          1 / ((s1 + s2) * y_g[:, i]),
                          1 / ((s1 + s2) * y_b[:, i]),
                          np.array([1])]).reshape(1, -1)

        Aeq = np.vstack([Aeq1, Aeq2, Aeq3, Aeq4])
        beq = np.concatenate([np.zeros(m + s1 + s2), np.array([1])])
        bounds = [(0, None)] * (n + m + s1 + s2 + 1)
        res = optimize.linprog(c=f, A_eq=Aeq, b_eq=beq, bounds=bounds, method='highs')
        # Fail loudly instead of silently appending None scores.
        if not res.success:
            raise RuntimeError(f"SBM linprog failed for DMU {i}: {res.message}")
        theta.append(res.fun)
        # BUG FIX: the slacks start at index n (after the n λ variables),
        # not at the hard-coded offset 8; divide by t (last variable) to
        # undo the Charnes-Cooper scaling.
        t = res.x[-1]
        slack.append((res.x[n:-1] / t).reshape(1, -1))
    return theta, np.vstack(slack)

def SuperSBM(x: np.ndarray, y_g: np.ndarray, y_b: np.ndarray) -> tuple[list, np.ndarray]:
    """Super-efficiency SBM DEA model with undesirable outputs.

    Same data layout as :func:`SBM`.  Each DMU is excluded from its own
    reference set, so efficient DMUs obtain scores >= 1 and can be ranked.

    Parameters
    ----------
    x   : (m, n) array of inputs, one column per DMU.
    y_g : (s1, n) array of desirable (good) outputs.
    y_b : (s2, n) array of undesirable (bad) outputs.

    Returns
    -------
    theta : list of n super-efficiency scores (LP objective values).
    slack : (n, m+s1+s2) array of slacks, de-scaled by the
            Charnes-Cooper multiplier t.

    The LP decision vector is
    [λ_1..λ_n, S^-_1..S^-_m, S^g_1..S^g_s1, S^b_1..S^b_s2, t].
    """
    m, n = x.shape
    s1 = y_g.shape[0]
    s2 = y_b.shape[0]
    theta = []  # super-efficiency score (objective value) for each DMU
    slack = []  # de-scaled slacks for each DMU
    for i in range(n):
        # Objective: minimise  t + (1/m) Σ_k S^-_k / x_ki.
        f = np.concatenate([np.zeros(n), 1 / (m * x[:, i]),
                            np.zeros(s1 + s2), np.array([1])])

        # Charnes-Cooper normalisation: t - (1/(s1+s2)) Σ slack/y = 1
        Aeq = np.hstack([np.zeros(n),
                         np.zeros(m),
                         -1 / ((s1 + s2) * y_g[:, i]),
                         -1 / ((s1 + s2) * y_b[:, i]),
                         np.array([1])]).reshape(1, -1)
        beq = np.array([1])

        # Input side: x·λ - S^- <= t·x_i
        Aub1 = np.hstack([x,
                          -np.identity(m),
                          np.zeros((m, s1 + s2)),
                          -x[:, i, None]])

        # Good outputs: y_g·λ + S^g >= t·y_g,i  (written as <=)
        Aub2 = np.hstack([-y_g,
                          np.zeros((s1, m)),
                          -np.identity(s1),
                          np.zeros((s1, s2)),
                          y_g[:, i, None]])

        # Bad outputs: y_b·λ - S^b <= t·y_b,i
        Aub3 = np.hstack([y_b,
                          np.zeros((s2, m)),
                          np.zeros((s2, s1)),
                          -np.identity(s2),
                          -y_b[:, i, None]])

        Aub = np.vstack([Aub1, Aub2, Aub3])
        # Exclude DMU i from its own reference set (super-efficiency).
        Aub[:, i] = 0
        bub = np.zeros(m + s1 + s2)
        # (Renamed the comprehension index that shadowed the loop variable.)
        bounds = [(0, None)] * (n + m + s1 + s2 + 1)
        res = optimize.linprog(c=f, A_ub=Aub, b_ub=bub, A_eq=Aeq, b_eq=beq,
                               bounds=bounds, method='highs')
        # Fail loudly instead of silently appending None scores.
        if not res.success:
            raise RuntimeError(f"SuperSBM linprog failed for DMU {i}: {res.message}")
        theta.append(res.fun)
        # BUG FIX: the slacks start at index n (after the n λ variables),
        # not at the hard-coded offset 8; divide by t (last variable) to
        # undo the Charnes-Cooper scaling.
        t = res.x[-1]
        slack.append((res.x[n:-1] / t).reshape(1, -1))
    return theta, np.vstack(slack)

def evaluate(factory_list, total_invest, cost_data, ele_consum, wast_treat, cod_tn_data, direct_weight, emission_reduction_data, sludge_prod, indirect_data, indirect_weight):
    """Benefit evaluation of wastewater plants: entropy weighting + (Super-)SBM DEA.

    Spreadsheet column layout (0-based) the callers slice from:
        0  factory_name (factory name)
        1  total_invest (total investment)
        2-5  cost_data: electricity, chemicals, staff, other costs
        6  ele_consum (electricity consumption)
        7  wast_treat (treated wastewater volume)
        8-11 cod_tn_data: influent COD/TN, effluent COD/TN concentrations
        12-15 emission_reduction_data: COD / NH3-N / TN / TP reduction
        16 sludge_prod (sludge production)
        17+ indirect_data: chemical consumption (NaOH, H2SO4, PAC, PAM-,
            PAM+, H2O2, FeSO4, NaClO, ...)

    direct_weight is [COD weight, TN weight, regional electricity emission
    factor]; indirect_weight holds the per-chemical emission factors.

    Returns a list of dicts (one per factory) with the benefit score plus
    each input/output value and its slack.  Plants that are SBM-efficient
    (score >= 0.995) are ranked by their Super-SBM score; the rest keep
    the plain SBM score.
    """
    # Step 1: scale everything except the raw concentrations by the
    # treated wastewater volume, turning totals into per-unit intensities.
    total_invest = total_invest / wast_treat
    cost_data = cost_data / wast_treat
    ele_consum = ele_consum / wast_treat
    emission_reduction_data = emission_reduction_data / wast_treat
    sludge_prod = sludge_prod / wast_treat
    indirect_data = indirect_data / wast_treat

    # Step 2.1: entropy-weight composite indices (column vectors).
    cost_index = entropyWeight(cost_data).reshape(-1, 1)
    emission_reduction_index = entropyWeight(emission_reduction_data).reshape(-1, 1)

    # Step 2.2: greenhouse-gas emissions = direct part (COD/TN removed +
    # electricity, weighted by direct_weight) + indirect part (chemical
    # consumption weighted by indirect_weight).
    direct_emission = np.concatenate((cod_tn_data[:, 0:2] - cod_tn_data[:, 2:], ele_consum), axis=1)
    direct_emission = np.dot(direct_emission, direct_weight)
    indirect_emission = np.dot(indirect_data, indirect_weight)
    total_emission = (direct_emission + indirect_emission).reshape(-1, 1)
    # (Removed the unused `step_data` aggregate that the original built here.)

    # Step 3: (Super-)SBM DEA.  Inputs: investment + cost index;
    # good output: emission-reduction index; bad outputs: sludge + GHG.
    x = np.concatenate((total_invest, cost_index), axis=1).T
    y_g = emission_reduction_index.T
    y_b = np.concatenate((sludge_prod, total_emission), axis=1).T

    sbm_values, sbm_slack = SBM(x, y_g, y_b)
    super_sbm_values, super_sbm_slack = SuperSBM(x, y_g, y_b)

    resultSBM = []
    for i in range(len(factory_list)):
        # SBM scores saturate at 1.0 for efficient DMUs; switch to the
        # Super-SBM score (and its slacks) to discriminate among them.
        if sbm_values[i] >= 0.995:
            score, slacks = super_sbm_values[i], super_sbm_slack[i]
        else:
            score, slacks = sbm_values[i], sbm_slack[i]
        resultSBM.append(
            {
                'factory_name': factory_list[i],
                'benefit_score': score,
                'total_invest_input': total_invest[i][0],
                'total_invest_slack': slacks[0],
                'cost_index_input': cost_index[i][0],
                'cost_index_slack': slacks[1],
                'emission_reduction_index_input': emission_reduction_index[i][0],
                'emission_reduction_index_slack': slacks[2],
                'sludge_prod_input': sludge_prod[i][0],
                'sludge_prod_slack': slacks[3],
                'total_emission_input': total_emission[i][0],
                'total_emission_slack': slacks[4],
            }
        )
    return resultSBM



if __name__ == "__main__":
    # Manual smoke run: load the sample workbook and push it through the
    # evaluation pipeline.
    # NOTE(review): the return value of evaluate() is discarded here —
    # presumably this entry point is only used for debugging; confirm.
    excel_path = "eva_function_test/original_data1.xlsx"
    df = pd.read_excel(io=excel_path)
    original_data = df.to_numpy()

    factory_list = original_data[:,0] # Separate the factory list
    original_data = original_data[:, 1:].astype(np.float64) # Separate the original data

    # Column slices (after dropping the name column):
    # 0 total investment | 1-4 costs | 5 electricity | 6 treated volume |
    # 7-10 COD/TN in/out | 11-14 reduction data | 15 sludge | 16+ chemicals.
    cost_data = original_data[:, 1:5]
    emission_reduction_data = original_data[:, 11:15]

    # [COD weight, TN weight, regional emission factor]
    # NOTE(review): these constants look like GHG conversion factors
    # (GWPs x emission coefficients) — confirm against the data source.
    direct_weight = [0.005*28*0.001*1.2, 0.016*265*0.001*1.57, 0.8042]
    indirect_weight = [0.46, 0.16, 0.53, 1.5, 1.6, 1.6, 0.03, 0.99]

    evaluate(factory_list, 
             original_data[:, 0:1],    # total investment
             cost_data, 
             original_data[:, 5:6],    # electricity consumption
             original_data[:, 6:7],    # treated wastewater volume
             original_data[:, 7:11],   # COD/TN influent & effluent
             direct_weight,
             original_data[:, 11:15],  # emission reduction data
             original_data[:, 15:16],  # sludge production
             original_data[:, 16:],    # chemical consumption (indirect)
             indirect_weight
             )