import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score, mean_squared_error, r2_score, mean_squared_log_error, mean_absolute_error
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from scipy.spatial import distance
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, BaggingRegressor, ExtraTreesRegressor
import sklearn.model_selection as ms
from sklearn.neighbors import KNeighborsRegressor
import xgboost as xgb
from sklearn.linear_model import Lasso, Ridge, ElasticNet
import datetime
from SKO.AbstractDPJob import AbstractDPJob
class Predict_FESJob(AbstractDPJob):
    """Predict the molten-iron sulphur content for one blast furnace.

    Strategy: scale the furnace's cleaned history together with the
    requested operating point, take the ``neighbors_num`` historical rows
    nearest (Euclidean distance in scaled feature space) to that point,
    fit a small regressor on those neighbours only, and predict.
    """

    def __init__(self,
                 p_bf_no=None, p_avg_iron_temp=None, p_avg_c_s_value=None,
                 p_compute_slag_rate=None, p_compute_fill_s_value=None):
        """Store the prediction inputs.

        :param p_bf_no: blast-furnace number; selects the history file
        :param p_avg_iron_temp: average iron temperature
        :param p_avg_c_s_value: average C/S value
        :param p_compute_slag_rate: computed slag rate
        :param p_compute_fill_s_value: computed charged-sulphur value
        """
        super(Predict_FESJob, self).__init__()
        self.bf_no = p_bf_no
        self.avg_iron_temp = p_avg_iron_temp
        self.avg_c_s_value = p_avg_c_s_value
        self.compute_slag_rate = p_compute_slag_rate
        self.compute_fill_s_value = p_compute_fill_s_value

    def execute(self):
        """Job entry point; delegates to :meth:`do_execute`."""
        return self.do_execute()

    @staticmethod
    def _clean_data(df, gamma):
        """Drop outlier rows with an IQR fence on every column but PROD_DATE.

        A row survives only if, for each column, the value satisfies
        ``q1 - gamma*IQR <= v < q3 + gamma*IQR`` and ``v > 0``.
        Replaces the original exec()-built query string (running generated
        source through exec is fragile and unsafe) with a boolean mask.
        """
        mask = pd.Series(True, index=df.index)
        for col in df.columns:
            if col == 'PROD_DATE':  # date column carries no numeric fence
                continue
            q1 = df[col].quantile(0.25)
            q3 = df[col].quantile(0.75)
            iqr_val = q3 - q1
            mask &= (df[col] >= q1 - gamma * iqr_val)
            mask &= (df[col] < q3 + gamma * iqr_val)
            mask &= (df[col] > 0)
        return df[mask].reset_index(drop=True)

    @staticmethod
    def _make_model(modelname):
        """Instantiate the configured regressor; unknown names fall back
        to plain linear regression (same default as before)."""
        factories = {
            'Linear': LinearRegression,
            'XGB': xgb.XGBRegressor,
            'RandomForest': RandomForestRegressor,
            'GradientBoosting': GradientBoostingRegressor,
            'AdaBoost': AdaBoostRegressor,
            'Bagging': BaggingRegressor,
            'DecisionTree': DecisionTreeRegressor,
            'ExtraTrees': ExtraTreesRegressor,
            'KNeighbors': KNeighborsRegressor,
            'Lasso': lambda: Lasso(alpha=0.1),
            'Ridge': lambda: Ridge(alpha=1.0),
            'ElasticNet': lambda: ElasticNet(alpha=1.0, l1_ratio=0.5),
        }
        return factories.get(modelname, LinearRegression)()

    def do_execute(self):
        """Run the sulphur prediction and return it as a float.

        Reads the model hyper-parameters and the furnace history from
        Excel files (placeholders for the SQL queries noted inline),
        cleans the history, scales features, and regresses on the
        nearest neighbours of the requested operating point.
        """
        super(Predict_FESJob, self).do_execute()
        start = datetime.datetime.now()
        # Input parameters for the molten-iron sulphur prediction.
        dict_input = {
            'AVG_IRON_TEMP': self.avg_iron_temp,
            'AVG_C_S_VALUE': self.avg_c_s_value,
            'COMPUTE_SLAG_RATE': self.compute_slag_rate,
            'COMPUTE_FILL_S_VALUE': self.compute_fill_s_value,
        }
        # Prediction hyper-parameters.
        # TODO: replace local file with the intended SQL query.
        df_coef = pd.read_excel('铁水硫模型参数.xlsx')
        df_coef.columns = df_coef.columns.str.upper()
        gamma = df_coef.loc[0]['GAMMA']
        # Excel numerics may come back as float; .head() needs an int.
        neighbors_num = int(df_coef.loc[0]['NEIGHBORS_NUM'])
        guiyihua = df_coef.loc[0]['GUIYIHUA']  # scaler name ("normalisation")
        modelname = df_coef.loc[0]['MODELNAME']
        # Historical data for this blast furnace.
        # TODO: replace local file with the intended SQL query.
        xlsx_name = 'D:/repos/sicost/fe_s_' + str(self.bf_no) + '.xlsx'
        df0 = pd.read_excel(xlsx_name)
        df0.columns = df0.columns.str.upper()
        df0['PROD_DATE'] = df0['PROD_DATE'].astype(str)
        df0_train = df0.reset_index(drop=True).drop(['UNIT_NO'], axis=1)
        # Outlier removal, then drop columns not used as features.
        df_clean = self._clean_data(df0_train, gamma)
        df_clean = df_clean.drop(['PROD_DATE', 'SUM_CACULATE_IRON_WGT'],
                                 axis=1)
        y_hist = df_clean['AVG_S_VALUE']
        # Append the point to predict so it is scaled consistently with
        # the history. DataFrame.append was removed in pandas 2.0, so
        # build a one-row frame and concat instead.
        features = df_clean.drop(labels=['AVG_S_VALUE'], axis=1,
                                 inplace=False)
        X_all = pd.concat([features, pd.DataFrame([dict_input])],
                          ignore_index=True)
        # Scale all features with the configured scaler.
        if guiyihua == 'StandardScaler':
            transfer = StandardScaler()
        else:
            transfer = MinMaxScaler()
        df_X_trans = pd.DataFrame(transfer.fit_transform(X_all))
        # Euclidean distance from each historical row to the query point,
        # vectorised (replaces a per-row apply + scipy call).
        input_X = df_X_trans.iloc[-1:]
        query_vec = df_X_trans.iloc[-1].to_numpy()
        df_X_new = df_X_trans.iloc[:-1].copy()
        df_X_new['distance'] = np.linalg.norm(
            df_X_new.to_numpy() - query_vec, axis=1)
        df_X_new['y'] = y_hist  # indices align: both reset to 0..n-1
        df_sorted = df_X_new.sort_values(by='distance')
        df_sorted = df_sorted.reset_index(drop=True)
        # Fit the chosen regressor on the nearest neighbours only.
        df_head = df_sorted.head(neighbors_num)
        model = self._make_model(modelname)
        X = df_head.drop(labels=['distance', 'y'], axis=1,
                         inplace=False).values
        y = df_head['y'].values
        model.fit(X, y)
        y_pred = model.predict(input_X.values)
        # float() on a size-1 ndarray is deprecated; index explicitly.
        y_pred_output = float(y_pred[0])
        print(y_pred_output)
        # total_seconds() also counts the days component, unlike .seconds.
        elapsed = (datetime.datetime.now() - start).total_seconds()
        print("Time Used 4 All ----->>>> %f seconds" % (elapsed))
        print('finish')
        return y_pred_output
