import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score, mean_squared_error, r2_score, mean_squared_log_error, mean_absolute_error
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.spatial import distance
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, BaggingRegressor, \
    ExtraTreesRegressor
import sklearn.model_selection as ms
from sklearn.neighbors import KNeighborsRegressor
import xgboost as xgb
from sklearn.linear_model import Lasso, Ridge, ElasticNet
import datetime

# Input parameters passed in to the hot-metal sulfur prediction interface.

mode = 1  # run-mode selector (semantics not visible in this file — TODO confirm)
bf_no = 1  # blast-furnace number; 'all' would disable the per-furnace SQL filter below
avg_iron_temp = 1511  # query point 1: average hot-metal temperature
avg_c_s_value = 122  # query point 1: average C/S value — TODO confirm units
compute_slag_rate = 303  # query point 1: computed slag rate
compute_fill_s_value = 349  # query point 1: computed charged sulfur load
y1 = 31  # known AVG_S_VALUE at query point 1; final output is y1 plus a predicted delta
avg_iron_temp2 = 1511  # query point 2 ("what-if"): same four features, no known label
avg_c_s_value2 = 122
compute_slag_rate2 = 303
compute_fill_s_value2 = 380
# dict_input = {}
# dict_input['AVG_IRON_TEMP'] = avg_iron_temp
# dict_input['AVG_C_S_VALUE'] = avg_c_s_value
# dict_input['COMPUTE_SLAG_RATE'] = compute_slag_rate
# dict_input['COMPUTE_FILL_S_VALUE'] = compute_fill_s_value
# #读取预测参数
#
# gamma = 3
# neighbors_num = 4
# guiyihua = 'MinMaxScaler'
# modelname = 'ElasticNet'
#
# #读取该BF的历史数据
# #SQL查询，本地暂时使用文件
# start = datetime.datetime.now()
# delta_day2 = 10
# delta_day1 = delta_day2 + 365
# p_day_2 = (start - datetime.timedelta(days=delta_day2)).strftime("%Y%m%d")
# p_day_1 = (start - datetime.timedelta(days=delta_day1)).strftime("%Y%m%d")
#
#
# xlsx_name = 'D:/repos/sicost/fe_s_' + str(bf_no) +'.xlsx'
# df0 = pd.read_excel(xlsx_name)
#
# df0.columns = df0.columns.str.upper()
# df0['PROD_DATE'] = df0['PROD_DATE'].astype(str)
# df0_train = df0.copy()
# df0_train = df0_train[df0_train['PROD_DATE'] >= p_day_1]
#
# df0_train = df0_train.reset_index(drop=True)
# df0_train.drop(['UNIT_NO'], axis=1, inplace=True)
# #数据清理
# def clean_data(df, gamma):
#     column_name_list = df.columns.tolist()
#     column_name_list.remove('PROD_DATE')
#     column_name_num = len(column_name_list)
#     clean_str_start = 'df_new = df['
#     clean_str_end = ']'
#     ldict1 = {}
#     for i in range(0, column_name_num):
#         print(i)
#         print(column_name_list[i])
#         column_name_tmp = column_name_list[i]
#         exec("q1_{} = df['{}'].quantile(0.25)".format(i, column_name_tmp), locals(), ldict1)
#         exec("q3_{} = df['{}'].quantile(0.75)".format(i, column_name_tmp), locals(), ldict1)
#         exec("iqr_val_{} = q3_{} - q1_{}".format(i, i, i), locals(), ldict1)
#         exec('''clean_str1 = "(df['{}'] >= ldict1['q1_{}'] - gamma * ldict1['iqr_val_{}'])"'''.format(column_name_tmp, i, i), locals(), ldict1)
#         exec('''clean_str2 = "(df['{}'] < ldict1['q3_{}'] + gamma * ldict1['iqr_val_{}'])"'''.format(column_name_tmp, i, i), locals(), ldict1)
#         exec('''clean_str3 = "(df['{}'] > 0)"'''.format(column_name_tmp), locals(), ldict1)
#         clean_str1 = ldict1["clean_str1"]
#         clean_str2 = ldict1["clean_str2"]
#         clean_str3 = ldict1["clean_str3"]
#         if i == 0:
#             clean_str_start = clean_str_start + clean_str1 + ' & ' + clean_str2 + ' & ' + clean_str3
#         else:
#             clean_str_start = clean_str_start + ' & ' + clean_str1 + ' & ' + clean_str2 + ' & ' + clean_str3
#     clean_str = clean_str_start + clean_str_end
#     print(clean_str)
#     exec(clean_str, locals(), ldict1)
#     df_new = ldict1["df_new"]
#     df_new = df_new.reset_index(drop=True)
#     return df_new
#
# df0_train_clean = clean_data(df0_train, gamma)
# df0_train_clean.drop(['PROD_DATE'], axis=1, inplace=True)
# df0_train_clean.drop(['SUM_CACULATE_IRON_WGT'], axis=1, inplace=True)
# df0_train_clean.rename(columns={'AVG_IRON_TEMP': 'x1'}, inplace=True)
# df0_train_clean.rename(columns={'AVG_C_S_VALUE': 'x2'}, inplace=True)
# df0_train_clean.rename(columns={'COMPUTE_SLAG_RATE': 'x3'}, inplace=True)
# df0_train_clean.rename(columns={'COMPUTE_FILL_S_VALUE': 'x4'}, inplace=True)
# df0_train_clean.rename(columns={'AVG_S_VALUE': 'y'}, inplace=True)
#
# df1 = df0_train_clean.copy()
# # df1['x1_x1'] = df1['x1'] * df1['x1']
# # df1['x1_x2'] = df1['x1'] * df1['x2']
# # df1['x1_x3'] = df1['x1'] * df1['x3']
# # df1['x1_x4'] = df1['x1'] * df1['x4']
# # df1['x2_x2'] = df1['x2'] * df1['x2']
# # df1['x2_x3'] = df1['x2'] * df1['x3']
# # df1['x2_x4'] = df1['x2'] * df1['x4']
# # df1['x3_x3'] = df1['x3'] * df1['x3']
# # df1['x3_x4'] = df1['x3'] * df1['x4']
# # df1['x4_x4'] = df1['x4'] * df1['x4']
#
# # df1['x1_1'] = 1 / df1['x1']
# # df1['x2_1'] = 1 / df1['x2']
# # df1['x3_1'] = 1 / df1['x3']
# # df1['x4_1'] = 1 / df1['x4']
# # import seaborn as sns; sns.set(color_codes=True)
# # df1.corr()
# # sns.pairplot(df1)
# corr1 = df1.corr()
# print(type(corr1))
# print(corr1)
# model = LinearRegression()
# X = df1.drop(labels=['y'], axis=1, inplace=False).values
# y = df1['y'].values
# # X_input = input_X.values
# model.fit(X, y)
# # y_pred = model.predict(X_input)
# print(model.coef_)
# print('finish')


# Build the optional per-furnace SQL filter; 'all' means no UNIT_NO restriction.
if bf_no != 'all':
    bf_no = int(bf_no)
    sql_condition = " AND UNIT_NO='%s' " % (bf_no)
else:
    sql_condition = ''
print(sql_condition)


# Query point 1 carries its known sulfur label; query point 2 does not.
dict_input = {
    'AVG_IRON_TEMP': avg_iron_temp,
    'AVG_C_S_VALUE': avg_c_s_value,
    'COMPUTE_SLAG_RATE': compute_slag_rate,
    'COMPUTE_FILL_S_VALUE': compute_fill_s_value,
    'AVG_S_VALUE': y1,
}
dict_input2 = {
    'AVG_IRON_TEMP': avg_iron_temp2,
    'AVG_C_S_VALUE': avg_c_s_value2,
    'COMPUTE_SLAG_RATE': compute_slag_rate2,
    'COMPUTE_FILL_S_VALUE': compute_fill_s_value2,
}
# Prediction hyper-parameters (previously loaded from '铁水硫模型参数.xlsx'):
gamma = 3  # IQR fence multiplier for outlier cleaning
neighbors_num = 200  # starting neighbour count (overwritten by the search loop below)
guiyihua = 'MinMaxScaler'  # feature-scaling choice ("guiyihua" = normalization)
modelname = 'Linear'  # regressor name used by the model dispatch below

# Load this furnace's historical data.
# The SQL query is disabled for now; a local spreadsheet stands in for the DB.
start = datetime.datetime.now()
delta_day2 = 10
delta_day1 = delta_day2 + 365
# One-year window ending 10 days ago, formatted as YYYYMMDD strings.
p_day_2 = (start - datetime.timedelta(days=delta_day2)).strftime("%Y%m%d")
p_day_1 = (start - datetime.timedelta(days=delta_day1)).strftime("%Y%m%d")
# sql = " SELECT PROD_DATE,UNIT_NO,SUM_CACULATE_IRON_WGT, " \
#       " AVG_IRON_TEMP,AVG_S_VALUE,AVG_C_S_VALUE,COMPUTE_SLAG_RATE,COMPUTE_FILL_S_VALUE " \
#       " FROM BGTAMOMMIR.T_ODS_TMMIRF3 " \
#       " WHERE PROD_DATE>='%s' " \
#       " AND PROD_DATE<'%s' " \
#       " %s ORDER BY PROD_DATE " % (p_day_1,p_day_2,sql_condition)
# print(sql)
# df0 = pd.read_sql_query(sql, con=db_conn_mpp)

xlsx_name = f'D:/repos/sicost/fe_s_{bf_no}.xlsx'
df0 = pd.read_excel(xlsx_name)

# Normalise column names and date type, then drop the furnace id.
df0.columns = df0.columns.str.upper()
df0['PROD_DATE'] = df0['PROD_DATE'].astype(str)
df0_train = df0.copy().reset_index(drop=True)
df0_train.drop(['UNIT_NO'], axis=1, inplace=True)


# Data cleaning
def clean_data(df, gamma):
    """Drop outlier rows from *df* using a per-column IQR fence.

    For every column except 'PROD_DATE', a row is kept only if the value v
    satisfies  q1 - gamma*IQR <= v < q3 + gamma*IQR  and  v > 0, where q1/q3
    are the 25th/75th percentiles of that column over the whole input frame.
    All fences are computed up front, then applied as one combined filter
    (same semantics as the original exec-built expression, without exec).

    :param df: DataFrame with a 'PROD_DATE' column plus numeric columns.
    :param gamma: IQR multiplier controlling how wide the fence is.
    :return: filtered copy of df with the index reset.
    """
    mask = pd.Series(True, index=df.index)
    for col in df.columns:
        if col == 'PROD_DATE':
            continue
        q1 = df[col].quantile(0.25)
        q3 = df[col].quantile(0.75)
        iqr_val = q3 - q1
        mask &= (
            (df[col] >= q1 - gamma * iqr_val)
            & (df[col] < q3 + gamma * iqr_val)
            & (df[col] > 0)
        )
    return df[mask].reset_index(drop=True)


# Clean the history, append the two query points, scale everything together,
# and rank history rows by their combined distance to both query points.
df0_train_clean = clean_data(df0_train, gamma)
df0_train_clean.drop(['PROD_DATE'], axis=1, inplace=True)
df0_train_clean.drop(['SUM_CACULATE_IRON_WGT'], axis=1, inplace=True)
# DataFrame.append was removed in pandas 2.0 — pd.concat is the supported
# equivalent (same column alignment and ignore_index behaviour).
new_row = pd.Series(dict_input)
df0_train_clean = pd.concat([df0_train_clean, new_row.to_frame().T], ignore_index=True)
df0_train_clean_Y = df0_train_clean['AVG_S_VALUE']
# Append the second ("what-if") query point; it has no AVG_S_VALUE, so its
# label becomes NaN.
new_row2 = pd.Series(dict_input2)
df0_train_clean = pd.concat([df0_train_clean, new_row2.to_frame().T], ignore_index=True)
df0_train_clean_X = df0_train_clean.drop(labels=['AVG_S_VALUE'], axis=1, inplace=False)
# Scale all X columns so Euclidean distances are comparable across features.
if guiyihua == 'StandardScaler':
    transfer = StandardScaler()
else:
    transfer = MinMaxScaler()
X_trans = transfer.fit_transform(df0_train_clean_X)
df_X_trans = pd.DataFrame(X_trans)
# Scaled feature vectors of the two query rows (the last two rows appended).
input_data_X = df_X_trans.iloc[-2].tolist()

input_data_X2 = df_X_trans.iloc[-1].tolist()
# Unscaled rows used for prediction later: input_X is query point 2,
# input_X2 is query point 1.
input_X = df0_train_clean_X.iloc[-1:]
input_X2 = df0_train_clean_X.iloc[-2:-1]

# NOTE(review): only the last row is excluded from the candidate pool, so
# query point 1 (which carries the known label y1) remains a candidate
# neighbour with distance1 == 0 — presumably intentional; confirm.
df_X_new = df_X_trans.iloc[:-1]
df_X_new_copy = df_X_new.copy()
df_X_new_copy2 = df_X_new.copy()
df_X_new_copy['distance1'] = df_X_new_copy.apply(lambda row: distance.euclidean(row, input_data_X), axis=1)
df_X_new_copy2['distance2'] = df_X_new_copy2.apply(lambda row: distance.euclidean(row, input_data_X2), axis=1)
df_X_new_copy = df_X_new_copy.reset_index(drop=False)
df_X_new_copy.rename(columns={'index': 'index_old'}, inplace=True)
df_X_new_copy2 = df_X_new_copy2.reset_index(drop=False)
df_X_new_copy2.rename(columns={'index': 'index_old'}, inplace=True)
df_X_new_copy = df_X_new_copy[['index_old', 'distance1']]
df_X_new_copy2 = df_X_new_copy2[['index_old', 'distance2']]
v = ['index_old']
df_X_new_copy0 = pd.merge(df_X_new_copy, df_X_new_copy2, on=v, how='left')

# Total distance = distance to query point 1 + distance to query point 2.
df_X_new_copy0['distance'] = df_X_new_copy0['distance1'] + df_X_new_copy0['distance2']
df_X_new_copy0.drop(['distance1'], axis=1, inplace=True)
df_X_new_copy0.drop(['distance2'], axis=1, inplace=True)
df_sorted = df_X_new_copy0.sort_values(by='distance')
df_sorted = df_sorted.reset_index(drop=True)

def _make_model(name):
    """Return a fresh, unfitted regressor for the configured model name.

    Unknown names fall back to plain LinearRegression (same as the original
    if/elif chain's else branch).
    """
    factories = {
        'Linear': LinearRegression,
        'XGB': xgb.XGBRegressor,
        'RandomForest': RandomForestRegressor,
        'GradientBoosting': GradientBoostingRegressor,
        'AdaBoost': AdaBoostRegressor,
        'Bagging': BaggingRegressor,
        'DecisionTree': DecisionTreeRegressor,
        'ExtraTrees': ExtraTreesRegressor,
        'KNeighbors': KNeighborsRegressor,
        'Lasso': lambda: Lasso(alpha=0.1),
        'Ridge': lambda: Ridge(alpha=1.0),
        'ElasticNet': lambda: ElasticNet(alpha=1.0, l1_ratio=0.5),
    }
    return factories.get(name, LinearRegression)()


def _fit_on_nearest(k):
    """Fit the configured model on the k nearest neighbours.

    Joins the k closest rows of df_sorted back to the cleaned training frame,
    then fits `modelname` on their features (everything except 'distance' and
    'AVG_S_VALUE') against the 'AVG_S_VALUE' label. Returns the fitted model.
    """
    df_head = df_sorted.head(k)
    df_hist = df0_train_clean.reset_index(drop=False)
    df_hist.rename(columns={'index': 'index_old'}, inplace=True)
    df_head0 = pd.merge(df_head, df_hist, on=['index_old'], how='left')
    df_head0.drop(['index_old'], axis=1, inplace=True)
    model = _make_model(modelname)
    X = df_head0.drop(labels=['distance', 'AVG_S_VALUE'], axis=1, inplace=False).values
    y = df_head0['AVG_S_VALUE'].values
    model.fit(X, y)
    return model


# Grow the neighbourhood (5, 10, ..., 500) until the fitted model has
# physically plausible coefficient signs: negative on features 0 and 2,
# positive on feature 3. NOTE(review): the sign check reads model.coef_, so
# it only works for linear-family model names — confirm tree/ensemble names
# are never configured here.
X_input = input_X.values
found = False
for i in range(1, 101):
    print(i)
    neighbors_num = int(i * 5)
    model = _fit_on_nearest(neighbors_num)
    print(model.coef_)
    if model.coef_[0] < 0 and model.coef_[2] < 0 and model.coef_[3] > 0:
        print('找到合适的')
        found = True
        break
if not found:
    # Fallback when no neighbourhood produced acceptable signs. The original
    # guard `if i == 101:` was unreachable — range(1, 101) ends at i == 100 —
    # so this intended refit on 100 neighbours never ran; fixed here.
    neighbors_num = 100
    model = _fit_on_nearest(neighbors_num)




# Predict sulfur at both query points and output y1 plus the predicted delta.
y_pred = model.predict(X_input)
print(y_pred)
# .item() extracts the scalar from the 1-element prediction array;
# float(ndarray) is deprecated and raises on NumPy 2.x.
y_pred_output = y_pred.item()

X_input2 = input_X2.values
y_pred2 = model.predict(X_input2)
print(y_pred2)
y_pred_output2 = y_pred2.item()

# Final result: known sulfur at point 1 shifted by the model-estimated
# change between the two operating points.
y2 = y1 + (y_pred_output - y_pred_output2)
print(y2)
print('finish')