"""
# Author : YuuSoo
# version : python 3.8
# Time : 2023/1/7 11:40
"""
# 程序所用扩展包
import geatpy as ea
import requests
import yaml
import pickle
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
import warnings

warnings.filterwarnings('ignore', message='X does not have valid feature names')
# ==================================================================

# ==================================================================
# import ......
import logging
import flask_restful
# 自定义模块
# ps.在import时，路径为相对该文件的相对路径or绝对路径，在引用文件时，路径为相对主程序的路径
from errors import my_abort, generate_response, ResponseCode

# Custom error handling: route flask_restful's abort through the
# project's own handler so error responses share one format.
flask_restful.abort = my_abort
root_logger = logging.getLogger('root')
logger = logging.getLogger('main')


# ==================================================================


# ==================================================================
def function(Authorization, id):
    """Top-level entry point for the printing-parameter optimisation.

    :param Authorization: auth token forwarded to the downstream helpers
    :param id: identifier / file name of the input data set
    :return: a ``generate_response`` payload (success data or error info)
    """
    datas = [[]]
    # NOTE(review): pklPath comes from app config elsewhere; consider
    # passing it in as an explicit parameter.

    # Coarse-grained error control for now; TODO: have the predict_*
    # helpers raise custom exceptions instead of returning result codes.
    try:
        result = function_qs(Authorization, id)
        if result['resp_code'] != 0:
            return result
        datas = result['datas']
        return generate_response(datas)
    except Exception as exc:
        return generate_response(datas=datas, code=ResponseCode.ERROR,
                                 appendmessage='{0}'.format(str(exc)))


def function_qs(Authorization, id):
    """Run the optimisation pipeline and wrap the outcome in a response dict.

    :param Authorization: auth token forwarded to ``dataProcess``
    :param id: input file identifier
    :return: success response carrying the optimised values, or an error
             response with code ``ResponseCode.WRONG_PARAM``
    """
    logger.info('------------------')
    logger.info('印刷参数优化开始')
    logger.info('------------------')
    try:
        optimised = dataProcess(Authorization, id)
        logger.info('印刷参数优化完成')
        return generate_response(datas=optimised, appendmessage=optimised['message'])
    except Exception as exc:
        logger.error('印刷参数优化失败，请检查文件名是否正确')
        return generate_response(code=ResponseCode.WRONG_PARAM,
                                 appendmessage='{0}'.format(str(exc)))


def dataProcess(Authorization, id):
    """Search the printing-parameter space with NSGA-II and post-process.

    Refreshes the surrogate model via ``prediction()``, runs geatpy's
    NSGA-II over ``SPIProblem``, then maps the best individual back to
    physical machine settings and writes them to a CSV.

    :param Authorization: auth token forwarded to ``prediction``
    :param id: input CSV file name (relative to the configured save path)
    :return: dict with the optimised settings and a human-readable "message"
    """
    print('开始')
    savePath = readConfig()

    # Fit the surrogate model and dump it to ./Data/output/spi-opt.dat
    # so that SPIProblem.evalVars can load it.
    prediction(Authorization, id, savePath)

    problem = SPIProblem()

    # NSGA-II, binary/gray encoding, population of 50, 500 generations.
    algorithm = ea.moea_NSGA2_templet(
        problem,
        ea.Population(Encoding='BG', NIND=50),
        MAXGEN=500,  # maximum number of generations
        logTras=0)   # 0 = do not record evolution logs
    algorithm.mutOper.Pm = 0.2    # mutation probability
    algorithm.recOper.XOVR = 0.9  # crossover probability

    res = ea.optimize(algorithm,
                      verbose=False,
                      drawing=0,
                      outputMsg=True,
                      drawLog=False,
                      saveFlag=False)
    print(res['Vars'])

    # Early return when the GA found no feasible solution.
    if res['Vars'] is None:
        return {"message": "未找到最优解，请重新运行"}

    r = pd.DataFrame(res['Vars']).values

    # Shift squeegee length back into its physical range (the GA searches a
    # shifted interval) — offsets inherited from the original tuning; the
    # None check is kept for safety although numpy scalars are never None.
    if r[0][3] is not None:
        r[0][3] = r[0][3] + 325 if r[0][3] > 15 else r[0][3] + 240

    # Demould speed: remap the encoded value 0 -> 0.8, otherwise subtract 0.3.
    demould = r[0][6].astype('float')
    r6 = demould + 0.8 if demould == 0 else demould - 0.3

    result = {"刮刀长度": [r[0][3].astype('float')], "刮刀压力": [r[0][4].astype('float')],
              "印刷速度": [r[0][5].astype('float')], "脱模速度": [r6], "message": "优化成功！"}

    # Persist the optimised settings next to the other pipeline outputs.
    pd.DataFrame(result).to_csv('./Data/output/res-opt.csv', index=False)

    return result


def encode_judge_res(value):
    """Binary-encode a SPI judge result: pass labels -> 1, everything else -> 0."""
    return 1 if value in ('良品', '通过') else 0


def prediction(Authorization, id, savePath):
    """Train the multi-output surrogate model and pickle it to disk.

    Reads the SPI CSV from ``savePath + id``, binary-encodes the
    ``JudgeRes`` column, fits a multi-output random forest mapping the
    first 14 remaining columns to columns 14..15 (presumably the area and
    height targets — confirm against the data schema), and saves the
    fitted model to ./Data/output/spi-opt.dat for ``SPIProblem``.

    :param Authorization: unused here; kept for a uniform helper signature
    :param id: CSV file name to read
    :param savePath: directory prefix for the input file
    :return: status message string
    """
    fileName = id
    df = pd.read_csv(savePath + fileName)  # e.g. '.\\Data\\input\\opt-spi-data.csv'

    # Measurement columns that are not used as model features.
    remove_columns = ['Rotation(um)', 'AreaU(um)', 'AreaL', 'VolU(um)', 'VolL',
                      'HeightU(um)', 'HeightL', 'ShapeDeltaU', 'StencilHeight',
                      'BridgeLength(um)', 'BridgeHeight', 'BridgeWidth(um)']
    df = df.drop(columns=remove_columns, axis=1)

    # Binary-encode the inspection verdict (良品/通过 -> 1, else 0).
    df['JudgeRes'] = df['JudgeRes'].apply(encode_judge_res)

    print(df.info())

    X = df.iloc[:, 0:14]   # feature columns (decision variables)
    y = df.iloc[:, 14:16]  # two regression targets

    # Fixed-size split; assumes the input has at least 600 rows.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=400, test_size=200, random_state=4)

    # BUG FIX: bootstrap was the string 'True', which recent sklearn
    # parameter validation rejects; it must be the boolean True.
    regr_multirf = MultiOutputRegressor(RandomForestRegressor(
        n_estimators=200, max_depth=30, random_state=0,
        min_samples_leaf=1, min_samples_split=2,
        max_features='sqrt', bootstrap=True))

    # Fit and persist the model; the context manager closes the handle
    # deterministically (the original leaked the open file object).
    regr_multirf.fit(X_train, y_train)
    with open('./Data/output/spi-opt.dat', "wb") as fh:
        pickle.dump(regr_multirf, fh)

    # Hold-out predictions, printed for manual inspection only.
    y_multirf = regr_multirf.predict(X_test)
    print(y_multirf)

    return "面积和高度预测成功"


# def download(Authorization, id, savePath):
#     """
#     将文件从minio下载下来
#     :param Authorization:
#     :param fileId:
#     :return:
#     """
#
#     headers = {
#         'Connection': 'keep-alive',
#         'Accept': 'application/json,tex/plain,*/*',
#         'X-Requested-with': 'XMLHttpRequest',
#         'Authorization': Authorization
#     }
#
#     Params = {
#         'id': id
#     }
#
#     datas = requests.get(url, headers=headers, params=Params)
#
#     savePath = savePath + id
#
#     with open(savePath, "wb") as code:
#         code.write(datas.content)
#
#     return id


def readConfig():
    """Read the download save path from the YAML application config.

    :return: the configured ``fileUploadDownload.download.savePath`` string
    """
    with open('./conf/application.yml', 'r', encoding='utf-8') as cfg:
        content = yaml.load(cfg.read(), yaml.FullLoader)
    return content["fileUploadDownload"]["download"]["savePath"]


class SPIProblem(ea.Problem):  # inherits geatpy's Problem base class
    """Two-objective SPI printing-parameter optimisation problem.

    Decision vector: 14 integer variables (machine settings plus encoded
    inspection columns). Objectives: the surrogate model's two outputs
    shifted by a constant, both minimised. Constraints are expressed via
    the CV violation matrix (each column feasible when <= 0).
    """

    def __init__(self, M=2):
        name = 'MyProblem'   # problem name (arbitrary)
        Dim = 14             # number of decision variables
        maxormins = [1] * M  # 1 = minimise each objective
        varTypes = [1] * Dim # 1 = integer decision variables

        # Bounds inherited from earlier tuning runs — TODO(review): confirm
        # against the actual machine specifications.
        ub = [5, 240, 180, 350, 8, 25, 1, 250, 250, 250, 300, 300, 300, 150]
        lb = [-10, -20, -20, 10, 5, 24, 0, -20, -20, -20, -20, -20, -20, -20]

        lbin = [1] * Dim  # 1 = lower bound inclusive
        ubin = [1] * Dim  # 1 = upper bound inclusive
        ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)

        # Surrogate model cache: loaded lazily on the first evaluation, so
        # the pickle file only needs to exist once optimisation starts.
        self._model = None

    def evalVars(self, Vars):
        """Evaluate objectives and constraints for a population matrix Vars (N x 14)."""
        # PERF FIX: the original unpickled the model from disk on EVERY
        # evaluation (once per generation) and never closed the file handle.
        if self._model is None:
            with open("./Data/output/spi-opt.dat", "rb") as fh:
                self._model = pickle.load(fh)

        # Per-variable column views used in the constraint expressions.
        x1, x2, x3, x4, x5, x6, x7 = (Vars[:, [k]] for k in range(7))
        x8, x9, x10, x11, x12, x13, x14 = (Vars[:, [k]] for k in range(7, 14))

        # Shifted surrogate predictions (constant offset keeps values positive).
        f1 = self._model.predict(Vars.reshape(-1, 14)) + 8000

        # Constraint-violation matrix; a solution is feasible when every
        # column is <= 0.
        CV = np.hstack(
            [x1 + x7 + 4, x2 - 2 * x6 - x3, x4 - x2 - x3 + 50, x9 / 100 - x8,
             x10 - x11 - 200, x12 - x14, x13 - x14, x13 - x12, x4 - 20, x5 - 22, x8 + x10 - 5,
             x9 - x13 - 30, x11 + x12, x1 - x5])

        return f1, CV
