# -*- coding: utf-8 -*-
# @Time    : 2023/5/8
# @Author  : tangyu
import json
import os

import matplotlib.pyplot as plt
import requests

from app.main.dao.performance_prediction_model_dao import PerformancePredirtionModelDao
from app.main.entity.performance_prediction_model_entity import PerformancePredirtionModelEntity
from app.util.result import Result
from app.util.get_static_path import get_group_path,get_nogroup_path
from app.util.data_clean.outlier_missing import OutlierMissing
import numpy as np
import xgboost as xgb
import pandas as pd
import sklearn.model_selection as ms
from sklearn import metrics
from sklearn.metrics import mean_squared_error
import pickle
# import matplotlib.pyplot as plt
# import seaborn as sns
from datetime import datetime
import copy
# from lightgbm import LGBMRegressor
from sklearn.ensemble import RandomForestRegressor,ExtraTreesRegressor,GradientBoostingRegressor
#model = LGBMRegressor(num_leaves=62, n_estimators=70,learning_rate=0.3)
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, BaggingRegressor
from config import Config


# 业务逻辑实现，简单添加方法即可
class PerformancePredirtionModel:
    """
    Yield-strength (屈服强度) prediction model service.

    Trains regression models (XGBoost, random forest, etc.) on samples
    pulled from the database, either as a single model or one model per
    group value, saves the fitted models to disk and registers them via
    the DAO layer.

    NOTE(review): "Predirtion" is a typo for "Prediction"; the name is
    kept because the DAO/entity modules reference it.
    """

    def cout1(self,pred, y_test, upper_error,lower_error):
        """Hit rate of predictions inside the tolerance band.

        A prediction counts as a hit when
        ``y_test[i] + lower_error <= pred[i] <= y_test[i] + upper_error``
        (``lower_error`` is expected to be negative for a band around the
        true value).

        :param pred: sequence of predicted values.
        :param y_test: sequence of true values, same length as ``pred``.
        :param upper_error: upper tolerance offset added to the truth.
        :param lower_error: lower tolerance offset added to the truth.
        :return: hit ratio in [0, 1]; for an empty ``y_test`` the original
            ``Result.error`` object is returned for backward compatibility.
            NOTE(review): callers multiply the return value by 100, so the
            error object would raise downstream -- consider raising instead.
        """
        # Guard the empty case up front; the original only checked len()
        # after (vacuously) iterating the empty sequence.
        if len(y_test) == 0:
            return Result.error(msg="请检查数据集")
        hits = sum(
            1
            for p, y in zip(pred, y_test)
            if y + lower_error <= p <= y + upper_error
        )
        return hits / len(y_test)

    def rmsle(self,y, y_pred):
        """Root-mean-squared error between *y* and *y_pred*.

        NOTE(review): despite the name, no log transform is applied --
        this is RMSE, not RMSLE; the name is kept for callers.

        Computed directly with numpy (equivalent to
        ``sqrt(sklearn.metrics.mean_squared_error(y, y_pred))`` with the
        default uniform weighting) so the helper has no sklearn dependency.

        :param y: sequence of true values.
        :param y_pred: sequence of predicted values, same length.
        :return: the RMSE as a numpy float.
        """
        diff = np.asarray(y, dtype=float) - np.asarray(y_pred, dtype=float)
        return np.sqrt(np.mean(diff ** 2))

    def data_clean(self,data,input):
        """Clean one training DataFrame before model fitting.

        Pipeline: normalise blanks/NaNs -> best-effort numeric coercion ->
        drop object-dtype (lettered) columns except the reel key and the
        group column -> OutlierMissing filters (all-NaN columns, outliers,
        constant columns, rows/columns with too many NaNs).

        :param data: raw pandas DataFrame selected from the database.
        :param input: PerformancePredirtionModelEntity; its reel_key,
            group_var, time_type and Y attributes name DataFrame columns
            (assumed lower-case in *data* -- TODO confirm against the DAO).
        :return: the cleaned DataFrame.
        """
        data = data.fillna(value=np.NaN)
        # Blank-string cells are treated as missing values.
        data.replace(" ",np.nan,inplace = True)
        # Coerce columns to numeric; columns that fail stay object-dtype.
        # NOTE(review): errors='ignore' is deprecated in recent pandas.
        data = data.apply(pd.to_numeric, errors='ignore')
        # Columns still object-dtype after coercion contain letters.
        cols_with_letters = data.select_dtypes(include='object').columns.tolist()
        # Drop every lettered column except the reel key and group column.
        cols_to_drop = [col for col in cols_with_letters if col != input.reel_key.lower() and col != input.group_var.lower()]
        data = data.drop(cols_to_drop, axis=1)
        data = OutlierMissing.remove_all_nan_columns(data)
        # Feature column names = everything except time, target, reel key.
        # NOTE(review): input.Y.lower() appears twice in labels; the
        # duplicate looks redundant -- verify pandas tolerates it here.
        column_x_names = data.drop(
            labels=[input.time_type.lower(), input.Y.lower(), input.reel_key.lower(), input.Y.lower()], axis=1,
            inplace=False).columns.tolist()
        data = OutlierMissing.outliers(data, column_x_names)
        a = data.shape  # NOTE(review): unused; presumably a debug leftover.
        # Constant-column removal needs at least two rows to be meaningful.
        if len(data) > 1:
            data = OutlierMissing.remove_same_value_columns(data,input)
        # Drop columns, then rows, whose NaN ratio exceeds 5%.
        data = OutlierMissing.remove_nan_percentage_columns(data, 0.05)
        data = OutlierMissing.remove_nan_percentage_rows(data, 0.05)
        return data

    def save_model(self,model,input,group_val,columns,modelloads,in_time):
        """Persist a trained model to disk and register its path in the DB.

        Only one file per model (and, when grouped, per group value) is
        kept: any previously saved file with the same prefix is deleted
        before the new pickle is written.

        :param model: fitted estimator to pickle.
        :param input: request entity (modelname, group_var, upper_error,
            lower_error, reel_key, Y, ...).
        :param group_val: value of the grouping variable ('' if ungrouped).
        :param columns: training feature columns, stored comma-joined.
        :param modelloads: previously registered models -- unused here;
            parameter kept for interface compatibility with callers.
        :param in_time: training start time used in the file name / DB row.
        :return: the SQL produced by the DAO insert call.
        """
        timestamp = in_time.strftime('%Y_%m_%d_%H_%M_%S')
        if input.group_var == '':
            model_path = 'app/train/no_group_file'
            model_path_app = 'app\\train\\no_group_file'
            model_name = '{}_{}.pkl'.format(input.modelname, timestamp)
            file_prefix = input.modelname
            path_of = get_nogroup_path
        else:
            model_path = 'app/train/group_file'
            model_path_app = 'app\\train\\group_file'
            model_name = '{}_{}_{}_{}.pkl'.format(input.modelname,input.group_var,group_val, timestamp)
            file_prefix = '{}_{}_{}'.format(input.modelname, input.group_var, group_val)
            path_of = get_group_path
        # Remove the previous dump of this model, if any (first match only,
        # matching the original break-on-first behaviour).
        for file_name in os.listdir(model_path):
            if file_prefix in file_name:
                os.remove(path_of(file_name))
                break
        # Persist the estimator; the with-statement closes the handle
        # (the original leaked the file object returned by open()).
        with open(path_of(model_name), "wb") as f:
            pickle.dump(model, f)
        # Register the saved model path in the database.  The original
        # wrapped this in a redundant for/break over os.listdir and built
        # an unused `modelload` value; both removed.
        model_root = '{}\\{}'.format(model_path_app,model_name)
        if input.group_var == '':
            sql = PerformancePredirtionModelDao().insert_model_root_nogroup(input.modelname, model_root,
                                                          in_time.strftime('%Y-%m-%d %H:%M:%S'),','.join(columns),input.upper_error,input.lower_error,input.reel_key,input.Y)
        else:
            sql = PerformancePredirtionModelDao().insert_model_root_group(input.modelname, model_root,
                                                                      in_time.strftime('%Y-%m-%d %H:%M:%S'),input.group_var,group_val,','.join(columns),input.upper_error,input.lower_error,input.reel_key,input.Y)
        return sql

    def corr_create(self,data,input):
        """Build Pearson correlation tables for the front-end heatmap.

        With a grouping variable set, one table per group value is built
        (groups of <= 5 rows are skipped); otherwise a single table over
        the whole feature matrix.

        NOTE(review): the only call site in prediction_model is commented
        out, so this method appears unused.  In the ungrouped branch,
        data_clean re-drops the Y/time/reel columns that were already
        removed from X here, which looks like it would raise KeyError --
        confirm before re-enabling.

        :param data: DataFrame including time, target and reel-key columns.
        :param input: request entity (time_type, Y, reel_key, group_var).
        :return: list of per-group lists of axis dictionaries.
        """
        X = data.drop(labels=[input.time_type.lower(), input.Y.lower(),input.reel_key.lower()], axis=1, inplace=False)  # input features

        Y = data.loc[:, input.Y]  # NOTE(review): unused below
        # One dict per X-axis column; deep-copied into the result lists.
        corr_dict = dict()
        corr_group_list = []
        corr_list = []
        if input.group_var != '':
            # One correlation table per group value.
            # NOTE(review): groupby uses input.group_var as-is while other
            # code lower-cases column names -- confirm the case matches.
            data_corr = X.groupby(input.group_var)
            for group_var, group in data_corr:
                if len(group) <= 5:
                    continue
                group = PerformancePredirtionModel().data_clean(group, input)
                corr_dt = group.drop(labels=[input.group_var.lower()], axis=1, inplace=False).corr(method='pearson')
                corr_group_list = []
                for corr_x, corr_y in corr_dt.items():
                    corr_dict['group_var'] = input.group_var
                    corr_dict['group_var_val'] = group_var
                    corr_dict['axistX'] = corr_x
                    # NOTE(review): the trailing comma wraps the list in a
                    # tuple -- confirm the consumer expects that shape.
                    corr_dict['axistY'] = [x for x in corr_y.index],
                    corr_dict['axistX_val'] = corr_y.values.tolist()
                    corr_group_list.append(copy.deepcopy(corr_dict))
                corr_list.append(corr_group_list)
        else:
            group = PerformancePredirtionModel().data_clean(X, input)
            data_corr = group.corr()
            for corr_x, corr_y in data_corr.items():
                corr_dict['group_var'] = input.group_var
                corr_dict['group_var_val'] = ""
                corr_dict['axistX'] = corr_x
                # NOTE(review): trailing comma -> tuple, as above.
                corr_dict['axistY'] = [x for x in corr_y.index],
                corr_dict['axistX_val'] = corr_y.values.tolist()
                corr_group_list.append(copy.deepcopy(corr_dict))
            corr_list.append(corr_group_list)
        return corr_list

    def create_model(self,data,input,group_val,corr_list,modelloads,in_time):
        """Clean *data*, train one estimator, persist it and collect stats.

        :param data: raw DataFrame for this run (column-filtered by caller).
        :param input: request entity (algorithm, Y, time_type, reel_key,
            group_var, ...).
        :param group_val: group value being trained ('' when ungrouped).
        :param corr_list: accumulator list; the top-10-feature correlation
            table is appended to it.
        :param modelloads: previously registered models, forwarded to
            save_model.
        :param in_time: training start time used for file/DB timestamps.
        :return: tuple (X_test, X_train, x_test, y_train, y_test, model,
            corr_list, sql, importance_dict).  For an unknown algorithm a
            Result.error is returned instead, so callers must check.
        """
        importance_dict = dict()
        corr_dict = dict()
        corr_group_list = []
        data = PerformancePredirtionModel().data_clean(data,input)
        X = data.drop(labels = [input.time_type, input.Y],axis = 1,inplace = False)  # input features
        Y = data.loc[:, input.Y]  # target column, e.g. yield strength Rp0.2
        # 80/20 train/test split with a fixed seed for reproducibility.
        # (The original pre-assigned X_test/X_train/y_*/l here and then
        # overwrote every one of them; the dead assignments are removed.)
        x_train, X_test, Y_train, Y_test = ms.train_test_split(X, Y, test_size=0.2, random_state=4)
        # The reel key only identifies a sample and must not be a feature.
        X_train = x_train.drop(labels=[input.reel_key], axis=1, inplace=False)
        y_train = Y_train
        y_test = Y_test
        x_test = X_test.drop(labels=[input.reel_key], axis=1, inplace=False)
        columns = X_train.columns

        # Estimator selection via lookup table instead of an if/elif chain.
        estimator_factories = {
            'xgboost': xgb.XGBRegressor,
            'XGBRegressor': xgb.XGBRegressor,
            # NOTE(review): the lightgbm import is commented out at module
            # level, so LGBMRegressor silently falls back to XGBoost.
            'LGBMRegressor': xgb.XGBRegressor,
            'RandomFORSEST': RandomForestRegressor,
            'GradientBoostingRegressor': GradientBoostingRegressor,
            'AdaBoostRegressor': AdaBoostRegressor,
            'BaggingRegressor': BaggingRegressor,
            'LinearRegression': LinearRegression,
            'DecisionTreeRegressor': DecisionTreeRegressor,
            'ExtraTree': ExtraTreesRegressor,
        }
        factory = estimator_factories.get(input.algorithm)
        if factory is None:
            return Result.error(
                msg="{}模型未写入,现有模型XGBRegressor,LGBMRegressor, RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, BaggingRegressor, LinearRegression, DecisionTreeRegressor,ExtraTreesRegressor".format(
                    input.algorithm))
        model = factory()
        model.fit(X_train, y_train)
        # Feature importances: BaggingRegressor only exposes them on its
        # base estimators.
        # NOTE(review): LinearRegression has no feature_importances_
        # attribute, so that algorithm would raise here -- confirm it is
        # never requested through this path.
        if input.algorithm == 'BaggingRegressor':
            feature_importances = model.estimators_[0].feature_importances_
        else:
            feature_importances = model.feature_importances_
        feature_names = X_train.columns
        # Sort ascending by importance so [-20:] / [-10:] are the top ones.
        sorted_indices = np.argsort(feature_importances)
        importance_columns = feature_names[sorted_indices]
        importance_datas = feature_importances[sorted_indices]
        importance_dict['importance_columns'] = importance_columns.tolist()[-20:]
        importance_dict['importance_datas'] = importance_datas.tolist()[-20:]
        importance_dict['group_var'] = input.group_var
        importance_dict['group_var_val'] = group_val

        # Pearson correlation sub-matrix of the ten most important features.
        X_corr = X_train.corr()
        top_10_important_features = importance_columns.tolist()[-10:]
        top_10_corr = X_corr.loc[top_10_important_features, top_10_important_features]
        for corr_x, corr_y in top_10_corr.items():
            corr_dict['group_var'] = input.group_var
            corr_dict['group_var_val'] = ""
            corr_dict['axistX'] = corr_x
            # NOTE(review): the trailing comma wraps the list in a tuple;
            # kept because the same shape is produced everywhere else.
            corr_dict['axistY'] = [x for x in corr_y.index],
            corr_dict['axistX_val'] = corr_y.values.tolist()
            corr_group_list.append(copy.deepcopy(corr_dict))
        corr_list.append(corr_group_list)
        sql = PerformancePredirtionModel().save_model(model,input,group_val,columns,modelloads,in_time)
        return X_test,X_train, x_test, y_train, y_test,model,corr_list,sql,importance_dict

    def prediction_model(self,request_data):
        """End-to-end training endpoint for the performance predictor.

        Loads samples for the requested table/time window, drops the
        columns listed in ``sting_names`` (keeping the group variable and
        reel key), then either trains a single model (no group variable)
        or one model per group value; each trained model is pickled via
        save_model and registered in the DB.  Returns accuracy, feature
        importances, correlation tables, per-sample prediction rows and a
        remark about groups too small to train.

        NOTE(review): the grouped branch duplicates most of create_model
        inline -- a candidate for consolidation.

        :param request_data: raw request dict (see __main__ for a sample).
        :return: Result.success with the payload above, or Result.error.
        """
        in_time = datetime.now()
        input = PerformancePredirtionModelEntity(request_data)
        data = PerformancePredirtionModelDao().select_model_data(request_data)  # fetch raw samples from the DB
        if data == [] :
            return Result.error(msg = "{}表中在{}至{}之间数据查询为空".format(input.tablename,input.begin_data,input.end_data))
        elif data is None:
            return Result.error(msg = "{}表中在{}至{}之间数据查询失败，请检查参数".format(input.tablename, input.begin_data, input.end_data))
        if len(data) < Config.TRIAN_LENGTH:
            return Result.error(msg="训练数据少于{}条".format(Config.TRIAN_LENGTH))
        data = pd.DataFrame(data)
        # sting_names is a bracketed comma-separated column list; strip the
        # surrounding brackets and drop every listed column except the
        # group variable and the reel key.
        varname_list = input.sting_names[1:-1].split(",")
        if input.group_var in varname_list:
            varname_list.remove(input.group_var)
        if input.reel_key in varname_list:
            varname_list.remove(input.reel_key)
        data = data.drop(columns=varname_list)

        get_sql_time = datetime.now()
        print(get_sql_time-in_time)  # debug timing of the DB fetch
        # NOTE(review): index / index1 / head are never used below --
        # presumably leftovers from an earlier three-target version
        # (absolute / relative error bands per target).
        index = [15, 15, 2]
        index1 = [8, 8, 5]
        head = [input.Y]
        result_list = []
        result_group_list = []
        value_dict = dict()
        corr_dict = dict()
        corr_group_list = []
        corr_list = []
        sql_list = []
        importance_list = []
        untrained_list = []  # group values skipped for lack of data

        # Previously registered models for this model name; forwarded to
        # save_model (only used by its commented-out update path).
        if input.group_var == '':
            modelloads = PerformancePredirtionModelDao().select_model_name_nogroup(input.modelname)
        else:
            modelloads = PerformancePredirtionModelDao().select_model_name_group(input.modelname, input.group_var)
        if input.group_var == '':
            # Ungrouped: one model over the whole data set.
            X_test,X_train, x_test, y_train, y_test,model,corr_list,sql,importance_dict= PerformancePredirtionModel().create_model(data,input,'',corr_list,modelloads,in_time)
            sql_list.append(sql)
            importance_list.append(importance_dict)
            pred_y_XGB = model.predict(x_test)
            pred_y_XGB = np.around(pred_y_XGB,2)
            y_test = y_test.to_numpy()
            y1 = pred_y_XGB.tolist()
            y2 = y_test.tolist()
            # Attach truth / prediction / error columns for the result rows.
            X_test[f"Y_true"] = y2
            X_test[f"Y_pre"] = y1
            X_test[f"Y_error"] = abs(X_test[f"Y_pre"] - X_test[f"Y_true"])
            y = X_test[f"Y_true"]  # NOTE(review): unused
            acc = round(self.cout1(pred_y_XGB.tolist(), y_test.tolist(), input.upper_error,input.lower_error) * 100, 4)
            for key,value in X_test[[input.reel_key.lower(),'Y_true','Y_pre','Y_error']].iterrows():
                value = value.to_dict()
                value_dict['reel_key'] = input.reel_key.lower()
                value_dict['reel_key_cname'] = input.reel_key_cname.lower()
                value_dict['reel_key_val'] = value[input.reel_key.lower()]
                value_dict['Y'] = input.Y
                # NOTE(review): int() truncates the rounded predictions.
                value_dict['Y_true'] = int(value['Y_true'])
                value_dict['Y_pre'] = int(value['Y_pre'])
                value_dict['Y_error'] = abs(value_dict['Y_true'] - value_dict['Y_pre'])
                value_dict['group_var'] = input.group_var
                value_dict['group_var_val'] = ''
                value_dict['Y'] = input.Y
                value_dict['group_accuracy'] = acc
                result_list.append(copy.deepcopy(value_dict))
        else:
            # Grouped: train one model per group value and aggregate results.
            time_num = 0
            time_num2 = 0
            time_num3 = 0
            time_num4 = 0
            Y_true = []
            Y_pre = []
            data_grby = data.groupby(input.group_var.lower())
            for grby_x,grby_y in data_grby:
                importance_dict = dict()
                if len(grby_y) <= Config.TRIAN_LENGTH :
                    # Too few samples: remember the group for the remark.
                    untrained_list.append(str(grby_x))
                    continue
                grby_x = str(grby_x)
                data = PerformancePredirtionModel().data_clean(grby_y, input)
                X = data.drop(labels=[input.time_type.lower(), input.Y.lower()], axis=1, inplace=False)  # input features
                Y = data.loc[:, input.Y]  # target column
                corr_dt = X.drop(labels=[input.group_var.lower(),input.reel_key], axis=1, inplace=False).corr(method='pearson')
                # NOTE(review): this takes the correlation OF the
                # correlation matrix -- confirm that is intentional
                # (create_model uses the plain correlation instead).
                corr_dt = corr_dt.corr()
                corr_group_list = []
                # NOTE(review): the next six assignments (and unused `l`)
                # are overwritten unconditionally by the split block below.
                X_test = X
                l = 5
                X_train = X.drop(labels=[input.reel_key,input.group_var], axis=1, inplace=False)
                y_train = Y
                y_test = Y
                x_test = X
                x_train, X_test, Y_train, Y_test = ms.train_test_split(X, Y, test_size=0.2, random_state=4)  # 80/20 split, fixed seed
                X_train = x_train.drop(labels=[input.reel_key], axis=1, inplace=False)
                y_train = Y_train
                y_test = Y_test
                x_test = X_test.drop(labels=[input.reel_key], axis=1, inplace=False)
                X_test = X_test
                columns = X_train.columns
                # Estimator selection (duplicated from create_model).
                if input.algorithm == 'xgboost' or input.algorithm == 'XGBRegressor':
                    model = xgb.XGBRegressor()
                elif input.algorithm == 'LGBMRegressor':
                    # NOTE(review): lightgbm import is commented out at
                    # module level, so this silently falls back to XGBoost.
                    model = xgb.XGBRegressor()
                elif input.algorithm == 'RandomFORSEST':
                    model = RandomForestRegressor()
                elif input.algorithm == 'GradientBoostingRegressor':
                    model = GradientBoostingRegressor()
                elif input.algorithm == 'AdaBoostRegressor':
                    model = AdaBoostRegressor()
                elif input.algorithm == 'BaggingRegressor':
                    model = BaggingRegressor()
                elif input.algorithm == 'LinearRegression':
                    model = LinearRegression()
                elif input.algorithm == 'DecisionTreeRegressor':
                    model = DecisionTreeRegressor()
                elif input.algorithm == 'ExtraTree':
                    model = ExtraTreesRegressor()
                else:
                    return Result.error(
                        msg="{}模型未写入,现有模型XGBRegressor,LGBMRegressor, RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, BaggingRegressor, LinearRegression, DecisionTreeRegressor,ExtraTreesRegressor".format(
                            input.algorithm))
                time1 = datetime.now()
                model.fit(X_train, y_train)
                time_num2 = time_num2 + (datetime.now() - time1).total_seconds()  # debug timing accumulator
                # Feature importances: BaggingRegressor only exposes them
                # on its base estimators.
                # NOTE(review): LinearRegression would raise here (no
                # feature_importances_ attribute) -- confirm unused.
                if input.algorithm == 'BaggingRegressor':
                    feature_importances = model.estimators_[0].feature_importances_
                else:
                    feature_importances = model.feature_importances_
                feature_names = X_train.columns
                # Sort ascending so [-20:] / [-10:] are the most important.
                sorted_indices = np.argsort(feature_importances)
                importance_columns = feature_names[sorted_indices]
                importance_datas = feature_importances[sorted_indices]
                importance_dict['importance_columns'] = importance_columns.tolist()[-20:]
                importance_dict['importance_datas'] = importance_datas.tolist()[-20:]
                importance_dict['group_var'] = input.group_var
                importance_dict['group_var_val'] = grby_x
                importance_list.append(importance_dict)
                X_corr = X_train.corr()  # NOTE(review): unused; corr_dt is used below instead
                # Correlation sub-matrix of the ten most important features.
                top_10_important_features = importance_columns.tolist()[-10:]
                top_10_corr = corr_dt.loc[top_10_important_features, top_10_important_features]
                for corr_x, corr_y in top_10_corr.items():
                    corr_dict['group_var'] = input.group_var
                    corr_dict['group_var_val'] = ""
                    corr_dict['axistX'] = corr_x
                    # NOTE(review): trailing comma wraps the list in a
                    # tuple -- consistent with the other payload builders.
                    corr_dict['axistY'] = [x for x in corr_y.index],
                    corr_dict['axistX_val'] = corr_y.values.tolist()
                    corr_group_list.append(copy.deepcopy(corr_dict))
                corr_list.append(corr_group_list)
                sql = PerformancePredirtionModel().save_model(model, input, grby_x, columns,modelloads,in_time)
                sql_list.append(sql)
                # NOTE(review): unlike the ungrouped branch, predictions
                # here are not rounded with np.around before use.
                pred_y_XGB = model.predict(x_test)
                y_test = y_test.to_numpy()
                y1 = pred_y_XGB.tolist()
                Y_pre.extend(y1)
                y2 = y_test.tolist()
                Y_true.extend(y2)
                # Attach truth / prediction / error columns for result rows.
                X_test[f"Y_true"] = y2
                X_test[f"Y_pre"] = y1
                X_test[f"Y_error"] = abs(X_test[f"Y_pre"] - X_test[f"Y_true"])
                y = X_test[f"Y_true"]  # NOTE(review): unused
                acc = round(self.cout1(pred_y_XGB.tolist(), y_test.tolist(), input.upper_error,input.lower_error) * 100, 4)
                # NOTE(review): likely unreachable -- groups this small were
                # already skipped by the TRIAN_LENGTH check above (assuming
                # TRIAN_LENGTH >= 5); confirm.
                if len(grby_y) <= 5:
                    continue
                group_acc = round(self.cout1(X_test['Y_pre'].tolist(),X_test['Y_true'].tolist(),input.upper_error,input.lower_error)*100,4)
                result_group_list = []
                for key,value in X_test[[input.reel_key.lower(),input.group_var,'Y_true','Y_pre','Y_error']].iterrows():
                    value = value.to_dict()
                    value_dict['reel_key'] = input.reel_key.lower()
                    value_dict['reel_key_cname'] = input.reel_key_cname.lower()
                    value_dict['reel_key_val'] = value[input.reel_key.lower()]
                    value_dict['Y'] = input.Y
                    # NOTE(review): int() truncates the predictions.
                    value_dict['Y_true'] = int(value['Y_true'])
                    value_dict['Y_pre'] = int(value['Y_pre'])
                    value_dict['Y_error'] = abs(value_dict['Y_true']-value_dict['Y_pre'])
                    value_dict['group_var'] = input.group_var
                    value_dict['group_var_val'] = value[input.group_var]
                    value_dict['group_accuracy'] = group_acc
                    # NOTE(review): `value` is a throwaway dict rebuilt each
                    # iteration, so this deletion has no observable effect.
                    del value[input.group_var]
                    result_group_list.append(copy.deepcopy(value_dict))
                # NOTE(review): grouped results are nested lists, while the
                # ungrouped branch appends flat dicts -- the consumer must
                # handle both shapes.
                result_list.append(result_group_list)
            # Overall accuracy across all trained groups.
            if Y_pre != []:
                acc = round(self.cout1(Y_pre, Y_true, input.upper_error,input.lower_error) * 100, 4)
            else:
                return Result.error(msg="请重新选择分组变量")
        PerformancePredirtionModelDao().update_data(sql_list)
        if untrained_list == []:
            remark = ''
        else:
            # NOTE(review): .format runs on the joined group names too --
            # a brace character in a group name would break this; confirm
            # group values cannot contain '{' or '}'.
            remark = (','.join(untrained_list) + '分组数据少于{}条，分组未训练').format(Config.TRIAN_LENGTH)
        result ={
                'forecast': acc,
                'importance':importance_list,
                'corr': corr_list,
                'data': result_list,
                'remark': remark,
                'train_time': in_time.strftime('%Y-%m-%d %H:%M:%S')
            }
        return Result.success(data=result)


# 传输图片示例,将图片放到static文件夹下，调用get_path传入文件名获得绝对路径，url填写接收方接口的url
# def send_picture():
#     files = {
#         "image_file": ("demo.jpg", open(get_path('demo.jpg'), 'rb'))}
#     requests.post('http://127.0.0.1:8081/upload', files=files)


if __name__ == '__main__':
    # Manual smoke test: train yield-strength models grouped by steel
    # grade (sg_small_class) over a two-year window.  Requires the DB and
    # the app package to be importable; not a unit test.
    test_data = {
        "begin_data":"2021-05-08 00:00:00",
        "end_data": "2023-05-08 16:42:32",
        "Y": "yield_str",
        "tablename":"XNYB.TGBX_YIELD",
        "group_var": "sg_small_class",
        "algorithm":"xgboost",
        "modelname":"n11屈服预测"
}
    PerformancePredirtionModel().prediction_model(test_data)
