#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
    Module Documentation
    here
"""

# Created by  : Zhang Chengdong
# Create Date : 2024/11/22 13:58
# Version = v0.1.0

__author__ = "Zhang Chengdong"
__copyright__ = "Copyright 2024. Large scale model"
__credits__ = ['Zhang Chengdong']

# Fixed misspelled dunder: "__liscence__" -> "__license__" (standard module metadata name).
__license__ = "MIT"
__version__ = "1.0.1"
__maintainer__ = "Zhang Chengdong"
__status__ = "Production"

import copy
import os
import shap
import json
import time
import glob
import logging
import joblib
import optuna
import logging
import seaborn as sns
import numpy as np
import pandas as pd
from datetime import datetime
from matplotlib import pyplot as plt
from typing import Tuple, Union
from functools import partial
from collections import defaultdict

from catboost import CatBoostRegressor, Pool
from catboost import EFeaturesSelectionAlgorithm

from sklearn.decomposition import PCA
from sklearn.feature_selection import RFE, RFECV
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import cross_val_score, train_test_split, KFold

from service.FeatureTool.feature_tools import FeatureTools as featool
from .data_outlier_process import AbnormalDataFilter

# Configures the ROOT logger at import time (process-wide side effect: every
# importer inherits this level/format).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


# plt.rcParams['font.sans-serif'] = [os.path.join(settings.BASE_DIR, "static/simhei.ttf")]  # 用黑体显示中文
# plt.rcParams['axes.unicode_minus'] = False  # 解决负号显示问题


class NonLinearModel():
    """
    """

    def __init__(self, data: pd.DataFrame, save_model_path: str, model_name: str, mas_model_type: str,
                 train_optuna: dict, model_type_suffix: dict):
        self.data = data
        self.model_name = model_name
        self.mas_model_type = mas_model_type
        self.train_optuna = train_optuna
        self.model_type_suffix = model_type_suffix
        self.save_pca_path = os.path.join(save_model_path, "PCAmodel")
        self.save_model_path = os.path.join(save_model_path, self.model_name)  # 模型保存的路径
        self.model_filename = None  # 模型名称（根据时间自命名）
        self.thresher = None
        self.target_1d = '水泥1天实测值'
        self.target_3d = "水泥3天实测值"
        self.target_28d = "水泥28天实测值"
        self.model_type = None
        self.columns_mean = None
        feature_col = self.data.columns.tolist()
        remove_list = ['磨号', '品种', "时间"]
        feature_col = list(set(feature_col) - set(remove_list))
        self.target_list = [self.target_1d, self.target_3d, self.target_28d]
        self.feature = list(set(feature_col) - set(self.target_list))
        if not os.path.exists(self.save_model_path):
            os.makedirs(self.save_model_path)

        if not os.path.exists(self.save_pca_path):
            os.makedirs(self.save_pca_path)

    # ================================= 数据处理部分 ========================================
    def deal_data(self, origin_data, flag_28: bool = False) -> pd.DataFrame:
        """
        Fill missing 1-day (and optionally 3-day) measured strengths with the
        column mean, and add the rolling mean of the previous two 1-day values.

        :param origin_data: DataFrame holding the measured-strength columns
        :param flag_28: when True, also fill '水泥3天实测值' (28-day model path)
        :return: the same DataFrame with '前2天1天实测均值' added
        """
        # Assign back instead of Series.fillna(inplace=True): the in-place form
        # on a column selection is chained assignment — deprecated in pandas and
        # liable to operate on a copy without effect.
        origin_data['水泥1天实测值'] = origin_data['水泥1天实测值'].fillna(origin_data['水泥1天实测值'].mean())
        # origin_data['水泥1天实测值'].interpolate(method='linear', inplace=True)  # linear interpolation (alternative)
        # Mean of the two preceding rows' 1-day values; shift(1) keeps only past data.
        origin_data['前2天1天实测均值'] = origin_data['水泥1天实测值'].rolling(window=2).mean().shift(1)
        if flag_28:
            origin_data['水泥3天实测值'] = origin_data['水泥3天实测值'].fillna(origin_data['水泥3天实测值'].mean())
            # origin_data['水泥3天实测值'].interpolate(method='linear', inplace=True)  # linear interpolation (alternative)
        return origin_data

    def characteristic_engineering(self, df: pd.DataFrame, feature_combination: list = None) -> pd.DataFrame:
        """
        Build combined/derived features from the given columns.

        :param df: input DataFrame
        :param feature_combination: column names handed to FeatureTools for combination
        :return: DataFrame extended with the combined features
        """
        tool = featool(df, feature_combination)
        return tool.combination_feature()

    def deal_na_data(self, data: pd.DataFrame, columns_name_list: list) -> pd.DataFrame:
        """
        Drop every row that has a NaN in any of the given columns.

        :param data: input DataFrame (not mutated)
        :param columns_name_list: columns that must be non-null for a row to survive
        :return: a new DataFrame containing only the complete rows
        """
        keep = data[columns_name_list].notna().all(axis=1)
        return data[keep]

    def compute_promotion_rate(self, data_df: pd.DataFrame, compute_promotion_dict: dict = None) -> pd.DataFrame:
        """
        Derive the "strength gain" (工况) features from the measured values.

        :param data_df: input DataFrame; must contain a '时间' column
        :param compute_promotion_dict: switches {"use_1": bool, "use_3": bool}.
            NOTE: 'use_1' is flipped to False in place when the 1-day column is
            all-NaN — callers holding the dict observe that change.
        :return: DataFrame sorted by time with the 工况 columns added
        """
        if compute_promotion_dict is None:
            compute_promotion_dict = {"use_1": True, "use_3": False}
        data_df['时间'] = pd.to_datetime(data_df['时间'])
        data_df = data_df.sort_values(by="时间")
        if compute_promotion_dict['use_3']:
            # Gain from day 1 to day 3, shifted so each row only sees past data.
            data_df['工况3_1'] = data_df['水泥3天实测值'] - data_df['水泥1天实测值']
            # TODO 是否考虑向下移动3天 (consider shifting by 3 days instead?)
            data_df['工况3_1'] = data_df['工况3_1'].shift(1)

        if compute_promotion_dict['use_1']:
            # Use `not` instead of bitwise `~` on the reduced boolean: applied to
            # a plain Python bool, `~` yields -2/-1 (always truthy) rather than a
            # logical negation, so the guard could never trigger.
            if not data_df['水泥1天实测值'].isna().all():
                data_df['工况1_1'] = data_df['水泥1天实测值'].diff().abs().shift(1)
            else:
                compute_promotion_dict['use_1'] = False
        return data_df

    # ============================================ 最优超参查找部分 =======================================
    def object_tive_old(self, trial, x_train, y_train):
        """
        (Legacy) optuna objective: sample hyper-parameters and score them with
        5-fold cross-validated MSE.

        :param trial: optuna Trial used to sample hyper-parameters
        :param x_train: training features
        :param y_train: training targets
        :return: mean cross-validated MSE (positive; optuna minimizes it)
        :raises ValueError: if ``self.model_name`` is not supported
        """
        model_name = self.model_name

        # Build the hyper-parameter search space for the configured model.
        if model_name == 'CatBoost':
            params = {
                'thread_count': 10,
                'iterations': trial.suggest_int('iterations', 2, 5, log=True),
                'learning_rate': trial.suggest_float('learning_rate', 0.001, 0.1, log=True),
                'depth': trial.suggest_int('depth', 3, 10, log=True),
                # 'l2_leaf_reg': trial.suggest_float('l2_leaf_reg', 1, 20, log=True),
                "verbose": False,
                'loss_function': 'RMSE',
            }
            params.update(self.train_optuna)
        elif model_name == "LightGBM":
            # NOTE(review): these LightGBM-style params are still passed to
            # CatBoostRegressor below (no LGBMRegressor is imported in this
            # file), so this branch cannot work as intended — confirm.
            params = {
                'boosting_type': 'gbdt',
                'objective': 'regression',
                'num_leaves': 31,
                'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.1, log=True),
                'verbose': 0,
                'max_depth': trial.suggest_int('max_depth', 5, 10)
            }
        else:
            # Previously an unknown model name crashed with NameError on `params`;
            # fail with a clear message instead.
            raise ValueError(f"Unsupported model_name: {model_name!r}")
        model = CatBoostRegressor(**params)
        kf = KFold(n_splits=5, shuffle=True, random_state=42)
        if model_name == 'CatBoost':
            scores = cross_val_score(model, x_train, y_train, cv=kf, scoring='neg_mean_squared_error',
                                     fit_params={'cat_features': ['月份']})
        else:
            scores = cross_val_score(model, x_train, y_train, cv=kf, scoring='neg_mean_squared_error')
        score = np.mean(scores)
        # cross_val_score returns negated MSE; negate back to a positive loss.
        return -score

    def object_tive(self, trial, x_train, y_train):
        """
        Optuna objective: sample CatBoost hyper-parameters and score them with
        5-fold cross-validated MSE.

        :param trial: optuna Trial used to sample hyper-parameters
        :param x_train: training features
        :param y_train: training targets
        :return: mean cross-validated MSE (positive; optuna minimizes it)
        """
        # Search space: fixed iteration budget, log-uniform learning rate.
        # (Removed the unused local `model_name = self.model_name` — this
        # objective is CatBoost-only.)
        params = {
            'thread_count': 10,
            'iterations': 150,
            'learning_rate': trial.suggest_float('learning_rate', 0.001, 0.1, log=True),
            'depth': trial.suggest_int('depth', 2, 10),
            "verbose": False,
            'loss_function': 'RMSE',
        }
        model = CatBoostRegressor(**params)
        kf = KFold(n_splits=5, shuffle=True, random_state=42)
        # cross_val_score yields negated MSE; negate back to a positive loss.
        scores = cross_val_score(model, x_train, y_train, cv=kf, scoring='neg_mean_squared_error')
        return -np.mean(scores)

    def get_best_parameter(self, x_train, y_train, feature_name):
        """
        Search for the best hyper-parameters with optuna
        (100 trials across 5 parallel workers, minimizing CV MSE).

        :param x_train: training features
        :param y_train: training targets
        :param feature_name: unused here; kept for interface compatibility
        :return: dict of the best trial's parameters
        """
        objective = partial(self.object_tive, x_train=x_train, y_train=y_train)
        study = optuna.create_study(direction="minimize")
        study.optimize(objective, n_trials=100, n_jobs=5)
        return study.best_trial.params

    # ================================== 模型保存与画图 =======================================
    def save_transform_model_to_pkl(self, feature_list: list, pca_model: PCA):
        """
        Persist the fitted PCA model together with its (normalized) feature names.

        :param feature_list: names of the columns the PCA was fitted on;
            '-' is replaced with '_' to match the naming used at load time
        :param pca_model: fitted sklearn PCA instance
        """
        normalized_names = [name.replace("-", "_") for name in feature_list]
        payload = {
            'feature_list': normalized_names,
            'pca_model': pca_model,
        }
        joblib.dump(payload, os.path.join(self.save_pca_path, 'PCA.pkl'))

    def save_model_to_pkl(self, model, mse, r2, linear_coef, feature_names, target_name, cat_features="月份",
                          best_pass_rate=None,
                          model_type="model3_1d", columns_mean=None, feature_combination=None, use_pca_func=False):
        """
        Serialize the trained model and its metadata to
        ``<save_model_path>/<mas_model_type>_<model_type>.pkl``.

        :param model: trained estimator to persist
        :param mse: test-set mean squared error
        :param r2: test-set R² score
        :param linear_coef: coefficient of the auxiliary linear model
        :param feature_names: feature columns the model was trained on
        :param target_name: name of the predicted target column
        :param cat_features: categorical feature name(s) (CatBoost only)
        :param best_pass_rate: share of predictions within tolerance
        :param model_type: logical model kind, used in the file name
        :param columns_mean: per-column means used to impute at inference time
        :param feature_combination: columns used for feature combination
        :param use_pca_func: whether PCA-derived features were used
        :raises ValueError: if ``self.model_name`` is not supported
        """
        if not os.path.exists(self.save_model_path):
            os.makedirs(self.save_model_path)
        self.model_filename = os.path.join(self.save_model_path,
                                           '{}_{}.pkl'.format(self.mas_model_type, model_type))
        if self.model_name == "CatBoost":
            model_info = {
                "type": "通用模型",
                'model': model,
                'feature_names': feature_names,
                'target_name': target_name,
                'cat_features': cat_features,
                'model_name': model_type,
                "mse": mse,
                "r2": r2,
                "use_pca_func": use_pca_func,
                "pass_rate": best_pass_rate,
                "columns_mean": columns_mean,
                "feature_combination": feature_combination,
                "linear_coef": linear_coef
            }
        elif self.model_name == "LightGBM":
            model_info = {
                'model': model,
                'feature_names': feature_names,
                'target_name': target_name,
                'model_name': model_type
            }
        else:
            # Previously fell through with `model_info` undefined -> NameError;
            # fail with an explicit message instead.
            raise ValueError(f"Unsupported model_name: {self.model_name!r}")
        joblib.dump(model_info, self.model_filename)
        print(f"模型已保存到 {self.model_filename}")

    def optimal_parameters(self, model_parameters: dict, save_para_for_model_path: str):
        """
        将最优参数进行保存
        :param model_parameters:
        :param save_para_for_model_path:
        :return:
        """
        if not os.path.exists(self.save_model_path):
            os.makedirs(self.save_model_path)
        save_para_for_model = save_para_for_model_path
        with open(save_para_for_model, "w", encoding="utf-8") as f:
            json.dump(model_parameters, f, indent=4)
        print(f"模型参数已经保存到 {save_para_for_model}")

    def load_model_parameters(self, path: str):
        """
        Read hyper-parameters back from a JSON file written by
        :meth:`optimal_parameters`.

        :param path: path of the JSON parameter file
        :return: dict of model parameters
        """
        with open(path, "r", encoding="utf-8") as fp:
            return json.load(fp)

    def test_and_eval(self, final_model, x_test, y_test, feature: list = None, tolerance: float = 0.5):
        """
        Predict on the test set and log MSE, R² and the pass rate (share of
        predictions within ±tolerance of the truth).

        :param final_model: fitted estimator exposing ``predict``
        :param x_test: test features (DataFrame)
        :param y_test: test targets
        :param feature: columns to feed the model; defaults to ``self.feature``
        :param tolerance: absolute error bound counted as a "pass"
        """
        if feature is None:
            # Previously a None here crashed on x_test[None]; fall back to the
            # instance-wide feature list, as plt_all_local_image already does.
            feature = self.feature
        y_pred = final_model.predict(x_test[feature])
        mse = mean_squared_error(y_test, y_pred)
        r2 = r2_score(y_test, y_pred)
        # Pass rate: fraction of |pred - truth| within the tolerance band.
        # (Dropped the redundant `tolerance = tolerance` self-assignment.)
        within_tolerance = np.abs(y_pred - y_test) <= tolerance
        percentage_within_tolerance = np.mean(within_tolerance) * 100
        logging.info(f"MSE: {mse}")
        logging.info(f"R2 Score: {r2}")
        logging.info(f"Percentage of predictions within ±{tolerance}: {percentage_within_tolerance:.2f}%")
        # self.plt_all_local_image(x_test[feature], self.model_filename, feature=feature)
        # self.plt_train_test_images(y_pred.tolist(), y_test.tolist(), within_tolerance.tolist(), mse, r2, tolerance,
        #                            percentage_within_tolerance)

    def load_model(self, model_filename: str = None):
        """
        Load a pickled model bundle from disk.

        :param model_filename: path to load; when None, reuse
            ``self.model_filename``. When given, it also becomes the new
            ``self.model_filename``.
        :return: the deserialized model-info dict
        """
        if model_filename is not None:
            self.model_filename = model_filename
        return joblib.load(self.model_filename)

    def plt_train_test_images(self, y_pred, y_test, within_tolerance, mse, r2, tolerance, percentage_within_tolerance):
        """
        Plot actual vs. predicted curves, annotate every prediction (red when it
        misses the tolerance band), and save the figure next to the model file.

        :param y_pred: predicted values (list)
        :param y_test: ground-truth values (list)
        :param within_tolerance: per-sample pass flags
        :param mse: mean squared error shown in the title
        :param r2: R² score shown in the title
        :param tolerance: tolerance band shown in the title
        :param percentage_within_tolerance: pass rate shown in the title
        """
        plt.figure(figsize=(10, 6))
        sns.lineplot(x=range(len(y_test)), y=y_test, label='Actual')
        sns.lineplot(x=range(len(y_pred)), y=y_pred, label='Predicted')

        # Per sample: draw the ±thresher band and annotate the prediction,
        # colouring misses red.
        for i in range(len(y_test)):
            plt.plot([i, i], [y_test[i] - self.thresher, y_test[i] + self.thresher], color='grey', linestyle='--',
                     linewidth=0.5)
            annotation_colour = 'black' if within_tolerance[i] else 'red'
            plt.text(i, y_test[i], f'{y_pred[i]:.2f}', color=annotation_colour, ha='center', va='bottom')

        plt.xlabel('Sample Index')
        plt.ylabel('Value')
        plt.title(
            f"MSE: {mse}\nR2 Score: {r2}\n Percentage of predictions within ±{tolerance}: {percentage_within_tolerance:.2f}%")
        plt.legend()

        # Save beside the model file: same base name with a _test.png suffix.
        base_name = os.path.splitext(self.model_filename)[0]
        plt.savefig("{}_test.png".format(base_name), dpi=300, bbox_inches='tight')

        plt.show()

    def plt_all_local_image(self, x_test, model_filename: str = None, feature: list = None):
        """
        Render a SHAP global feature-importance plot for the stored model and
        save it under a directory named after the model file.

        :param x_test: test features (DataFrame)
        :param model_filename: model pickle to load; defaults to the last saved one
        :param feature: columns to explain; defaults to ``self.feature``
        """
        if feature is None:
            feature = self.feature
        loaded_model = self.load_model(model_filename)['model']
        # Explain the tree model's predictions with SHAP values.
        explainer = shap.TreeExplainer(loaded_model)
        shap_values = explainer.shap_values(x_test[feature])
        # One image per call, named by millisecond timestamp to avoid clashes.
        img_name = str(int(time.time() * 1000)) + ".png"
        image_path = os.path.splitext(self.model_filename)[0]  # model path without extension
        if not os.path.exists(image_path):
            os.makedirs(image_path)
        image_plt_path = os.path.join(image_path, img_name)
        try:
            shap.summary_plot(shap_values, x_test[feature], feature_names=feature, show=False)
            plt.savefig(image_plt_path, bbox_inches='tight')
        except Exception:
            # Plotting stays best-effort, but log the full traceback instead of
            # a bare print(e) so failures are visible in the application log.
            logging.exception("Failed to render/save SHAP summary plot")
        finally:
            # Always release the figure, success or failure.
            plt.close()

    def convert_month_to_category(self, data: pd.DataFrame, model_name: str) -> pd.DataFrame:
        """
        Encode the '月份' (month) column as integer category codes, but only for
        LightGBM; other model names leave the column untouched.

        :param data: input DataFrame
        :param model_name: model name, e.g. 'LightGBM' or 'CatBoost'
        :return: the (possibly mutated) DataFrame
        """
        if model_name != 'LightGBM':
            return data
        data['月份'] = data['月份'].astype('category').cat.codes
        return data

    def cal_linear_coef(self, x, y):
        """
        Fit a single-feature, no-intercept linear model y ≈ w * clinker_ratio
        and return its coefficient.

        :param x: DataFrame containing the 'DCS反馈配比平均值_熟料' column
        :param y: target values
        :return: the fitted slope as a plain float
        """
        # Single-column design matrix; note the underscore variant of the column
        # name (presumably already normalized from '-' upstream — TODO confirm).
        x = pd.DataFrame(x['DCS反馈配比平均值_熟料'], columns=['DCS反馈配比平均值_熟料'])

        # No-intercept linear regression.
        model = LinearRegression(fit_intercept=False)
        model.fit(x, y)

        # float() on the whole coef_ array is deprecated for size-1 arrays in
        # NumPy >= 1.25; index the single coefficient explicitly.
        w = float(model.coef_[0])
        return w

    # ====================================== 模型主训练方法部分 ================================
    def select_model_params_old(self, model_name: str, final_model: Union[CatBoostRegressor], x_train,
                                y_train, pass_threshold=1.) -> Tuple:
        """
        (Legacy) Train the model and keep the best CV fold.

        LightGBM: a single plain fit. CatBoost: 2-fold cross-validation with
        early stopping, keeping the fold whose validation RMSE is lowest.

        NOTE(review): the LightGBM branch returns a 3-tuple while the CatBoost
        branch returns a 4-tuple, and any other model name falls through
        returning None — callers must handle all three shapes. Also note that
        the same `final_model` instance is re-fitted on every fold.

        :param model_name: 'LightGBM' or 'CatBoost'
        :param final_model: estimator instance to fit
        :param x_train: training features (DataFrame)
        :param y_train: training targets (Series)
        :param pass_threshold: absolute error counted as a "pass"
        :return: fitted model plus fold metrics (shape depends on the branch)
        """
        if model_name == "LightGBM":

            final_model.fit(x_train, y_train)
            return final_model, None, None

        elif model_name == "CatBoost":
            best_model = None
            best_score = float("inf")
            best_mse = None
            best_r2 = None
            best_pass_rate = None
            # 2-fold split; no fixed random_state, so folds differ between runs.
            kf = KFold(n_splits=2, shuffle=True)
            for fold, (train_index, val_index) in enumerate(kf.split(x_train)):
                x_train_fold, x_val_fold = x_train.iloc[train_index], x_train.iloc[val_index]
                y_train_fold, y_val_fold = y_train.iloc[train_index], y_train.iloc[val_index]
                train_pool = Pool(x_train_fold, y_train_fold, cat_features=['月份'])
                val_pool = Pool(x_val_fold, y_val_fold, cat_features=['月份'])
                final_model.fit(train_pool, eval_set=val_pool, early_stopping_rounds=50, plot=True)
                val_score = final_model.get_best_score()['validation']['RMSE']
                # Validation-set MSE and R² for the current fold.
                y_val_pred = final_model.predict(val_pool)
                mse = mean_squared_error(y_val_fold, y_val_pred)
                r2 = r2_score(y_val_fold, y_val_pred)

                # Pass rate: share of absolute errors below the threshold.
                abs_errors = np.abs(y_val_fold - y_val_pred)
                pass_rate = np.mean(abs_errors < pass_threshold)
                # Keep the fold with the lowest validation RMSE.
                if val_score < best_score:
                    best_score = val_score
                    best_mse = mse
                    best_r2 = r2
                    best_pass_rate = pass_rate
                    best_model = final_model.copy()
            return best_model, best_mse, best_r2, best_pass_rate

    def select_model_params(self, model_name: str, final_model: Union[CatBoostRegressor], x_train,
                            y_train, pass_threshold=1., x_test=None, y_test=None,
                            select_model_feature: bool = False) -> Tuple:
        """
        Train the model (optionally after RFECV feature selection) and evaluate
        it on the held-out test set.

        :param model_name: 'LightGBM' or 'CatBoost'
        :param final_model: estimator instance to fit
        :param x_train: training features (DataFrame)
        :param y_train: training targets
        :param pass_threshold: absolute error counted as a "pass"
        :param x_test: test features
        :param y_test: test targets
        :param select_model_feature: when True, run RFECV to prune features first
        :return: (model, mse, r2, pass_rate_percent, selected_feature_names)
        :raises ValueError: on an unsupported model name
        """
        if model_name == "LightGBM":
            final_model.fit(x_train, y_train, plot=True)
            # Fixed: this branch previously returned only 4 values while the
            # CatBoost branch (and common_train_part, which unpacks 5) expect 5;
            # include the full feature list so callers no longer crash.
            return final_model, None, None, None, x_train.columns.tolist()

        elif model_name == "CatBoost":
            features_for_select = x_train.columns.tolist()  # all candidate features
            if select_model_feature is False:
                select_feature_name = features_for_select
                final_model.fit(x_train, y_train, eval_set=(x_test, y_test), use_best_model=True)
                y_pred = final_model.predict(x_test)
            else:
                # Recursive feature elimination with cross-validation: drop 10%
                # of the features per step, scored by R². (Earlier experiments
                # with CatBoost select_features and plain RFE were removed in
                # favour of RFECV.)
                rfecv = RFECV(
                    estimator=final_model,
                    step=0.1,
                    cv=5,
                    scoring="r2"
                )
                rfecv.fit(x_train, y_train)
                selected_features = rfecv.support_.tolist()
                select_feature_name = [item for item, flag in zip(features_for_select, selected_features) if flag]
                final_model.fit(x_train[select_feature_name], y_train, eval_set=(x_test[select_feature_name], y_test),
                                use_best_model=True)
                y_pred = final_model.predict(x_test[select_feature_name])

            mse = mean_squared_error(y_test, y_pred)
            r2 = r2_score(y_test, y_pred)
            # Pass rate: share of predictions within ±pass_threshold of truth.
            within_tolerance = np.abs(y_pred - y_test) <= pass_threshold
            percentage_within_tolerance = np.mean(within_tolerance) * 100
            return final_model, round(mse, 2), round(r2, 2), round(percentage_within_tolerance, 2), select_feature_name

        # Previously an unknown name silently returned None; fail loudly instead.
        raise ValueError(f"Unsupported model_name: {model_name!r}")

    def train_use_kfold(self, model_class, best_params, x_train, y_train):
        """
        Train one model per fold of a 5-fold CV split and return the model with
        the lowest validation MSE.

        :param model_class: estimator class instantiated fresh for every fold
        :param best_params: keyword parameters for the estimator
        :param x_train: training features (DataFrame)
        :param y_train: training targets (Series)
        :return: the fitted model with the best validation score
        """
        k = 5  # number of CV folds
        splitter = KFold(n_splits=k, shuffle=True, random_state=42)

        best_model, best_score = None, np.inf

        for fold, (train_idx, val_idx) in enumerate(splitter.split(x_train)):
            print(f"Training on fold {fold + 1}/{k}")

            # Fresh estimator per fold, trained on this fold's training slice.
            candidate = model_class(**best_params)
            candidate.fit(x_train.iloc[train_idx], y_train.iloc[train_idx], plot=True)

            # Validation MSE for this fold's model.
            val_pred = candidate.predict(x_train.iloc[val_idx])
            val_score = np.mean((val_pred - y_train.iloc[val_idx]) ** 2)

            # Track the best-scoring fold's model.
            if val_score < best_score:
                best_score, best_model = val_score, candidate
        return best_model

    # ============================================ 1, 3, 28天水泥强度模型训练部分 ============================
    def common_feature_data(self, need_combination: bool = False, feature_combination: list = None,
                            need_average_1d: bool = False, target_list: list = None, flag_28: bool = False,
                            need_chemical: bool = False, filter_columns: list = None, use_pca: bool = False,
                            compute_promotion: bool = True,
                            compute_promotion_dict: dict = None):
        """
        Shared data-preparation stage run before every model training routine.

        :param need_combination: whether to build combined features
        :param feature_combination: columns used for the feature combination
        :param need_average_1d: add the rolling mean of the previous two 1-day values
        :param target_list: label columns used to drop rows with missing targets
        :param flag_28: for the 28-day model, also impute '水泥3天实测值'
        :param need_chemical: keep chemical-analysis features (names containing "new")
        :param filter_columns: columns to filter on (accepted but unused in this method)
        :param use_pca: compress selected columns into a single PCA feature
        :param compute_promotion: whether to compute the 工况 (gain) features
        :param compute_promotion_dict: switches for the gain features
        :return: (data, feature_names, columns_mean, use_pca) — use_pca may be
            flipped to False when there is not enough data to fit the PCA
        """
        if compute_promotion is True and compute_promotion_dict is None:
            compute_promotion_dict = {"use_1": True, "use_3": False}
        # Work on a deep copy so self.data is never mutated by this pipeline.
        data = copy.deepcopy(self.data)
        data = self.deal_na_data(data, columns_name_list=target_list)
        data = data.reset_index(drop=True)
        if use_pca:
            pca = PCA(n_components=1)
            pca_columns = ['DCS反馈配比平均值-熟料', '过程质量平均值-CaO', '熟料3天强度预测', '熟料28天强度预测']
            use_pca_data = data.loc[~data['过程质量平均值-CaO'].isnull(), :]
            if data.shape[0] > 2 and use_pca_data.shape[0] != 0:

                # NOTE(review): x_data_pca is re-indexed from 0 while use_pca_data
                # keeps the filtered row index; this concat aligns by index, so
                # PCA values may attach to the wrong rows whenever any
                # '过程质量平均值-CaO' rows were filtered out — confirm.
                data_pca = pca.fit_transform(use_pca_data[pca_columns])
                x_data_pca = pd.DataFrame(data_pca, columns=['pca_feature'])
                data = pd.concat([data, x_data_pca], axis=1)
                logging.info("PCA模型完成降维")
                self.save_transform_model_to_pkl(pca_columns, pca)
            else:
                # Too little data to fit PCA: disable both PCA and combination.
                use_pca = False
                need_combination = False

        if compute_promotion:
            data = self.compute_promotion_rate(data, compute_promotion_dict)

        if need_combination and feature_combination is None:
            feature_combination = ["DCS反馈配比平均值-熟料", "熟料3天强度预测", "index_0"]

        if need_combination:
            data = self.characteristic_engineering(data, feature_combination)
            feature = data.columns.tolist()
            feature = list(set(feature) - set(self.target_list) - {'磨号', '品种', "时间", "index_0"})
        else:
            feature = data.columns.tolist()
            feature = list(set(feature) - set(self.target_list) - {'磨号', '品种', "时间", "index_0"})

        if need_average_1d:
            data = self.deal_data(data, flag_28)
            feature.append("前2天1天实测均值")

        if need_chemical is False:
            # Chemical-analysis features are the ones whose names contain "new".
            feature = [item for item in feature if "new" not in item]
        # Per-feature means (rounded) saved for inference-time imputation;
        # DCS/process-quality columns are deliberately excluded.
        columns_mean = data[feature].mean(numeric_only=True).round(2).to_dict()
        columns_mean = {k: v for k, v in columns_mean.items() if
                        "DCS反馈配比平均值" not in k and "过程质量平均值" not in k}
        # data.fillna(columns_mean, inplace=True)
        columns_mean['水泥1天实测值'] = data['水泥1天实测值'].mean()
        columns_mean['水泥3天实测值'] = data['水泥3天实测值'].mean()
        data = self.convert_month_to_category(data, model_name=self.model_name)
        # NOTE(review): list.remove raises ValueError if '熟料28天强度预测' is
        # absent from the feature list — confirm it is always present upstream.
        feature.remove("熟料28天强度预测")
        return data, feature, columns_mean, use_pca

    def common_train_part(self, model_name, final_model, x_train, y_train, threshold, x_test, y_test, model_type,
                          linear_coef,
                          select_feature_bool: bool = False, columns_mean: dict = None,
                          feature_combination: list = None, use_pca_func: bool = False):
        """
        Shared tail of every training routine: fit (with optional feature
        selection), persist the model bundle, then evaluate on the test set.

        :param model_name: 'LightGBM' or 'CatBoost'
        :param final_model: estimator instance to train
        :param x_train: training features
        :param y_train: training targets
        :param threshold: pass tolerance for the pass-rate metric
        :param x_test: test features
        :param y_test: test targets
        :param model_type: logical model kind used in saved file names
        :param linear_coef: coefficient of the auxiliary linear model
        :param select_feature_bool: run RFECV feature selection when True
        :param columns_mean: per-column means saved for inference-time imputation
        :param feature_combination: feature-combination columns saved with the model
        :param use_pca_func: whether PCA features are in play
        :return: (best_mse, best_r2, best_pass_rate)
        """
        fitted, best_mse, best_r2, best_pass_rate, select_features = self.select_model_params(
            model_name=model_name, final_model=final_model, x_train=x_train, y_train=y_train,
            pass_threshold=threshold, x_test=x_test, y_test=y_test, select_model_feature=select_feature_bool)
        self.save_model_to_pkl(fitted, best_mse, best_r2, linear_coef, select_features, self.target_1d,
                               cat_features='月份',
                               best_pass_rate=best_pass_rate,
                               model_type=model_type, columns_mean=columns_mean,
                               feature_combination=feature_combination, use_pca_func=use_pca_func)
        # TODO rework test_and_eval if callers ever need its R²/MSE back.
        self.test_and_eval(fitted, x_test, y_test, feature=select_features, tolerance=threshold)
        return best_mse, best_r2, best_pass_rate

    def train_model_3d(self, actual1d: bool = True, need_combination: bool = True, need_chemical: bool = True,
                       need_pca: bool = False, need_rfe: bool = True, promotion_rate: dict = None):
        """
        Train the 3-day cement-strength model.

        :param actual1d: use the measured 1-day value itself as a feature;
            otherwise its 2-day rolling mean is used instead
        :param need_combination: enable feature-combination augmentation
        :param need_chemical: keep chemical-analysis features
        :param need_pca: add a PCA-compressed feature
        :param need_rfe: run recursive feature elimination before fitting
        :param promotion_rate: switches for the 工况 (gain) features
        :return: (best_mse, best_r2, best_pass_rate), or (None, None, None)
            when there is too little data to train
        """
        self.model_type = "3"
        feature_combination = ["DCS反馈配比平均值-熟料", "熟料3天强度预测", "过程质量平均值-比表", "过程质量平均值-CaO",
                               "index_0"]
        filter_columns = ["DCS反馈配比平均值-熟料", "过程质量平均值-比表", "水泥3天实测值"]
        # The two original branches differed only in the model-type suffix and
        # in whether the raw 1-day value (vs. its rolling mean) is used.
        if actual1d:
            model_type = self.model_type_suffix["model3_1d"]
        else:
            model_type = self.model_type_suffix["model3_n1d"]
        data, feature, columns_mean, use_pca = self.common_feature_data(need_combination=need_combination,
                                                                        feature_combination=feature_combination,
                                                                        need_average_1d=not actual1d,
                                                                        target_list=[self.target_3d],
                                                                        need_chemical=need_chemical,
                                                                        filter_columns=filter_columns,
                                                                        compute_promotion_dict=promotion_rate,
                                                                        use_pca=need_pca)
        if actual1d:
            feature.append(self.target_1d)

        # Drop implausible targets (< 12) before training.
        data = data[data[self.target_3d] >= 12]
        data = data.reset_index(drop=True)
        if data.shape[0] <= 10:
            logging.info("训练3天水泥实测模型，有效数据量为：{}，不足以完成非线性模型训练".format(data.shape[0]))
            return None, None, None
        logging.info("训练3天水泥实测模型，使用的数据量为：{} ".format(data.shape[0]))

        x = data[feature]
        y = data[self.target_3d]

        linear_coef = self.cal_linear_coef(x, y)

        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
        save_para_for_model = os.path.join(self.save_model_path, "{}_optimal_parameters.json".format(model_type))
        self.thresher = 1.5
        model_class = CatBoostRegressor

        # Reuse previously tuned parameters when present; otherwise run the
        # optuna search once and persist the result for subsequent runs.
        if not os.path.exists(save_para_for_model):
            best_params = self.get_best_parameter(x_train, y_train, feature)
            best_params.update(self.train_optuna)
            self.optimal_parameters(best_params, save_para_for_model)
        else:
            best_params = self.load_model_parameters(save_para_for_model)
        final_model = model_class(**best_params)
        # NOTE(review): `use_pca` returned by common_feature_data is not
        # forwarded as use_pca_func here, so the saved model always records
        # use_pca_func=False even when PCA features were added — confirm.
        best_mse, best_r2, best_pass_rate = self.common_train_part(self.model_name, final_model, x_train, y_train,
                                                                   self.thresher,
                                                                   x_test, y_test, model_type, linear_coef,
                                                                   select_feature_bool=need_rfe,
                                                                   columns_mean=columns_mean,
                                                                   feature_combination=feature_combination)
        return best_mse, best_r2, best_pass_rate

    def train_model_28d(self, actual1d: bool = False, actual3d: bool = False, need_combination: bool = True,
                        need_chemical: bool = True, need_pca: bool = True, need_rfe: bool = True,
                        promotion_rate: dict = None):
        """
        Train the 28-day measured cement strength model.

        :param actual1d: whether to add the 1-day measured value as a feature
        :param actual3d: whether to add the 1-day and 3-day measured values as features
        :param need_combination: whether to apply feature combination for feature enhancement
        :param need_chemical: whether chemical-analysis features are used during training
        :param need_pca: whether to add PCA-reduced features as feature enhancement
        :param need_rfe: whether to use recursive feature elimination for feature selection
        :param promotion_rate: working-condition adjustment dict for 1-day / 3-day measured values
        :return: (best_mse, best_r2, best_pass_rate), or (None, None, None) when there are
                 too few valid samples to train the non-linear model
        """
        self.model_type = "28"
        filter_columns = ["DCS反馈配比平均值-熟料", "过程质量平均值-比表", "水泥28天实测值"]
        # All three configurations share the same combination seed; they differ only in
        # which measured targets are appended as features and which model suffix is used.
        feature_combination = ["DCS反馈配比平均值-熟料", "熟料3天强度预测", "过程质量平均值-CaO", "index_0"]
        # The 1-day average feature is only needed when neither measured value is supplied.
        need_average_1d = not (actual1d or actual3d)
        data, feature, columns_mean, use_pca = self.common_feature_data(need_combination=need_combination,
                                                                        feature_combination=feature_combination,
                                                                        need_average_1d=need_average_1d,
                                                                        target_list=[self.target_28d],
                                                                        flag_28=False, need_chemical=need_chemical,
                                                                        filter_columns=filter_columns,
                                                                        use_pca=need_pca,
                                                                        compute_promotion_dict=promotion_rate)
        if actual1d:
            feature.extend([self.target_1d])
            model_type = self.model_type_suffix["model28_1d"]
        elif actual3d:
            feature.extend([self.target_1d, self.target_3d])
            model_type = self.model_type_suffix["model28_3d"]
        else:
            model_type = self.model_type_suffix["model28_n1d"]

        # Keep only plausible 28-day strengths (>= 32) as training samples.
        data = data[data[self.target_28d] >= 32]
        data = data.reset_index(drop=True)
        if data.shape[0] <= 10:
            logging.info("训练28天水泥实测模型，有效数据量为：{}，不足以完成非线性模型训练".format(data.shape[0]))
            return None, None, None
        logging.info("训练28天水泥实测模型，使用的数据量为：{} ".format(data.shape[0]))

        x = data[feature]
        y = data[self.target_28d]

        linear_coef = self.cal_linear_coef(x, y)

        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
        # BUG FIX: the original tested `os.path.join(self.save_model_path)` — always truthy
        # for a non-empty path — so the directory was never created and later file writes
        # could fail. makedirs with exist_ok also creates missing parents without racing.
        os.makedirs(self.save_model_path, exist_ok=True)
        save_para_for_model = os.path.join(self.save_model_path, "{}_optimal_parameters.json".format(model_type))
        self.thresher = 1.5
        model_class = CatBoostRegressor

        # Reuse persisted hyper-parameters when available; otherwise run the Optuna search
        # once and persist the winning configuration for subsequent runs.
        if not os.path.exists(save_para_for_model):
            best_params = self.get_best_parameter(x_train, y_train, feature)
            best_params.update(self.train_optuna)
            self.optimal_parameters(best_params, save_para_for_model)
        else:
            best_params = self.load_model_parameters(save_para_for_model)

        final_model = model_class(**best_params)
        best_mse, best_r2, best_pass_rate = self.common_train_part(self.model_name, final_model, x_train, y_train,
                                                                   self.thresher,
                                                                   x_test, y_test, model_type, linear_coef,
                                                                   select_feature_bool=need_rfe,
                                                                   columns_mean=columns_mean,
                                                                   feature_combination=feature_combination)
        return best_mse, best_r2, best_pass_rate
