import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import r2_score, mean_squared_error
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
import time
import shap
import warnings
from sklearn.multioutput import MultiOutputRegressor
warnings.filterwarnings("ignore", category=UserWarning)


class GradientBoostingModel:
    """Gradient-boosting regression pipeline.

    Loads a dataset from an Excel file, splits it into train/test sets,
    standardizes the features, trains a ``GradientBoostingRegressor`` with
    early stopping, reports evaluation metrics, and can plot the per-stage
    loss curve.
    """

    def __init__(self, task_type, model_type, excel_path, features, target,
                 test_size=0.2, random_state=42,
                 learning_rate=0.1, max_depth=6, min_samples_leaf=1,
                 min_samples_split=2, n_estimators=90, progress_callback=None):
        """Load data, split, scale, and build the (unfitted) model.

        Args:
            task_type: Caller-supplied task label, echoed back in results.
            model_type: Caller-supplied model label, echoed back in results.
            excel_path: Path to the Excel file containing the dataset.
            features: List of column names used as model inputs.
            target: Column name (or list of names) used as the target.
            test_size: Fraction of rows held out for testing.
            random_state: Seed for the split and the model.
            learning_rate, max_depth, min_samples_leaf, min_samples_split,
            n_estimators: GradientBoostingRegressor hyperparameters.
            progress_callback: Optional callable taking a percentage (0-100),
                invoked after each boosting stage during ``train()``.
        """
        # pd.read_excel already returns a DataFrame; no extra wrapping needed.
        self.df = pd.read_excel(excel_path)

        self.task_type = task_type
        self.model_type = model_type

        # Feature matrix and target vector.
        self.X = self.df[features]
        self.y = self.df[target]

        # Train/test split.
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.X, self.y, test_size=test_size, random_state=random_state)

        # Standardize features; fit the scaler on the training set only
        # to avoid information leakage into the test set.
        self.scaler = StandardScaler()
        self.X_train_scaled = self.scaler.fit_transform(self.X_train)
        self.X_test_scaled = self.scaler.transform(self.X_test)

        # Model with early stopping on an internal 20% validation split.
        self.gb_model = GradientBoostingRegressor(
            random_state=random_state,
            n_iter_no_change=10,
            validation_fraction=0.2,
            subsample=0.8,
            learning_rate=learning_rate,
            max_depth=max_depth,
            min_samples_leaf=min_samples_leaf,
            min_samples_split=min_samples_split,
            n_estimators=n_estimators,
        )
        self.progress_callback = progress_callback  # optional progress reporter

    def train(self):
        """Fit the model once, print metrics, and return them as a dict.

        Returns:
            dict with task/model labels, training time, train/test R² and
            MSE, and 5-fold cross-validation scores.
        """
        start_time_gb = time.time()

        # FIX: the previous implementation refit the whole model from scratch
        # for every i in 1..n_estimators (O(n^2) boosting rounds) just to track
        # progress. A single fit with sklearn's `monitor` callback reports
        # per-stage progress instead; the final fitted model is the same
        # (fixed random_state, full n_estimators).
        total_estimators = self.gb_model.n_estimators

        def _report_progress(iteration, _model, _local_vars):
            # Called after each boosting stage; returning False means
            # "do not stop early" (early stopping is still handled by
            # n_iter_no_change / validation_fraction).
            if self.progress_callback is not None:
                self.progress_callback((iteration + 1) / total_estimators * 100)
            return False

        self.gb_model.fit(self.X_train_scaled, self.y_train,
                          monitor=_report_progress)
        self.gbr_train_time = time.time() - start_time_gb

        # Predictions on both splits.
        self.y_train_pred = self.gb_model.predict(self.X_train_scaled)
        self.y_test_pred = self.gb_model.predict(self.X_test_scaled)

        # R² and MSE scores.
        self.r2_train = r2_score(self.y_train, self.y_train_pred)
        self.r2_test = r2_score(self.y_test, self.y_test_pred)
        self.mse_train = mean_squared_error(self.y_train, self.y_train_pred)
        self.mse_test = mean_squared_error(self.y_test, self.y_test_pred)

        # Report results.
        print(f"梯度提升训练时间: {self.gbr_train_time:.2f} 秒")
        print(f"梯度提升模型训练集R²: {self.r2_train}")
        print(f"梯度提升模型测试集R²: {self.r2_test}")
        print(f"梯度提升模型训练集MSE: {self.mse_train}")
        print(f"梯度提升模型测试集MSE: {self.mse_test}")

        # Generalization estimate via 5-fold cross-validation on the
        # training split (cross_val_score clones the fitted model).
        cv_scores = cross_val_score(self.gb_model, self.X_train_scaled,
                                    self.y_train, cv=5, scoring='r2')
        print(f"梯度提升交叉验证得分: {np.round(cv_scores, 4)}")
        cv_mean_score = np.mean(cv_scores)
        print(f"梯度提升交叉验证平均得分: {cv_mean_score}")

        results = {
            "task_type": self.task_type,
            "model_type": self.model_type,
            "model_name": "梯度提升树",
            "train_time": self.gbr_train_time,
            "r2_train": self.r2_train,
            "r2_test": self.r2_test,
            "mse_train": self.mse_train,
            "mse_test": self.mse_test,
            "cv_scores": cv_scores.tolist(),
            "cv_mean_score": cv_mean_score,
        }
        return results

    def plot_loss_curve(self):
        """Plot train/test MSE after each boosting stage and save the figure."""
        train_loss = []
        test_loss = []

        # staged_predict yields predictions after each boosting stage,
        # so the lists trace the loss over the course of training.
        for y_train_pred, y_test_pred in zip(self.gb_model.staged_predict(self.X_train_scaled),
                                             self.gb_model.staged_predict(self.X_test_scaled)):
            train_loss.append(mean_squared_error(self.y_train, y_train_pred))
            test_loss.append(mean_squared_error(self.y_test, y_test_pred))

        plt.figure(figsize=(10, 6))
        plt.plot(range(1, len(train_loss) + 1), train_loss, label='Training Loss', color='blue')
        plt.plot(range(1, len(test_loss) + 1), test_loss, label='Testing Loss', color='orange')
        plt.title('Loss Curve')
        plt.xlabel('Number of Estimators')
        plt.ylabel('Mean Squared Error')
        plt.legend()
        plt.grid()
        # NOTE(review): hard-coded, machine-specific output path — consider
        # making this a parameter.
        plt.savefig(r'C:\Users\13945\Desktop\picture\plot1.png')
        plt.show()
        plt.close()
# test_size=0.2,random_state=42,learning_rate=0.1,max_depth=6,min_samples_leaf=1,min_samples_split=2,n_estimators=90
def main(
         task_type=None,
         model_type=None,
         test_size=None,
         random_state=None,
         learning_rate=None,
         max_depth=None,
         min_samples_leaf=None,
         min_samples_split=None,
         n_estimators=None):
    """Build, train, and evaluate the gradient-boosting model.

    Any hyperparameter left as None falls back to the class default.
    Returns the metrics dict produced by ``GradientBoostingModel.train``.
    """
    print(f"Parameters123:{task_type}, {model_type}, {test_size}, {random_state}, {learning_rate}, {max_depth}, {min_samples_leaf}, {min_samples_split}, {n_estimators}")

    # Fixed data source and schema.
    excel_path = r'C:\Users\13945\Desktop\MLDS铝合金成分设计数据.xlsx'
    features = ['Si/％', 'Mn/％', 'Zn/％', 'Mg/％', 'Cu/％', 'Cr/％', 'Zr/％', 'Ti/％', 'Fe/％', 'Ni/％', 'other']
    target = 'Ultimate tensile strength/MPa'

    # Optional hyperparameters: only pass those that were explicitly given,
    # so the model's own defaults apply to the rest.
    optional_params = {
        'test_size': test_size,
        'random_state': random_state,
        'learning_rate': learning_rate,
        'max_depth': max_depth,
        'min_samples_leaf': min_samples_leaf,
        'min_samples_split': min_samples_split,
        'n_estimators': n_estimators,
    }
    supplied = {name: value for name, value in optional_params.items()
                if value is not None}

    model = GradientBoostingModel(
        task_type=task_type,
        model_type=model_type,
        excel_path=excel_path,
        features=features,
        target=target,
        **supplied
    )

    # Train, plot the loss curve, and hand the metrics back to the caller.
    results = model.train()
    model.plot_loss_curve()

    return results

# Run the full pipeline with default hyperparameters when executed as a script.
if __name__ == "__main__":
    main()
