from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeRegressor
from math import sqrt
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import r2_score
import joblib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.legend_handler import HandlerLine2D
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd

# Helper: mean absolute percentage error (MAPE)
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error between *y_true* and
    *y_pred*, expressed as a percentage (e.g. 25.0 for a 25% error).

    Note: divides by *y_true*, so zero targets produce inf/nan entries.
    """
    actual = np.asarray(y_true)
    predicted = np.asarray(y_pred)
    relative_errors = np.abs((actual - predicted) / actual)
    return relative_errors.mean() * 100

def MyTreeRegression():
    """Train and evaluate a decision-tree regression model for flight prices.

    Workflow:
      1. Load the preprocessed Ctrip flight dataset from
         "携程航班处理数据.xlsx"; drop the target (``price``) and the
         ``flightNo`` identifier from the feature matrix.
      2. Map every categorical column to its numeric code using the lookup
         tables stored as ``./encode/<column>_encoded.xlsx``.
      3. Split 70/30, fit a DecisionTreeRegressor, persist it to
         ``./model/best_tree.joblib``, and print RMSE / MAPE / R-squared
         for both splits.

    The nested ``change*`` helpers plot train/test R2 while sweeping one
    hyperparameter, and ``picShow`` plots the fit on the training data;
    they are defined for interactive exploration and are not called here.
    """
    # Preprocessed Ctrip flight data.
    train_df = pd.read_excel("携程航班处理数据.xlsx")

    # Features = everything except the target price and the flight number.
    data = train_df.drop(["price", "flightNo"], axis=1)

    # Separate categorical from numerical columns.  .copy() avoids a
    # pandas SettingWithCopyWarning when the encoded values are assigned
    # back into the frame below.
    train_categorical_data = data.select_dtypes(
        exclude=['int64', 'float', 'int32']).copy()
    train_numerical_data = data.select_dtypes(
        include=['int64', 'float', 'int32'])

    # Replace each categorical value with its precomputed numeric code.
    for column in train_categorical_data.columns:
        column_df = pd.read_excel(f'./encode/{column}_encoded.xlsx')
        # {raw value -> encoded value} from the two columns of the table.
        map_dict = dict(zip(column_df[column], column_df[column + '_encoded']))
        train_categorical_data[column] = train_categorical_data[column].map(map_dict)

    X = pd.concat([train_categorical_data, train_numerical_data], axis=1)
    y = train_df['price']

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42)

    print("训练集输入大小", X_train.shape)
    print("训练集输出大小", y_train.shape)
    print("测试集输入大小", X_test.shape)
    print("测试集输出大小", y_test.shape)

    # An exhaustive-search alternative, kept for reference:
    #   tree = GridSearchCV(DecisionTreeRegressor(),
    #                       {'max_depth': range(3, 30)}, cv=10)
    #
    # BUG FIX: price is a continuous target, so the model must be a
    # DecisionTreeRegressor — a DecisionTreeClassifier was fitted before,
    # which contradicts the regression metrics printed below.
    tree = DecisionTreeRegressor(max_depth=25, max_features=9)
    tree.fit(X_train, y_train)

    y_train_pred = tree.predict(X_train)

    # Persist the fitted model (reload with joblib.load('./model/best_tree.joblib')).
    joblib.dump(tree, './model/best_tree.joblib')

    y_test_pred = tree.predict(X_test)

    def _sweep_r2(values, param_name, xlabel, cast=None):
        """Fit one regressor per value of *param_name* and plot the
        train/test R-squared curves against the swept values."""
        train_results = []
        test_results = []
        for value in values:
            kwargs = {param_name: cast(value) if cast else value}
            dt = DecisionTreeRegressor(**kwargs)
            dt.fit(X_train, y_train)
            train_results.append(r2_score(y_train.values, dt.predict(X_train)))
            test_results.append(r2_score(y_test, dt.predict(X_test)))
        line1, = plt.plot(values, train_results, 'b', label='Train R2')
        line2, = plt.plot(values, test_results, 'r', label='Test R2')
        plt.legend(handler_map={line1: HandlerLine2D(numpoints=2),
                                line2: HandlerLine2D(numpoints=2)})
        plt.ylabel('R-Squared')
        plt.xlabel(xlabel)
        plt.show()

    def changemmax_depth():
        # Depths must be ints; linspace yields floats, hence cast=int.
        _sweep_r2(np.linspace(1, 32, 32, endpoint=True),
                  'max_depth', 'Tree depth', cast=int)

    def changemin_samples_split():
        # Fractional min_samples_split means "fraction of n_samples".
        _sweep_r2(np.linspace(0.1, 1.0, 10, endpoint=True),
                  'min_samples_split', 'min samples split')

    def changemin_samples_leaf():
        _sweep_r2(np.linspace(0.1, 0.5, 5, endpoint=True),
                  'min_samples_leaf', 'min samples leaf')

    def changemax_features():
        _sweep_r2(list(range(1, X_train.shape[1])),
                  'max_features', 'max features')

    # Evaluation on the training split.
    print("Train Results for Decision Tree Regressor Model:")
    print("Root Mean Squared Error: ", sqrt(mse(y_train.values, y_train_pred)))
    print("Mean Absolute % Error: ", round(mean_absolute_percentage_error(y_train.values, y_train_pred)))
    print("R-Squared: ", r2_score(y_train.values, y_train_pred))

    # Evaluation on the held-out test split.
    print("Test Results for Decision Tree Regressor Model:")
    print("Root Mean Squared Error: ", sqrt(mse(y_test, y_test_pred)))
    print("Mean Absolute % Error: ", round(mean_absolute_percentage_error(y_test, y_test_pred)))
    print("R-Squared: ", r2_score(y_test, y_test_pred))

    def picShow():
        """Scatter the training targets and overlay the model's fitted values."""
        plt.figure()
        idx = np.arange(1, len(y_train) + 1)
        plt.scatter(idx, y_train, s=20, edgecolor="black",
                    c="darkorange", label="Train Data")
        plt.plot(idx, y_train_pred, color="blue", label="Train Results")
        plt.xlabel("data")
        plt.ylabel("predict")
        plt.title("Decision Tree Regression")
        plt.legend()
        plt.show()