
import joblib
import pandas as pd
import shap
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor

# Load the training data; raw string avoids the invalid escape sequences
# (\d, \m) the original path literal relied on — the runtime path is identical.
data = pd.read_csv(r"D:\daily work\ml\test\train_data8.csv")

# Drop any rows with missing values before modeling.
data.dropna(inplace=True)

# Quantize both targets onto a 0.002-sized grid.
# NOTE(review): astype(int) truncates toward zero rather than rounding —
# confirm truncation (not round-to-nearest) is the intended binning.
y1 = (data["y1"] / 0.002).astype(int) * 0.002
y2 = (data["y2"] / 0.002).astype(int) * 0.002  # computed but unused below

# Feature matrix: every column except the two targets.
x = data.drop(["y1", "y2"], axis=1)

# Hold out 20% of rows for evaluation; fixed seed makes the split reproducible.
x_train, x_test, y_train, y_test = train_test_split(x, y1, test_size=0.2, random_state=0)

# Standardize features to zero mean / unit variance. (Decision trees are
# scale-invariant, so this mainly matters if another estimator is swapped in.)
scaler = StandardScaler()
X_train = scaler.fit_transform(x_train)
X_test = scaler.transform(x_test)

# Depth-limited regression tree. random_state pins the tie-breaking among
# equally good splits so repeated runs produce the same fitted model.
regr = DecisionTreeRegressor(max_depth=10, random_state=0)
regr.fit(X_train, y_train)
y_p = regr.predict(X_test)

# Report per-feature importance (impurity-reduction based) from the fitted tree.
features = x_test.columns.to_list()
for feat_name, score in zip(features, regr.feature_importances_):
    print(feat_name, score)

# Regression metrics on the held-out set.
mse = mean_squared_error(y_test, y_p)
# RMSE via sqrt of MSE: mean_squared_error(..., squared=False) was deprecated
# and removed in scikit-learn >= 1.6; this form is version-independent.
rmse = mse ** 0.5
mae = mean_absolute_error(y_test, y_p)
r2 = r2_score(y_test, y_p)

print(f"MSE: {mse}")
print(f"RMSE: {rmse}")
print(f"MAE: {mae}")
print(f"R²: {r2}")

# SHAP analysis: attribute each test prediction to the input features.
# shap.Explainer dispatches to a tree-specific explainer for tree models.
# (shap is imported with the other dependencies at the top of the file.)
explainer = shap.Explainer(regr, X_train)
shap_values = explainer.shap_values(X_test)
shap.summary_plot(shap_values, X_test, feature_names=features)

# Persist the fitted model; raw string fixes the invalid \d/\m escape
# sequences while keeping the runtime path byte-identical.
# NOTE(review): filename says "_cl_" (classifier) but a regressor is saved —
# confirm the name is intentional.
joblib.dump(regr, r"D:\daily work\ml\test\decision_tree_cl_8.joblib")
