"""
An example showing the usage of the TwoStageTrAdaBoostR2 algorithm.

"""
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error
from TwoStageTrAdaBoostR2 import TwoStageTrAdaBoostR2  # import the two-stage algorithm

##=============================================================================

#                                Example 1

##=============================================================================

# Synthetic 1-D regression data: one source domain plus a small labelled
# target-domain training set and a held-out target test set.
x_source1 = np.arange(-3.0, 13.0).reshape(-1, 1)  # 16 points: -3, -2, ..., 12
y_source1 = np.array([
    0.3436, 0.4659, 0.5765, 0.6897, 0.7912,
    0.8989, 1.0053, 1.1114, 1.2159, 1.3185,
    1.4057, 1.4797, 1.4409, 1.4177, 1.4134,
    1.4171,
])

# Ten labelled target-domain training points (inputs as a column vector).
x_target_train = np.array([
    -2.89, -1.38, 0.21, 1.66, 2.81287616,
    3.57197344, 5.0, 6.73798781, 8.08, 10.55112103,
]).reshape(-1, 1)

y_target_train = np.array([
    0.285, 0.437, 0.602, 0.744, 0.85391824,
    0.92711525, 1.0685, 1.2193055, 1.328, 1.41848132,
])

# Target-domain test points, used only for evaluation.
x_target_test = np.array([
    -1.84699134, -1.19559941, 0.54197261, 1.95175166, 3.32,
    4.95, 6.94, 9.15430047, 10.1, 11.3,
]).reshape(-1, 1)

y_target_test = np.array([
    0.3902178, 0.45613591, 0.63451042, 0.76733963, 0.903,
    1.059, 1.238, 1.38011953, 1.426, 1.406,
])


# =======================================================================================
# 3.3 Visualise the raw data: source curve, target test curve, target train points.
plt.figure()
for xs, ys, opts in (
        (x_source1, y_source1, {"c": "g", "label": "source1", "linewidth": 1}),
        (x_target_test, y_target_test, {"c": "b", "label": "target_test", "linewidth": 0.5}),
):
    plt.plot(xs, ys, **opts)
plt.scatter(x_target_train, y_target_train, c="k", label="target_train")
plt.xlabel("x")
plt.ylabel("y")
plt.title("Multiple datasets")
plt.legend()
plt.show()

# 4. transfer learning regression for the target_train data
# 4.1 Pool source and target training data; the algorithm needs the size of
#     each constituent dataset to tell the domains apart.
X = np.vstack((x_source1, x_target_train))
y = np.hstack((y_source1, y_target_train))

sample_size = [len(x_source1), len(x_target_train)]

# ==============================================================================

# Boosting / cross-validation hyper-parameters.
n_estimators = 100
steps = 10
fold = 5
random_state = np.random.RandomState(1)

# ==============================================================================

# 4.2 Two-stage transfer boosting, trained on the pooled source + target data.
two_stage_regressor = TwoStageTrAdaBoostR2(
    DecisionTreeRegressor(max_depth=6),
    n_estimators=n_estimators,
    sample_size=sample_size,
    steps=steps,
    fold=fold,
    random_state=random_state,
)
two_stage_regressor.fit(X, y)
y_pred1 = two_stage_regressor.predict(x_target_test)

# 4.3 As a comparison, use AdaBoostR2 without transfer learning
# ==============================================================================
# Seed the baseline with the shared RandomState so the AdaBoostR2-vs-transfer
# comparison is reproducible; leaving it unseeded made the reported baseline
# MSE vary from run to run while the transfer model's MSE stayed fixed.
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=6),
                           n_estimators=n_estimators,
                           random_state=random_state)
# ==============================================================================
regr_2.fit(x_target_train, y_target_train)
y_pred2 = regr_2.predict(x_target_test)

# 4.4 Plot the transfer-learning predictions against the ground truth.
plt.figure()
plt.scatter(x_target_train, y_target_train, c="k", label="target_train")
for xs, ys, opts in (
        (x_source1, y_source1, {"c": "g", "label": "source1", "linewidth": 1}),
        (x_target_test, y_target_test, {"c": "b", "label": "target_test", "linewidth": 0.5}),
        (x_target_test, y_pred1, {"c": "r", "label": "TwoStageTrAdaBoostR2", "linewidth": 2}),
):
    plt.plot(xs, ys, **opts)
# Uncomment to overlay the no-transfer baseline:
# plt.plot(x_target_test, y_pred2, c="y", label="AdaBoostR2", linewidth=2)
plt.xlabel("x")
plt.ylabel("y")
plt.title("Two-stage Transfer Learning Boosted Decision Tree Regression")
plt.legend()
plt.show()
# 4.5 Report the test-set mean squared error of both models.
mse_twostageboost = mean_squared_error(y_target_test, y_pred1)
mse_adaboost = mean_squared_error(y_target_test, y_pred2)

print(f"MSE of regular AdaboostR2: {mse_adaboost}")
print(f"MSE of TwoStageTrAdaboostR2: {mse_twostageboost}")
