import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.svm import SVR
import pickle


# --- Exploratory data analysis on the maths-course student dataset ---

student = pd.read_csv('student-mat.csv')

# Distribution of the final grade (G3) as a horizontal bar chart of value counts.
student['G3'].value_counts().sort_values().plot.barh(width=0.8)
plt.show()

# Histogram of final grades.
plt.hist(student["G3"], bins=50, edgecolor="black")
plt.xlabel("score")
plt.ylabel("number")
plt.show()

# Histogram of student ages.
plt.hist(student["age"], bins=20)
plt.xlabel("age")
plt.ylabel("number")
plt.show()

# Compare the final-grade distribution between female and male students.
# (The original computed `error = np.abs(female - male)` here; it was never
# used and the index-aligned subtraction of two disjoint Series is all-NaN,
# so it has been removed.)
female = student[student["sex"] == "F"]["G3"]
male = student[student["sex"] == "M"]["G3"]
labels = ["female", "male"]
plt.hist([female, male], bins=5, label=labels)
plt.xlabel("score")
plt.ylabel("number")
plt.legend()
plt.show()

# Final grade by age.
sns.boxplot(student, x="age", y="G3")

# Kernel-density estimate of the final grade for urban ("U") vs rural ("R")
# home addresses.
x1 = student.loc[student["address"] == "U", "G3"]  # urban
x2 = student.loc[student["address"] == "R", "G3"]  # rural
sns.kdeplot(x1)
sns.kdeplot(x2)
plt.legend(["urban", "rural"])
plt.xlabel("final score")
plt.ylabel("rate")
plt.show()

def EuclideanDistance(feature1, feature2):
    """Score the similarity of two numeric feature columns.

    Both columns are cast to float32 and mean-centred, then the Euclidean
    distance between them is mapped to a similarity score
    ``1 / (1 + distance) * 10``: identical (centred) columns score 10 and
    the score decays toward 0 as the columns diverge.
    """
    centred_a = feature1.astype(np.float32)
    centred_b = feature2.astype(np.float32)

    # Remove each column's mean so only the shape of the variation matters,
    # not its absolute level.
    centred_a -= np.mean(centred_a)
    centred_b -= np.mean(centred_b)

    # Euclidean distance between the centred columns.
    distance = np.sqrt(np.sum((centred_a - centred_b) ** 2))

    # Map the distance to a bounded similarity score in (0, 10].
    return 1 / (1 + distance) * 10

# Rank candidate features by their centred-Euclidean similarity to the final
# grade G3 (see EuclideanDistance above).
# NOTE(review): the original hand-written append list repeated "goout" and
# "famrel"; the duplicates have been removed.
feature_names = [
    "failures", "age", "goout", "traveltime", "health", "Dalc", "Walc",
    "freetime", "absences", "famrel", "studytime", "Fedu", "Medu",
]
number = [EuclideanDistance(student[name], student["G3"]) for name in feature_names]

# Indices into `feature_names`, ordered from most to least similar to G3.
print(np.argsort(number)[::-1])

# Pearson correlation of every numeric column with the final grade, strongest
# first.  (The original bare expression discarded its result; print it so the
# script actually reports something.)
print(student.corr(method="pearson", numeric_only=True)["G3"].sort_values()[::-1])

# Final grade by mother's education level, with a swarm plot of the first
# 300 rows overlaid for individual points.
sns.boxplot(student, x="Medu", y="G3")
x = student["Medu"][0:300]
y = student["G3"][0:300]
sns.swarmplot(x=x, y=y)

# Reload the raw data, drop the intermediate grades G1/G2 (they would leak
# the target), and one-hot encode the categorical columns.
student = pd.read_csv("student-mat.csv")
student = student.drop(["G1", "G2"], axis="columns")
student = pd.get_dummies(student)

# Eight columns most correlated (absolute value) with G3 — G3 itself included.
most_correlated = student.corr()["G3"].abs().sort_values(ascending=False)[:8]
print(most_correlated)


# Final grade vs number of past class failures.
failures_boxenplot = sns.boxenplot(student, x="failures", y="G3")
plt.xlabel("number of failures")
plt.ylabel("final score")
plt.title("How the number of failures affects the final score")
plt.show()

# Combined parental education (father's level + mother's level) vs final grade.
parent_ed = student["Fedu"] + student["Medu"]
sns.boxenplot(student, x=parent_ed, y="G3")

# "higher_yes" is the one-hot column produced by get_dummies from the
# "higher" (wants higher education) categorical column.
sns.boxenplot(student, x="higher_yes", y="G3")

def evaluate_predictions(y_hat, y):
    """Return (MAE, RMSE) of predictions *y_hat* against targets *y*."""
    residual = y_hat - y
    mae = np.mean(abs(residual))
    rmse = np.sqrt(np.mean(residual ** 2))
    return mae, rmse

# Hold out 20 % of the rows (fixed random seed for reproducibility) and fit a
# plain linear regression as a first reference model.
y = student.loc[:, "G3"]
x = student.drop("G3", axis="columns")
X_train, X_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=123456
)

model1 = LinearRegression()
model1.fit(X_train, y_train)
y_hat = model1.predict(X_test)

# Report the linear model's error on the held-out split.
mae, rmse = evaluate_predictions(y_hat, y_test)
print(mae, rmse)

# Train several regression models on the training split and score them on the test split

def evaluate(X_train, X_test, y_train, y_test):
    """Train several regressors and compare them against a median baseline.

    Each model is fit on the training split and scored on the test split.
    Returns a DataFrame indexed by model name with 'mae' and 'rmse' columns;
    the final 'Baseline' row predicts the training-set median for every
    test sample, so any useful model must beat it.
    """
    # One ordered mapping instead of the original parallel name/model lists,
    # which could silently drift out of sync.
    models = {
        'Linear Regression': LinearRegression(),
        'ElasticNet Regression': ElasticNet(alpha=1.0, l1_ratio=0.5),
        'Random Forest': RandomForestRegressor(n_estimators=100),
        'Extra Trees': ExtraTreesRegressor(n_estimators=100),
        'SVM': SVR(kernel='rbf', degree=3, C=1.0, gamma='auto'),
        'Gradient Boosted': GradientBoostingRegressor(n_estimators=50),
    }

    # Result frame, one row per model plus the baseline (same order as before).
    results = pd.DataFrame(columns=['mae', 'rmse'],
                           index=list(models) + ['Baseline'])

    for name, model in models.items():
        model.fit(X_train, y_train)
        predictions = model.predict(X_test)
        mae = np.mean(abs(predictions - y_test))
        rmse = np.sqrt(np.mean((predictions - y_test) ** 2))
        results.loc[name, :] = [mae, rmse]

    # Constant-prediction baseline: the median of the training targets.
    baseline = np.median(y_train)
    results.loc['Baseline', :] = [
        np.mean(abs(baseline - y_test)),
        np.sqrt(np.mean((baseline - y_test) ** 2)),
    ]
    return results
results = evaluate(X_train, X_test, y_train, y_test)
print(results)

# Compare the models visually — lower bars are better.
plt.figure(figsize=(12, 8))

# Mean absolute error.
ax = plt.subplot(1, 2, 1)
results.sort_values('mae', ascending=True).plot.bar(y='mae', color='b', ax=ax, fontsize=20)
plt.title('error', fontsize=20)
plt.ylabel('MAE', fontsize=20)

# Root-mean-square error.
ax = plt.subplot(1, 2, 2)
results.sort_values('rmse', ascending=True).plot.bar(y='rmse', color='r', ax=ax, fontsize=20)
plt.title('error', fontsize=20)
plt.ylabel('RMSE', fontsize=20)
plt.tight_layout()
plt.show()

# Retrain the chosen model and persist it to disk.  Context managers close
# the file handles even on error (the original leaked both handles).
model = RandomForestRegressor()
model.fit(X_train, y_train)
filename = "final_model"
with open(filename, 'wb') as f:
    pickle.dump(model, f)

# Round-trip: reload the pickled model and check it still predicts.
# NOTE(review): only unpickle files you trust — pickle can run arbitrary code.
with open(filename, 'rb') as f:
    clf = pickle.load(f)
y_hat = clf.predict(X_test)
print(evaluate_predictions(y_hat, y_test))

