import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.metrics import classification_report, accuracy_score, recall_score
import matplotlib.pyplot as plt
import seaborn as sns
from graphviz import Source
from IPython.display import Image

# 1. Load the dataset and define the modeling columns
file_path = "C:\\Users\\卷\\Desktop\\程序\\Pycharm\\24.6.29\\Student_performance_data _.csv"
data = pd.read_csv(file_path)

# Columns used as predictors, and the label column to predict
features = ['Age', 'StudyTimeWeekly', 'Absences', 'GPA', 'Gender', 'Ethnicity', 'ParentalEducation',
            'Tutoring', 'ParentalSupport', 'Extracurricular', 'Sports', 'Music', 'Volunteering']
target = 'GradeClass'

# Design matrix and label vector
X = data[features]
y = data[target]

# 2. Feature engineering and encoding
numeric_features = ['Age', 'StudyTimeWeekly', 'Absences', 'GPA']
categorical_features = ['Gender', 'Ethnicity', 'ParentalEducation', 'Tutoring', 'ParentalSupport',
                       'Extracurricular', 'Sports', 'Music', 'Volunteering']

# Per-type preprocessing: numerics are mean-imputed then standardized;
# categoricals are mode-imputed then one-hot encoded (unknown categories
# at predict time are ignored rather than raising).
numeric_pipeline = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='mean')),
    ('scaler', StandardScaler()),
])
categorical_pipeline = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('onehot', OneHotEncoder(handle_unknown='ignore')),
])

# Route each column group through its pipeline; output order is the
# numeric block first, then the one-hot block.
preprocessor = ColumnTransformer(transformers=[
    ('num', numeric_pipeline, numeric_features),
    ('cat', categorical_pipeline, categorical_features),
])

# 3. Hold out 20% of the rows as a test set (fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# 4. Model selection and training: chain the preprocessor and a
# random-forest classifier into a single estimator.
pipeline = Pipeline(steps=[
    ('preprocessor', preprocessor),
    ('classifier', RandomForestClassifier(random_state=42)),
])

# One fit() call fits the preprocessor and the forest end to end
pipeline.fit(X_train, y_train)

# 5. Evaluate on the held-out test set
y_pred = pipeline.predict(X_test)
metrics = {
    "Accuracy": accuracy_score(y_test, y_pred),
    # Weighted recall: per-class recall averaged by class support
    "Recall": recall_score(y_test, y_pred, average='weighted'),
}
for metric_name, metric_value in metrics.items():
    print(f"{metric_name}:", metric_value)
print(classification_report(y_test, y_pred))

# 6. Data visualization

def _plot_histogram(series, title, xlabel):
    """Show a 20-bin histogram with a KDE overlay for one column."""
    plt.figure(figsize=(10, 6))
    sns.histplot(series, bins=20, kde=True)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel('Frequency')
    plt.show()

# Age distribution
_plot_histogram(data['Age'], 'Age Distribution', 'Age')

# GPA distribution
_plot_histogram(data['GPA'], 'GPA Distribution', 'GPA')

# GradeClass distribution.
# Pass x= explicitly: handing countplot a bare Series positionally is
# ambiguous (interpreted as `data`) and its meaning has shifted across
# seaborn releases; the keyword form is stable.
plt.figure(figsize=(10, 6))
sns.countplot(x='GradeClass', data=data)
plt.title('Grade Class Distribution')
plt.xlabel('Grade Class')
plt.ylabel('Count')
plt.show()

# 7. Model visualization (via a single decision tree as an interpretable proxy)

# Transform the training data first. The standalone `preprocessor` is
# already fitted: sklearn's Pipeline fits its steps in place (no cloning),
# so pipeline.fit() above fitted this very object.
X_train_processed = preprocessor.transform(X_train)

# BUG FIX: fit the tree on the *preprocessed* matrix, not on raw X_train.
# `feature_names` below describes the expanded (scaled + one-hot) columns;
# a tree fitted on raw columns would have its node labels point at the
# wrong features when exported with export_graphviz.
tree_clf = DecisionTreeClassifier(random_state=42)
tree_clf.fit(X_train_processed, y_train)

# Recover readable one-hot column names. Pass the original column names
# explicitly: the OneHotEncoder only ever saw a numpy array (the imputer's
# output strips DataFrame column labels), so a bare get_feature_names_out()
# would return generic x0_/x1_ placeholder names.
onehot_feature_names = preprocessor.named_transformers_['cat']['onehot'].get_feature_names_out(categorical_features)

# Full feature-name list in ColumnTransformer output order:
# numeric block first, then the one-hot block.
feature_names = numeric_features + list(onehot_feature_names)
