import pandas as pd

# Location of the raw housing dataset.
file_path = "D:/pythondate/EndOfTerm/housing.csv"

# Load it into a DataFrame with pandas.
df = pd.read_csv(file_path)

# Quick sanity check: show the first five rows.
print(df.head())

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Location of the raw housing dataset.
file_path = "D:/pythondate/EndOfTerm/housing.csv"

# Load the data.
df = pd.read_csv(file_path)

# Pairwise correlations over every numeric (int64/float64) column;
# non-numeric columns such as 'ocean_proximity' are excluded.
corr_matrix = df.select_dtypes(include=['int64', 'float64']).corr()

# Render the correlation heatmap (annot=True writes each coefficient
# into its cell, rounded to two decimals), save it, then display it.
plt.figure(figsize=(12, 10))
sns.heatmap(corr_matrix, annot=True, cmap='coolwarm', fmt=".2f")
plt.title('Heatmap of House Price Data')
plt.savefig('heatmap.png')
plt.show()

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Location of the raw housing dataset.
file_path = "D:/pythondate/EndOfTerm/housing.csv"
# Load the data.
df = pd.read_csv(file_path)

# Keep only the first 50% of the rows (head of the file, not a random
# sample — NOTE(review): a random sample may be more representative).
num_rows = int(0.5 * len(df))
df_50_percent = df.head(num_rows)

# Columns whose distributions we inspect with box plots.
box_columns = ['total_rooms', 'total_bedrooms', 'population',
               'households', 'median_income']

# One box plot per column on a 2x3 grid (the sixth cell stays empty).
# Replaces five copy-pasted subplot stanzas with a single loop.
plt.figure(figsize=(12, 8))
for position, column in enumerate(box_columns, start=1):
    plt.subplot(2, 3, position)
    sns.boxplot(x=df_50_percent[column])
    plt.title(column)

plt.tight_layout()
plt.show()

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Load the dataset and keep the first 50% of the rows.
file_path = "D:/pythondate/EndOfTerm/housing.csv"
df = pd.read_csv(file_path)
df = df.head(len(df) // 2)

# (column name, human-readable axis label) pairs to plot against the
# median house value.  Replaces three copy-pasted plotting stanzas.
scatter_specs = [
    ('longitude', 'Longitude'),
    ('latitude', 'Latitude'),
    ('median_income', 'Median Income'),
]

# One scatter plot per feature, each in its own figure, with the same
# titles/labels the original stanzas produced.
for column, label in scatter_specs:
    plt.figure(figsize=(10, 6))
    sns.scatterplot(x=column, y='median_house_value', data=df)
    plt.title(f'Median House Value vs {label}')
    plt.xlabel(label)
    plt.ylabel('Median House Value')
    plt.show()

import pandas as pd
import numpy as np

# Load the raw housing dataset.
file_path = "D:/pythondate/EndOfTerm/housing.csv"
df = pd.read_csv(file_path)

# Report the number of missing values per column.
print("缺失值统计：")
print(df.isnull().sum())

# Remove every row containing at least one missing value.
df_dropped = df.dropna()

# Show how many rows were removed by comparing shapes.
print("\n原始数据集大小:", df.shape)
print("删除无效样本后的数据集大小:", df_dropped.shape)

# Persist the cleaned dataset without the index column.
cleaned_file_path = "D:/pythondate/EndOfTerm/clean_housing.csv"
df_dropped.to_csv(cleaned_file_path, index=False)

print(f"\n处理后的数据已保存到：{cleaned_file_path}")

import pandas as pd
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.impute import SimpleImputer
import numpy as np

# Load the cleaned dataset produced by the missing-value step.
data = pd.read_csv("D:/pythondate/EndOfTerm/clean_housing.csv")

# Inspect the cardinality of the categorical column.
unique_values = data['ocean_proximity'].nunique()
print(f"Unique values in 'ocean_proximity' column: {unique_values}")

# One-hot encode 'ocean_proximity' and replace the original column with
# the indicator columns.  fit_transform returns a sparse matrix, hence
# the .toarray() densification before building the DataFrame.
# NOTE(review): concat relies on both frames having the default
# RangeIndex — true here because `data` was just read from CSV.
categorical_features = ['ocean_proximity']
onehot_encoder = OneHotEncoder()
data_encoded = onehot_encoder.fit_transform(data[categorical_features]).toarray()
data_encoded = pd.DataFrame(data_encoded, columns=onehot_encoder.get_feature_names_out(categorical_features))
data = pd.concat([data.drop(categorical_features, axis=1), data_encoded], axis=1)

# Standardize the numeric features to zero mean / unit variance.
scaler = StandardScaler()
numerical_features = ['longitude', 'latitude', 'housing_median_age', 'total_rooms', 'total_bedrooms', 'population', 'households', 'median_income']
data[numerical_features] = scaler.fit_transform(data[numerical_features])

# Outlier handling: per column, mark values with |z-score| > 3 as
# missing, then fill them with that column's mean.  The threshold and
# the imputer are loop-invariant, so they are created once instead of
# on every iteration (fit() per column is unchanged, so behavior is
# identical to the original).
threshold = 3
imputer = SimpleImputer(strategy='mean')
for column in numerical_features:
    z_scores = (data[column] - data[column].mean()) / data[column].std()
    outliers = np.abs(z_scores) > threshold
    data.loc[outliers, column] = np.nan
    data[column] = imputer.fit_transform(data[[column]])

# Overwrite the cleaned CSV with the fully preprocessed data.
cleaned_file_path = "D:\\pythondate\\EndOfTerm\\clean_housing.csv"
data.to_csv(cleaned_file_path, index=False)

print(f"\n处理后的数据已保存到：{cleaned_file_path}")


# Random forest regression model
# Required libraries
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
import pandas as pd

# Load the preprocessed dataset.
data = pd.read_csv("D:\\pythondate\\EndOfTerm\\clean_housing.csv")

# Single predictor (median income); target is the median house value.
features = data[['median_income']]
target = data['median_house_value']

# 70/30 train/test split, seeded for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.3, random_state=42)

# Fit a 100-tree random forest on the training split.
rf_model = RandomForestRegressor(n_estimators=100, random_state=42)
rf_model.fit(X_train, y_train)

# Predict on the held-out split.
y_pred = rf_model.predict(X_test)

# Report the error metrics.
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(f"Mean Squared Error: {mse}")
print(f"R-squared: {r2}")

# Gradient boosting regression model
# Required libraries
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
import pandas as pd

# Load the preprocessed dataset.
# Bug fix: the original literal "D:\pythondate\..." contained the
# invalid escape sequences \p, \E, \c — a SyntaxWarning on Python 3.12+
# and slated to become an error.  The raw string below has the exact
# same value.
data = pd.read_csv(r"D:\pythondate\EndOfTerm\clean_housing.csv")

# All numeric predictors; target is the median house value.
feature_columns = ['longitude', 'latitude', 'housing_median_age', 'total_rooms', 'total_bedrooms', 'population', 'households', 'median_income']
target_column = 'median_house_value'

# Extract features and target.
X = data[feature_columns]
y = data[target_column]

# 70/30 train/test split, seeded for reproducibility.
# NOTE: X_train/X_test/y_train/y_test, gbr_model, y_pred and
# feature_columns are reused by the XGBoost and SHAP sections below —
# keep these names.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Fit a 100-tree gradient boosting regressor on the training split.
gbr_model = GradientBoostingRegressor(n_estimators=100, random_state=42)
gbr_model.fit(X_train, y_train)

# Predict on the held-out split.
y_pred = gbr_model.predict(X_test)

# Report the error metrics.
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)

print(f"Mean Squared Error: {mse}")
print(f"R-squared: {r2}")

# XGBoost regression
import xgboost as xgb

# Reuses the X_train/X_test/y_train/y_test split (and the metric
# functions) defined by the gradient boosting section above.
xgb_model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=100, random_state=42)

# Train, then predict on the held-out split.
xgb_model.fit(X_train, y_train)
y_pred_xgb = xgb_model.predict(X_test)

# Report the held-out error metrics.
mse_xgb = mean_squared_error(y_test, y_pred_xgb)
r2_xgb = r2_score(y_test, y_pred_xgb)
print(f"XGBoost Mean Squared Error: {mse_xgb}")
print(f"XGBoost R-squared: {r2_xgb}")

# Diagnostics and SHAP analysis for the gradient boosting model.
# Required libraries
import matplotlib.pyplot as plt
import shap

# Predicted vs. true values, with the ideal y = x reference line.
# Uses y_test/y_pred from the gradient boosting section above.
plt.figure(figsize=(8, 6))
plt.scatter(y_test, y_pred)
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=2)
plt.xlabel('True Values')
plt.ylabel('Predicted Values')
plt.title('Predicted vs True Median House Values')
plt.show()

# Impurity-based feature importances, most important first.
feature_importances = gbr_model.feature_importances_
feature_importance_df = pd.DataFrame({'Feature': feature_columns, 'Importance': feature_importances})
feature_importance_df = feature_importance_df.sort_values('Importance', ascending=False)
plt.figure(figsize=(10, 6))
plt.barh(feature_importance_df['Feature'], feature_importance_df['Importance'])
plt.xlabel('Feature Importance')
plt.ylabel('Feature')
plt.title('Feature Importance Plot')
plt.show()

# SHAP values for every test-set prediction.
explainer = shap.TreeExplainer(gbr_model)
shap_values = explainer.shap_values(X_test)

# Global summary of each feature's impact on the model output.
shap.summary_plot(shap_values, X_test, feature_names=feature_columns)
plt.show()

# Explain a single prediction.
# Bug fix: matplotlib=True is required for force_plot to draw anything
# outside a Jupyter notebook — without it the call returns an HTML
# object and the subsequent plt.show() displays nothing.
instance_index = 0  # first test instance
shap.force_plot(explainer.expected_value, shap_values[instance_index], X_test.iloc[instance_index], feature_names=feature_columns, matplotlib=True)
plt.show()

