import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder
from sklearn.cluster import KMeans, Birch, DBSCAN
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.metrics import silhouette_score

# Load the dataset.
data = pd.read_csv(r"C:\Users\卷\Desktop\程序\Pycharm\24.3.25\dataset.csv")

# Split features by dtype: numeric vs. non-numeric (object) columns.
numeric_data = data.select_dtypes(include=['int', 'float'])
categorical_data = data.select_dtypes(include=['object'])

# Impute missing values: column mean for numeric features, mode for categorical.
numeric_imputer = SimpleImputer(strategy='mean')
numeric_data_filled = pd.DataFrame(numeric_imputer.fit_transform(numeric_data), columns=numeric_data.columns)

# Guard the categorical branch: SimpleImputer/OneHotEncoder raise on a
# zero-column frame, so skip encoding entirely when there is nothing to encode.
if categorical_data.empty:
    categorical_data_encoded = pd.DataFrame(index=data.index)
else:
    categorical_imputer = SimpleImputer(strategy='most_frequent')
    categorical_data_filled = pd.DataFrame(categorical_imputer.fit_transform(categorical_data), columns=categorical_data.columns)

    # One-hot encode categoricals. Use get_feature_names_out so the encoded
    # columns get readable "col_value" names instead of anonymous integers
    # (the original integer names were the reason for the astype(str) hack below).
    onehot_encoder = OneHotEncoder()
    categorical_data_encoded = pd.DataFrame(
        onehot_encoder.fit_transform(categorical_data_filled).toarray(),
        columns=onehot_encoder.get_feature_names_out(categorical_data_filled.columns),
    )

# Merge the processed numeric and encoded categorical features.
data_filled = pd.concat([numeric_data_filled, categorical_data_encoded], axis=1)

# Keep all column labels as strings (scikit-learn requires homogeneous
# string column names on DataFrame input).
data_filled.columns = data_filled.columns.astype(str)

# Standardize every feature to zero mean / unit variance.
scaler = StandardScaler()
data_filled = pd.DataFrame(scaler.fit_transform(data_filled), columns=data_filled.columns)

# Fit a 3-cluster K-means model on the standardized feature matrix and
# attach the resulting cluster id to each row.
kmeans = KMeans(n_clusters=3, random_state=0)
data_filled['cluster'] = kmeans.fit_predict(data_filled)

# Basic statistics: how many samples landed in each cluster.
cluster_counts = data_filled['cluster'].value_counts()
print("每个簇中的样本数量:")
print(cluster_counts)

# Visualize the clusters in the (age, call_duration) plane.
# NOTE: both axes are standardized values at this point, not raw units.
fig, ax = plt.subplots(figsize=(8, 6))
points = ax.scatter(
    data_filled['age'],
    data_filled['call_duration'],
    c=data_filled['cluster'],
    cmap='viridis',
)
ax.set_xlabel('Age')
ax.set_ylabel('Call Duration')
ax.set_title('Cluster Analysis')
fig.colorbar(points, label='Cluster')
plt.show()

# Model the preprocessed data with several clustering algorithms
# (K-means, Birch, DBSCAN) and compare them via the silhouette coefficient.
X = data_filled.drop('cluster', axis=1)

# K-means clustering.
kmeans_model = KMeans(n_clusters=3, random_state=0)
kmeans_labels = kmeans_model.fit_predict(X)

# Birch clustering.
birch_model = Birch(n_clusters=3)
birch_labels = birch_model.fit_predict(X)

# DBSCAN clustering (density-based; label -1 marks noise points).
dbscan_model = DBSCAN(eps=0.5, min_samples=5)
dbscan_labels = dbscan_model.fit_predict(X)


def _safe_silhouette(features, labels):
    """Return the silhouette score, or None when it is undefined.

    silhouette_score raises ValueError unless there are at least 2 distinct
    labels. DBSCAN can legitimately produce a single cluster or mark every
    point as noise (-1), which crashed the original code — return None in
    that degenerate case instead.
    """
    if len(set(labels)) < 2:
        return None
    return silhouette_score(features, labels)


# Silhouette coefficient for each algorithm (higher is better; None = undefined).
kmeans_silhouette = _safe_silhouette(X, kmeans_labels)
birch_silhouette = _safe_silhouette(X, birch_labels)
dbscan_silhouette = _safe_silhouette(X, dbscan_labels)

print("K-means 聚类轮廓系数:", kmeans_silhouette)
print("Birch 聚类轮廓系数:", birch_silhouette)
print("DBSCAN 聚类轮廓系数:", dbscan_silhouette)
# NOTE(review): this span originally contained a verbatim, line-for-line copy
# of the entire pipeline above (load -> impute -> encode -> scale -> K-means ->
# plot -> K-means/Birch/DBSCAN silhouette comparison). It recomputed identical
# results and reprinted identical output, so the accidental duplication has
# been removed. All names it defined (data_filled, X, kmeans, the *_labels and
# *_silhouette variables, etc.) are already defined identically above and are
# what the classifier section below uses.

# Model the data with supervised classifiers and evaluate them.
# NOTE(review): the target here is the K-means cluster id, which was derived
# from these same features — near-perfect accuracy is expected and does not
# indicate real predictive power; confirm this is the intended exercise.

# Train/test split (80/20), stratification not applied.
X_train, X_test, y_train, y_test = train_test_split(X, data_filled['cluster'], test_size=0.2, random_state=42)


def _fit_and_report(model, label):
    """Fit `model` on the train split, print its accuracy and classification
    report on the test split, and return (model, predictions, accuracy).

    Factors out the four identical fit/predict/print stanzas the original
    repeated per classifier. Reads X_train/X_test/y_train/y_test from module
    scope (script-level helper).
    """
    model.fit(X_train, y_train)
    pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, pred)
    print(label, accuracy)
    print(classification_report(y_test, pred))
    return model, pred, accuracy


# Decision tree.
dt_classifier, dt_pred, dt_accuracy = _fit_and_report(
    DecisionTreeClassifier(random_state=0), "决策树准确率:")

# Gaussian naive Bayes.
nb_classifier, nb_pred, nb_accuracy = _fit_and_report(
    GaussianNB(), "朴素贝叶斯准确率:")

# Multi-layer perceptron (default architecture; may warn about convergence).
mlp_classifier, mlp_pred, mlp_accuracy = _fit_and_report(
    MLPClassifier(random_state=0), "神经网络准确率:")

# Support vector machine (RBF kernel by default).
svm_classifier, svm_pred, svm_accuracy = _fit_and_report(
    SVC(random_state=0), "支持向量机准确率:")