import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.neighbors import LocalOutlierFactor
from imblearn.over_sampling import RandomOverSampler
from collections import Counter
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from imblearn.over_sampling import SMOTE
from tensorflow.keras.callbacks import EarlyStopping
# Load the dataset and take a first look at its contents.
print("加载检查数据:")
df = pd.read_csv("star_classification.csv")
print(df.head())
print(df.info())
print(df["class"].value_counts())

# Three target categories: galaxies, stars and quasars.
# Encode the string labels as integer class ids for the model.
df["class"] = df["class"].map({"GALAXY": 0, "STAR": 1, "QSO": 2})

# Visualize how many samples each class has (before balancing).
sns.countplot(data=df, x="class", color="skyblue")
plt.title("Class ", fontsize=10)
plt.savefig('类别数量可视化.png')
plt.show()
# Remove outliers with the Local Outlier Factor algorithm.
# NOTE(review): LOF is fitted on the whole frame, including the
# encoded 'class' column and the ID columns — confirm this is intended.
lof = LocalOutlierFactor()
lof.fit_predict(df)
lof_scores = pd.Series(lof.negative_outlier_factor_)

# Rows whose LOF score falls below this threshold are treated as outliers.
lof_threshold = -1.5
outlier_rows = lof_scores[lof_scores < lof_threshold].index
df.drop(outlier_rows, inplace=True)
# Feature selection: inspect pairwise correlations between columns.
corr = df.corr()
fig, ax = plt.subplots(figsize=(12, 8))
sns.heatmap(corr, cmap="PuBu", annot=True, linewidths=0.5, fmt='.2f', ax=ax)
plt.savefig('特征关联性热力图.png')
plt.show()

# How strongly does each column correlate with the target?
print(corr["class"].sort_values())

# Drop identifier / positional columns that carry no predictive signal.
df = df.drop(
    ['obj_ID', 'alpha', 'delta', 'run_ID',
     'rerun_ID', 'cam_col', 'field_ID', 'fiber_ID'],
    axis=1,
)
# Balance the classes with SMOTE oversampling.
x = df.drop(['class'], axis=1)
y = df.loc[:, 'class'].values
sm = SMOTE(random_state=42)
print('Original dataset shape %s' % Counter(y))
x, y = sm.fit_resample(x, y)
print('Resampled dataset shape %s' % Counter(y))

# BUG FIX: the original plotted `df`, which SMOTE never modifies, so the
# "after balancing" chart showed the *unbalanced* counts again.
# Plot the resampled label vector instead.
sns.countplot(x=pd.Series(y, name='class'), color="skyblue")
plt.title("Class ", fontsize=10)
plt.savefig('类别数量可视化_处理不平衡之后.png')
plt.show()
# Standardize features to zero mean / unit variance.
# BUG FIX: the original rebuilt the scaled DataFrame with
# `columns=df.columns[:-1]`, but after the earlier column drop 'class'
# is NOT the last column of df — the features were silently mislabeled
# (the frame gained a bogus 'class' column name and lost the last
# feature's name). Use the actual feature names carried by `x`, which
# SMOTE's fit_resample preserves when given a DataFrame.
feature_names = x.columns
scaler = StandardScaler()
x = pd.DataFrame(scaler.fit_transform(x), columns=feature_names)

# Hold out 30% of the (balanced, scaled) data for evaluation.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.30, random_state=42)

# Build a simple fully-connected classifier: three hidden ReLU layers
# of increasing width, then a softmax over the three classes.
model = models.Sequential([
    layers.Dense(32, activation='relu', input_shape=(x_train.shape[1],)),
    layers.Dense(64, activation='relu'),
    layers.Dense(128, activation='relu'),
    layers.Dense(3, activation='softmax'),
])

# Labels are integer class ids, so use sparse categorical cross-entropy.
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
    metrics=['accuracy'],
)

# Stop training once validation loss has not improved for 10 epochs.
early_stopping = EarlyStopping(monitor='val_loss', patience=10)

# Train, validating against the held-out test split each epoch.
history = model.fit(
    x_train,
    y_train,
    epochs=100,
    batch_size=128,
    validation_data=(x_test, y_test),
    callbacks=[early_stopping],
    verbose=1,
)

# Evaluate: predict class probabilities on the held-out set.
model.summary()
y_pred = model.predict(x_test)

# Confusion matrix of true labels vs. argmax-predicted class indices.
predicted_labels = np.argmax(y_pred, axis=1)
cm = confusion_matrix(y_test, predicted_labels)

# Render the confusion matrix as an annotated heatmap.
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.title('Confusion Matrix')
plt.xlabel('Predicted Labels')
plt.ylabel('True Labels')
plt.savefig('混淆矩阵.png')
plt.show()

# Plot the training-history curves: loss and accuracy per epoch,
# side by side, for both the training and validation splits.
hist = history.history
plt.figure(figsize=(12, 4))
for position, (metric, title, ylabel) in enumerate(
    [('loss', 'Loss vs Epochs', 'Loss'),
     ('accuracy', 'Accuracy vs Epochs', 'Accuracy')],
    start=1,
):
    plt.subplot(1, 2, position)
    plt.plot(hist[metric], label=f'Training {ylabel}')
    plt.plot(hist['val_' + metric], label=f'Validation {ylabel}')
    plt.title(title)
    plt.xlabel('Epochs')
    plt.ylabel(ylabel)
    plt.legend()

plt.tight_layout()
plt.savefig('损失函数准确率变化图.png')
plt.show()
