import os
import sys

import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential


# Fix all RNG seeds so shuffles and weight initialisation are reproducible.
SEED = 42
np.random.seed(SEED)
tf.random.set_seed(SEED)


# CSV of pose landmarks; last column is assumed to be the 'label' column.
dataset_path = "数据集/pull_up_landmarks.csv"

# Abort early with a clear message if the dataset file is missing.
if not os.path.exists(dataset_path):
    print(f"错误: 数据集文件 '{dataset_path}' 不存在")
    # BUG FIX: bare exit() comes from the `site` module and is not guaranteed
    # to exist in every execution mode (frozen apps, `python -S`); sys.exit is.
    sys.exit(1)

print("正在加载数据集...")
try:
    data = pd.read_csv(dataset_path)
    print(f"成功加载数据集，共有 {len(data)} 条记录")
except Exception as e:
    # Best-effort diagnostic, then terminate — nothing downstream can run.
    print(f"加载数据集时出错: {e}")
    sys.exit(1)


# Print a quick overview: feature count and class balance.
print("\n数据集信息:")
print(f"特征数量: {data.shape[1] - 1}")  # last column is the label
print(f"正样本数量 (成功引体向上): {int((data['label'] == 1).sum())}")
print(f"负样本数量 (未成功引体向上): {int((data['label'] == 0).sum())}")

# All columns except the last are landmark features; the last is the label.
X = data.iloc[:, :-1].to_numpy()
y = data.iloc[:, -1].to_numpy()


# Stratified 80/20 split keeps the class ratio identical in both sets.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y
)

# Standardise features; fit on the training set only to avoid data leakage.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Persist the fitted scaler so inference can apply the identical transform.
# (PEP 8 fix: `import joblib` moved to the top-of-file import block.)
os.makedirs("模型", exist_ok=True)
joblib.dump(scaler, "模型/scaler.pkl")


# Small fully-connected binary classifier over the landmark features.
model = Sequential()
# Hidden layer 1: 256 ReLU units; Dropout reduces overfitting.
model.add(Dense(256, activation='relu', input_shape=(X_train.shape[1],)))
model.add(Dropout(0.3))
# Hidden layer 2: 32 ReLU units.
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
# Output layer: one sigmoid unit -> probability (0-1) of a successful pull-up.
model.add(Dense(1, activation='sigmoid'))

# Binary classification: cross-entropy loss with the Adam optimizer.
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Print a layer-by-layer overview of the network.
model.summary()


# Train for a fixed 50 epochs; 20% of the training data is held out
# each epoch as a validation split.
print("\n开始训练模型...")
fit_options = dict(epochs=50, batch_size=32, validation_split=0.2, verbose=1)
history = model.fit(X_train, y_train, **fit_options)

# Final evaluation on the untouched test set.
print("\n在测试集上评估模型...")
test_loss, test_accuracy = model.evaluate(X_test, y_test)
print(f"测试集准确率: {test_accuracy:.4f}")

# Persist the trained network (HDF5 format) for later inference.
model_path = "模型/pull_up_classifier.h5"
model.save(model_path)
print(f"\n模型已保存到: {model_path}")

# 绘制训练过程中的损失和准确率变化
# Plot training/validation loss and accuracy side by side.
fig, (loss_ax, acc_ax) = plt.subplots(1, 2, figsize=(12, 5))

# Loss curves
loss_ax.plot(history.history['loss'], label='training loss')
loss_ax.plot(history.history['val_loss'], label='validation loss')
loss_ax.set_title('loss')
loss_ax.set_xlabel('epoch')
loss_ax.set_ylabel('loss')
loss_ax.legend()

# Accuracy curves
acc_ax.plot(history.history['accuracy'], label='training accuracy')
acc_ax.plot(history.history['val_accuracy'], label='validation accuracy')
acc_ax.set_title('accuracy')
acc_ax.set_xlabel('epoch')
acc_ax.set_ylabel('accuracy')
acc_ax.legend()

fig.tight_layout()
fig.savefig("模型/training_history.png")
print("训练历史图表已保存到: 模型/training_history.png")

# 混淆矩阵
# PEP 8 fix: the sklearn.metrics import was mid-file; it now lives in the
# top-of-file import block.

# Turn predicted probabilities into hard labels with a 0.5 threshold.
y_pred_prob = model.predict(X_test)
y_pred = (y_pred_prob > 0.5).astype(int).flatten()

# Confusion matrix: rows = true labels, columns = predicted labels.
cm = confusion_matrix(y_test, y_pred)
print("\n混淆矩阵:")
print(cm)

# Per-class precision/recall/F1 summary.
print("\n分类报告:")
print(classification_report(y_test, y_pred, target_names=['not_success', 'success']))


# Render the confusion matrix as a heatmap with per-cell counts.
plt.figure(figsize=(8, 6))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
# BUG FIX: the title read 'digit recognition' — a copy-paste leftover from an
# unrelated project. This chart is the pull-up classifier's confusion matrix.
plt.title('confusion matrix')
plt.colorbar()
tick_marks = np.arange(2)
plt.xticks(tick_marks, ['not success', 'success'])
plt.yticks(tick_marks, ['not success', 'success'])

# Annotate every cell; use white text on dark cells for legibility.
thresh = cm.max() / 2.
for (i, j), count in np.ndenumerate(cm):
    plt.text(j, i, format(count, 'd'),
             horizontalalignment="center",
             color="white" if count > thresh else "black")

plt.tight_layout()
plt.ylabel('real label')
plt.xlabel('predicted label')
plt.savefig("模型/confusion_matrix.png")
print("混淆矩阵图表已保存到: 模型/confusion_matrix.png")