import numpy as np
from datetime import datetime
from net import Network

# Network architecture hyperparameters.
input_nodes = 784  # 28x28 pixel images flattened into one input vector
hidden_nodes = 200
output_nodes = 10  # 10 digit classes (0-9)
learning_rate = 0.1

# Three-layer fully-connected network; implementation lives in the
# project-local net.py (interface: train, predict, save_weights, lr).
n = Network(input_nodes, hidden_nodes, output_nodes, learning_rate)

# Load the MNIST training set (CSV rows: label, then 784 pixel values).
inputs = []
outputs = []

# `with` guarantees the file is closed even if a row fails to parse
# (the original open()/close() pair leaked the handle on error).
with open("data_set/mnist_train.csv", 'r') as f:
    for line in f:
        # Strip the trailing newline and split into label + pixel fields.
        all_value = line.strip().split(",")
        # Scale pixels from [0, 255] to [0.01, 1.0] to keep inputs out of
        # the saturated (near-zero-gradient) regions of the sigmoid.
        # NOTE: np.asfarray was removed in NumPy 2.0; asarray with an
        # explicit float dtype is the supported equivalent.
        scaled_input = (np.asarray(all_value[1:], dtype=float) / 255.0 * 0.99) + 0.01
        inputs.append(scaled_input)

        # Target vector: soft one-hot — 0.01 everywhere, 0.99 at the label
        # index (again avoids asking the sigmoid for unreachable 0/1 outputs).
        target_values = np.zeros(output_nodes) + 0.01
        target_values[int(all_value[0])] = 0.99
        outputs.append(target_values)

# Stack into 2-D arrays so the training loop can shuffle by fancy indexing.
inputs = np.array(inputs)
outputs = np.array(outputs)

# --- Training configuration ---
epochs = 1

# Single-sample (online SGD) training; batch_size is kept as the hook for a
# future mini-batch extension.
batch_size = 1

start_stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f"Starting training at {start_stamp}")
print(f"Epochs: {epochs}, Training samples: {len(inputs)}, Learning rate: {n.lr}")
print("-" * 60)

for epoch in range(epochs):
    # Simple text progress bar, one '=' per epoch. The original used
    # '=' * epoch, which printed an empty bar on epoch 1 and was never full
    # on the last epoch; '=' * (epoch + 1) fills it through the current
    # epoch and gives the bar a constant width of `epochs`.
    print(f"Epoch {epoch + 1}/{epochs} [{'=' * (epoch + 1)}{' ' * (epochs - epoch - 1)}]")
    print(f"[{datetime.now().strftime('%H:%M:%S')}] Starting epoch {epoch + 1}...")

    # Running loss statistics for this epoch.
    epoch_loss = 0.0
    samples_trained = 0

    # Reshuffle every epoch so the network does not learn the sample order.
    indices = np.random.permutation(len(inputs))
    epoch_inputs = inputs[indices]
    epoch_targets = outputs[indices]

    for i in range(len(epoch_inputs)):
        input_data = epoch_inputs[i]
        target_data = epoch_targets[i]

        # One SGD step on a single sample.
        n.train(input_data, target_data)

        # Extra forward pass purely for loss logging: MSE between the soft
        # one-hot target and the network output.
        output = n.predict(input_data)
        loss = np.mean((target_data - output) ** 2)
        epoch_loss += loss
        samples_trained += 1

        # Log a running average every 1000 samples.
        if (i + 1) % 1000 == 0:
            avg_loss = epoch_loss / samples_trained
            print(f"  ├─ Trained {i + 1:4d}/{len(epoch_inputs)} | Avg Loss: {avg_loss:.5f} | LR: {n.lr}")

    # Epoch summary; guard against division by zero on an empty data set.
    avg_epoch_loss = epoch_loss / samples_trained if samples_trained else 0.0
    print(f"  └─ Epoch {epoch + 1} completed. Average Loss: {avg_epoch_loss:.5f}")

    # Optional: evaluate on the test set (requires a load_and_test_model helper).
    # if (epoch + 1) % 1 == 0:  # test after every epoch
    #     print(f"[{datetime.now().strftime('%H:%M:%S')}] Evaluating on test set...")
    #     test_accuracy = load_and_test_model(n, "data_set/mnist_test.csv")
    #     print(f"  └─ Test Accuracy: {test_accuracy:.4f}")

print(f"✅ Training finished at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")

# Persist the trained weights once training completes.
# NOTE(review): the storage format/location is defined by Network.save_weights
# in net.py — presumably files prefixed "trained_model"; confirm there.
n.save_weights("trained_model")

