# 1 导入库及设置GPU
# 1.1 导入库
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from datetime import datetime
import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings("ignore")               # silence all warnings (convenient for a demo, but hides real issues)
plt.rcParams['font.sans-serif']    = ['SimHei'] # SimHei font so Chinese (CJK) labels render correctly
plt.rcParams['axes.unicode_minus'] = False      # render the minus sign correctly when a CJK font is active
plt.rcParams['figure.dpi']         = 100        # figure resolution

# 1.2 Select the compute device: CUDA GPU when available, otherwise CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# 2 Data processing
# 2.1 Load the heart-disease dataset (features in all but the last column,
#     binary label in the last column — see the split below).
# NOTE(review): hard-coded absolute path — only works on the author's machine;
# consider a relative path or a command-line argument.
df = pd.read_csv('/Users/sunhaoqing/Desktop/pythonProject/深度学习/2 Pytorch入门/data/heart.csv')

# 2.2 数据切分
X_tr, X_te, y_tr, y_te = train_test_split(df.iloc[:,:-1].values,
                                          df.iloc[:,-1].values,
                                          test_size=0.1, random_state=1)

# 2.3 数据标准化
sc = StandardScaler()
X_tr = sc.fit_transform(X_tr)
X_te = sc.transform(X_te)

X_train = torch.as_tensor(X_tr, dtype=torch.float32).unsqueeze(1)   # 转torch
X_test  = torch.as_tensor(X_te, dtype=torch.float32).unsqueeze(1)
y_train = torch.as_tensor(y_tr, dtype=torch.int64)
y_test  = torch.as_tensor(y_te, dtype=torch.int64)

# 2.4 数据加载器
train_dl = DataLoader(
    TensorDataset(X_train, y_train),
    batch_size=64,
    shuffle=True
)

test_dl = DataLoader(
    TensorDataset(X_test, y_test),
    batch_size=64,
    shuffle=False
)

# 3 构建模型
# 3 Build the model
class model_rnn(nn.Module):
    """Single-layer RNN classifier: 13 input features -> 2 class logits.

    The input is expected as (batch, seq_len, 13); only the hidden state of
    the final time step feeds the two fully connected layers.
    """

    def __init__(self):
        super().__init__()
        # Attribute names kept as rnn0/fc0/fc1 so state_dict keys are stable.
        self.rnn0 = nn.RNN(
            input_size=13,
            hidden_size=200,
            num_layers=1,
            batch_first=True
        )
        self.fc0 = nn.Linear(200, 50)
        self.fc1 = nn.Linear(50, 2)

    def forward(self, x):
        """Return raw class logits of shape (batch, 2)."""
        hidden_seq, _ = self.rnn0(x)        # hidden states for every time step
        last_step = hidden_seq[:, -1, :]    # keep only the final time step
        return self.fc1(self.fc0(last_step))

# 4 模型训练
# 4.1 训练函数
def train(dataloader, model, loss_fn, optimizer, device=None):
    """Run one full training epoch over `dataloader`.

    Args:
        dataloader: yields (X, y) batches; y holds int64 class indices.
        model: the network being trained (already on its target device).
        loss_fn: criterion, e.g. nn.CrossEntropyLoss.
        optimizer: torch optimizer built over model.parameters().
        device: device to move each batch to. Defaults to None (backward
            compatible), in which case it is inferred from the model's
            parameters instead of relying on a module-level `device` global.

    Returns:
        (epoch_accuracy, mean_batch_loss) as Python floats.
    """
    # Infer the device from the model so this function no longer depends on a
    # hidden module-level global.
    if device is None:
        device = next(model.parameters()).device

    size = len(dataloader.dataset)          # total number of training samples
    num_batches = len(dataloader)           # number of batches
    if size == 0:                           # guard: empty dataset would divide by zero below
        return 0.0, 0.0

    train_loss, train_acc = 0, 0            # running sums, normalized at the end

    for X, y in dataloader:                 # iterate batch by batch
        X, y = X.to(device), y.to(device)   # move the batch to the model's device

        # Forward pass: predictions and loss
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backward pass + parameter update
        optimizer.zero_grad()               # clear gradients from the previous step
        loss.backward()                     # compute gradients
        optimizer.step()                    # apply the update

        # Accumulate correct-prediction count and (pre-update) batch loss
        train_acc  += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()

    train_acc  /= size                      # fraction of correct predictions
    train_loss /= num_batches               # mean per-batch loss

    return train_acc, train_loss

# 4.2 测试函数
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)          # 测试集样本总数
    num_batches = len(dataloader)           # 批次数量
    test_loss, test_acc = 0, 0              # 初始化损失和准确率

    # 不进行训练（关闭梯度计算，节省显存和算力）
    with torch.no_grad():
        for imgs, target in dataloader:     # 逐批次读取测试集数据
            imgs, target = imgs.to(device), target.to(device)

            # 前向传播
            target_pred = model(imgs)       # 模型预测
            loss = loss_fn(target_pred, target)   # 计算损失

            # 累积损失与准确率
            test_loss += loss.item()
            test_acc  += (target_pred.argmax(1) == target).type(torch.float).sum().item()

    # 计算平均值
    test_acc  /= size
    test_loss /= num_batches

    return test_acc, test_loss

# 4.3 Training driver
model = model_rnn().to(device)
loss_fn   = nn.CrossEntropyLoss()               # cross-entropy for the 2-class output
learn_rate = 1e-4                               # learning rate
opt        = torch.optim.Adam(model.parameters(), lr=learn_rate)  # Adam optimizer
epochs     = 50                                 # number of training epochs

train_loss, train_acc = [], []                  # per-epoch training metrics
test_loss,  test_acc  = [], []                  # per-epoch test metrics

for epoch in range(epochs):                     # one iteration per epoch
    model.train()                               # training mode (NOTE(review): model_rnn has no Dropout/BN, so this is currently a no-op; kept for hygiene)
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, opt)

    model.eval()                                # evaluation mode before measuring test metrics
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    # Read back the current learning rate (constant here; useful if a
    # scheduler is added later)
    lr = opt.state_dict()['param_groups'][0]['lr']

    # Print this epoch's metrics
    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr={:.1e}')
    print(template.format(epoch+1,
                          epoch_train_acc*100, epoch_train_loss,
                          epoch_test_acc*100, epoch_test_loss, lr))

print("="*20, "Done", "="*20)

# 5 Model evaluation: accuracy and loss curves for train vs. test
current_time = datetime.now()  # timestamp identifying this run

epochs_range = range(epochs)

plt.figure(figsize=(12, 3))

# Left panel: accuracy curves
plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')

# Right panel: loss curves
plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')

# Fix: use the `current_time` captured above — it was previously assigned but
# unused, and datetime.now() was called a second time here.
plt.suptitle(f"Run at {current_time.strftime('%Y-%m-%d %H:%M:%S')}")

plt.show()

# 6 Confusion matrix on the held-out test set
model.eval()                            # evaluation mode (no-op for this model, kept for hygiene)
with torch.no_grad():                   # inference only — no gradients needed
    logits = model(X_test.to(device))   # forward pass over the whole test set at once
    pred = logits.argmax(1).cpu().numpy()   # predicted class per sample
cm = confusion_matrix(y_test.cpu().numpy(), pred)   # rows = true labels, cols = predictions

plt.figure(figsize=(6,5))
plt.suptitle('')                        # NOTE(review): clears any inherited suptitle; likely a leftover, harmless
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues")  # annotate cells with integer counts

# Font sizes
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.title("Confusion Matrix", fontsize=12)
plt.xlabel("Predicted Label", fontsize=10)
plt.ylabel("True Label", fontsize=10)

plt.tight_layout()  # adjust layout to avoid label overlap
plt.show()          # render the figure

# 7 Use the trained model to predict a single sample
model.eval()                                            # evaluation mode
with torch.no_grad():                                   # inference only — no gradients needed
    test_X = X_test[0].unsqueeze(0)                     # first test sample, add batch dim -> (1, 1, 13)
    pred = model(test_X.to(device)).argmax(1).item()    # predicted class index (0 or 1)
print("模型预测结果为:", pred)
print("="*20)
print("0: 不会患心脏病")
print("1: 可能患心脏病")