from data_loader import MyDataSet
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from torch.utils.data import DataLoader
from data_loader import train_dataset, val_dataset, test_dataset
from mymodels import TemporalTransformer

MODEL_PATH = "/home/Dyf/code/models/st_model"

# Model hyperparameters
input_dim = 30   # input feature dimension
hidden_dim = 64  # hidden-layer width
num_heads = 8    # number of Transformer attention heads
num_layers = 2   # number of Transformer encoder layers

model = TemporalTransformer(input_dim, hidden_dim, num_heads, num_layers)

# BCELoss expects the model output to already be probabilities in [0, 1]
# (i.e. the model must end in a sigmoid) — confirm TemporalTransformer does.
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

print(len(train_dataset))
print(len(val_dataset))
print(len(test_dataset))

# NOTE(review): this wraps a fresh MyDataSet(), not `train_dataset`, so the
# training loop below may iterate validation/test samples as well — confirm
# whether DataLoader(train_dataset, ...) was intended. shuffle=False during
# training is also unusual; verify it is deliberate.
dataloader = DataLoader(MyDataSet(), batch_size=256, shuffle=False)

num_epochs = 1000

# Best validation loss seen so far; used for checkpointing in the loop below.
best_loss = 1000.0
for epoch in range(num_epochs):
    # ---- training pass ----
    model.train()
    for batch in dataloader:
        inputs, labels = batch  # unpack features and targets
        optimizer.zero_grad()
        outputs = model(inputs)
        # BCELoss needs float targets with the same (N, 1) shape as outputs.
        labels = torch.unsqueeze(labels, dim=1).to(torch.float)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

    # ---- validation pass ----
    model.eval()
    with torch.no_grad():  # fix: no autograd graph needed for evaluation
        val_input = torch.stack([i[0] for i in val_dataset])
        val_label = torch.stack([i[1] for i in val_dataset])
        val_label = torch.unsqueeze(val_label, dim=1).to(torch.float)
        val_out_put = model(val_input)
        # NOTE(review): validation binarizes at 0.6 while the test block uses
        # 0.5 — confirm which threshold is intended.
        val_line = val_out_put > 0.6
        val_label_line = val_label == 1.0
        # Element-wise agreement between thresholded predictions and labels.
        equals = torch.eq(val_line, val_label_line)
        num_equals = torch.sum(equals)
        print("Percent {}".format(num_equals / len(val_label)))
        val_loss = criterion(val_out_put, val_label)
    print("epoch val_loss", epoch, val_loss.item())
    # Only checkpoint late in training (epoch > 800) when validation improves.
    if val_loss.item() < best_loss and epoch > 800:
        best_loss = val_loss.item()  # fix: keep a plain float, not a tensor
        # NOTE(review): saves the whole module; state_dict() is the more
        # portable convention — confirm how checkpoints are loaded downstream.
        torch.save(model, os.path.join(MODEL_PATH, 'STdata_{}.pth').format(epoch))
        print("=> saved best model", epoch, val_loss)

# ---- final test-set evaluation ----
with torch.no_grad():  # fix: inference only — don't track gradients
    test_input = torch.stack([i[0] for i in test_dataset])
    test_label = torch.stack([i[1] for i in test_dataset])
    test_label = torch.unsqueeze(test_label, dim=1).to(torch.float)
    test_out_put = model(test_input)
    # Binarize predictions at 0.5 (validation uses 0.6 — confirm intent).
    test_line = test_out_put > 0.5
    test_label_line = test_label == 1.0
    # Element-wise agreement between thresholded predictions and labels.
    equals = torch.eq(test_line, test_label_line)
    num_equals = torch.sum(equals)
print("Percent TEST {}".format(num_equals / len(test_label)))
