from torch import nn,optim
import torch
import torch.nn.functional as F
def train(model, traindata, testdata, adj1, adj2, adj3, adj4, learn_rate=0.005, epochs=10):
    """Drive the full training loop: one training pass plus one evaluation per epoch.

    Picks CUDA when available, moves the model and all four adjacency
    matrices to that device once up front, then alternates `train_once`
    and `test` for `epochs` iterations.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("训练设备:", device)
    model.to(device)
    # Transfer the four adjacency matrices to the chosen device a single time.
    adj1, adj2, adj3, adj4 = (adj.to(device) for adj in (adj1, adj2, adj3, adj4))
    for epoch in range(epochs):
        print("epoch:", epoch)
        train_once(model, traindata, adj1, adj2, adj3, adj4, device, learn_rate)
        test(model, testdata, adj1, adj2, adj3, adj4, device)

def test(model,testdata,adj1,adj2,adj3,adj4,device):
    print("\n开始测试")
    pree = torch.rand(216,1,69,69).to(device)
    true = torch.rand(216,1,69,69).to(device)
    for i,data in enumerate(testdata):
        x,y = data   #x.size【6 1 6 69 69】  y.size[6 1 69 69]
        x = x.float().to(device)
        y = y.float().to(device)
        pre = model(x,adj1,adj2,adj3,adj4)  #pre:[6 1 69 69]
        pree[i*6:i*6+pre.shape[0],:,:,:] = pre
        true[i*6:i*6+pre.shape[0],:,:,:] = y
    mse = F.mse_loss(pree, true)
    print("MSE:",mse)

def train_once(model, traindata, adj1, adj2, adj3, adj4, device, learn_rate=0.005):
    """Run a single training epoch of `model` over `traindata` with SGD + MSE loss.

    Each batch is cast to float, moved to `device`, fed through the model
    together with the four adjacency matrices, and optimized with plain SGD.
    A carriage-return progress line is printed as batches complete.
    """
    # Loss criterion for regression on the predicted grids.
    loss_function = nn.MSELoss()
    # NOTE(review): the optimizer is rebuilt on every call, so a stateful
    # optimizer (momentum, Adam, ...) would lose its state between epochs.
    # Plain SGD without momentum is stateless, so behavior is unaffected here,
    # but consider constructing the optimizer once in the caller.
    opt = optim.SGD(model.parameters(), lr=learn_rate)
    print("开始训练")
    for i, (x, y) in enumerate(traindata):
        # x: [B, 1, T, H, W]; y: [B, 1, H, W] — assumed from the original
        # comments ([6 1 6 69 69] / [6 1 69 69]); TODO confirm vs. loader.
        x = x.float().to(device)
        y = y.float().to(device)
        print(f"\r进度: {(i+1) / len(traindata) * 100:.2f}%", end='', flush=True)
        pre = model(x, adj1, adj2, adj3, adj4)
        loss = loss_function(pre, y)
        # The original called loss.requires_grad_(True) here. That is a no-op
        # when the autograd graph is intact, and when the graph is detached it
        # merely hides the bug by letting backward() succeed while producing
        # no parameter gradients — so it has been removed.
        opt.zero_grad()   # clear accumulated gradients
        loss.backward()   # backpropagate
        opt.step()        # apply the SGD update