from torch import device
import oxford_data
from model import *
from data_process import *
import torch
import torch.nn as nn
import torch.optim as optim
# Target device for all tensors/models below.
# NOTE(review): this assignment shadows the `device` name imported from torch
# at the top of the file — that import is effectively unused.
device = 'cuda'
def init_weights(m):
    """Xavier-uniform init for Linear layers; zero their biases.

    Meant to be passed to ``model.apply(init_weights)``. Any module that
    is not an ``nn.Linear`` is left untouched.
    """
    if not isinstance(m, nn.Linear):
        return
    nn.init.xavier_uniform_(m.weight)
    if m.bias is not None:
        nn.init.constant_(m.bias, 0)
def test(model, val_loader, criterion):
    """Evaluate ``model`` on ``val_loader`` and return the per-sample mean loss.

    Runs in eval mode with gradients disabled. Also prints the ratio of the
    summed targets to the summed outputs as a rough calibration diagnostic.

    Args:
        model: network to evaluate; inputs are moved to the module-level
            ``device`` before the forward pass.
        val_loader: DataLoader yielding ``(inputs, targets)`` batches.
        criterion: loss callable taking ``(outputs, targets)``.

    Returns:
        float: total loss divided by ``len(val_loader.dataset)``.
    """
    model.eval()
    val_loss = 0.0
    target_sum = 0.0
    output_sum = 0.0
    with torch.no_grad():
        for inputs, targets in val_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            # Forward pass
            outputs = model(inputs)
            target_sum += torch.sum(targets).item()
            output_sum += torch.sum(outputs).item()

            loss = criterion(outputs, targets)
            # Weight by batch size so the final division yields a
            # per-sample average even with a ragged last batch.
            val_loss += loss.item() * inputs.size(0)

    # Average loss over the validation set
    val_loss /= len(val_loader.dataset)
    # BUG FIX: guard the diagnostic ratio — the original crashed with
    # ZeroDivisionError whenever the outputs summed to exactly zero.
    if output_sum != 0:
        print('ratios = ', target_sum / output_sum)
    else:
        print('ratios = undefined (output sum is zero)')
    return val_loss

class AngularLoss(nn.Module):
    """Mean absolute angular error with wrap-around handling.

    The raw difference ``predicted - target`` is folded into the principal
    range via ``atan2(sin(d), cos(d))``, so e.g. predicting ``2*pi`` against
    a target of ``0`` costs nothing instead of ``2*pi``.
    """

    def __init__(self):
        super().__init__()

    def forward(self, predicted, target):
        diff = predicted - target
        # Fold the difference into (-pi, pi] before taking the magnitude.
        wrapped = torch.atan2(torch.sin(diff), torch.cos(diff))
        return wrapped.abs().mean()

#train_loader, test_loader = get_dataloader(10)
train_loader, test_loader = oxford_data.get_dataloader(1000)
# Input features: accelerometer + gyroscope channels (ax, ay, az, gx, gy, gz)
input_size = 6
# LSTM hidden-state width
hidden_size = 800
#hidden_size = [128, 64, 32, 16]
# Output: a single predicted angle
output_size = 1

# Instantiate the model
model = IMULSTMModel(input_size=input_size, hidden_size=hidden_size, output_size=output_size)
#model = IMUDNNModel(input_size=input_size, hidden_sizes=hidden_size, output_size=output_size)
model.apply(init_weights)
# Use the module-level `device` constant for consistency (was model.cuda()).
model = model.to(device)
# Loss function and optimizer
criterion = AngularLoss()
#criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
num_epochs = 100
for epoch in range(num_epochs):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        # BUG FIX: gradients must be cleared before every backward pass.
        # The original called optimizer.zero_grad() once per epoch (outside
        # this loop), so gradients silently accumulated across all batches
        # of an epoch, corrupting every update after the first batch.
        optimizer.zero_grad()
        output = model(data)
        train_loss = criterion(output, target)
        train_loss.backward()
        optimizer.step()
    val_loss = test(model, test_loader, criterion)
    # train_loss here is the loss of the LAST batch of the epoch, not an
    # epoch average — kept as-is to preserve the original log semantics.
    print(f'Epoch [{epoch + 1}/{num_epochs}], Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}')



