import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
import ctypes
# Load the minibatch-hook shared library via ctypes.
# NOTE(review): the ../target/release/ path suggests a Cargo (Rust) release
# build — confirm the library is built before running this script.
xgpu_proxy = ctypes.CDLL("../target/release/libxgpu_proxy.so")
def start_minibatch():
    """Signal the proxy library that a new minibatch is beginning."""
    xgpu_proxy.minibatch_begin_hook()

def set_deterministic(seed=42):
    """Seed every RNG in use (random, numpy, torch, CUDA) for reproducible runs.

    Also forces deterministic cuDNN kernels and disables the benchmark
    autotuner, since autotuning can pick different kernels between runs.

    Args:
        seed: integer seed applied to all RNG sources.
    """
    import random
    random.seed(seed)

    import numpy as np
    np.random.seed(seed)

    torch.manual_seed(seed)
    # manual_seed_all seeds every CUDA device (it subsumes the single-device
    # torch.cuda.manual_seed call); it is a no-op when CUDA is unavailable.
    torch.cuda.manual_seed_all(seed)

    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Drain any in-flight GPU work so seeding takes effect from a clean state.
    # (Original used a conditional *expression* as a statement; a plain `if`
    # is the idiomatic form.)
    if torch.cuda.is_available():
        torch.cuda.synchronize()

# Seed all RNG sources up front so the whole run is reproducible.
set_deterministic(seed=42)

# Prefer the first CUDA device, fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")  # "device in use: ..."
if not torch.cuda.is_available():
    print("警告：未检测到GPU，将使用CPU训练")  # warning: no GPU detected, training on CPU

def generate_data(num_samples=100, input_dim=2, weights=(3.0, 2.0), bias=1.0,
                  noise_scale=0.01):
    """Generate a fully deterministic linear-regression dataset.

    Generalized from the original hard-coded ``y = 3*x0 + 2*x1 + 1 + 0.01*sin``:
    the coefficients are now parameters with defaults that reproduce the old
    behavior exactly.

    Args:
        num_samples: number of rows to generate.
        input_dim: number of feature columns.
        weights: per-column coefficients; only the first ``len(weights)``
            columns contribute to the target (requires len(weights) <= input_dim).
        bias: constant offset added to the target.
        noise_scale: amplitude of the deterministic sin() perturbation
            (deterministic on purpose — no RNG involved, so the data is
            reproducible without seeding).

    Returns:
        (x, y): x of shape (num_samples, input_dim), y of shape (num_samples,).
    """
    x = torch.linspace(-1, 1, num_samples * input_dim).reshape(-1, input_dim)
    y = torch.zeros(num_samples)
    for i, w in enumerate(weights):
        y = y + w * x[:, i]
    y = y + bias + noise_scale * torch.sin(torch.linspace(0, 10, num_samples))
    return x, y

# Build a 10k-sample dataset. shuffle=False keeps the batch order identical
# across runs, which matters for the reproducibility goal of this script.
x, y = generate_data(num_samples=10000)
dataset = TensorDataset(x, y)
dataloader = DataLoader(dataset, batch_size=20, shuffle=False)

class SimpleModel(nn.Module):
    """Single-layer linear regressor: maps (batch, input_dim) -> (batch, 1)."""

    def __init__(self, input_dim=2):
        super().__init__()
        # Kept as a public attribute named `linear`: callers reach into
        # model.linear.weight / model.linear.bias directly.
        self.linear = nn.Linear(input_dim, 1)

    def forward(self, x):
        prediction = self.linear(x)
        return prediction

# Pin the model's parameters to fixed starting values so every run begins
# from identical weights. This must happen BEFORE the optimizer is built:
# reassigning `.weight`/`.bias` creates new Parameter objects, and SGD below
# must track the new tensors, not the originals.
model = SimpleModel().to(device)
with torch.no_grad():
    model.linear.weight = nn.Parameter(torch.tensor([[0.5, -0.5]], device=device))
    model.linear.bias = nn.Parameter(torch.tensor([0.0], device=device))

# Mean-squared-error loss with plain SGD.
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)

print("\n" + "="*50)
print("开始训练")  # "training starts"
print("="*50 + "\n")

# Single source of truth for the epoch count: the original printed a
# hard-coded "/5" while looping over range(10), so the log was wrong.
num_epochs = 10
for epoch in range(num_epochs):
    epoch_loss = 0.0
    print(f"\n===== Epoch {epoch+1}/{num_epochs} =====")

    for batch_idx, (inputs, labels) in enumerate(dataloader):
        # Notify the GPU-proxy library that a new minibatch is starting.
        start_minibatch()

        inputs = inputs.to(device)
        labels = labels.to(device).unsqueeze(1)  # (B,) -> (B, 1) to match model output

        print(f"\nBatch {batch_idx+1}/{len(dataloader)}:")
        print(f"  输入数据（前3个样本）: \n{inputs[:3].cpu().numpy()}")
        print(f"  标签（前3个样本）: \n{labels[:3].cpu().numpy()}")

        # Standard step: clear grads, forward, loss, backward, update.
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)

        loss.backward()

        print(f"  损失值: {loss.item():.6f}")
        print(f"  权重梯度: \n{model.linear.weight.grad.cpu().numpy()}")
        print(f"  偏置梯度: \n{model.linear.bias.grad.cpu().numpy()}")

        optimizer.step()

        print(f"  更新后权重: \n{model.linear.weight.detach().cpu().numpy()}")
        print(f"  更新后偏置: \n{model.linear.bias.detach().cpu().numpy()}")

        epoch_loss += loss.item()

    avg_loss = epoch_loss / len(dataloader)
    print(f"\nEpoch {epoch+1} 平均损失: {avg_loss:.6f}")
    print("-"*40)
    # Removed dead code: the original's `if epoch == 10000: break` could
    # never fire inside range(10).


# Closing banner ("training finished") — same stdout as the original.
banner = "=" * 50
print(f"\n{banner}")
print("训练结束")
print(banner)