import torch
from torch.cuda.amp import autocast, GradScaler

class FP16Optimizer:
    """Mixed-precision (AMP) training step with signed 8-bit gradient
    quantization for aggregation on an external FPGA backend.

    NOTE(review): relies on an ``FPGACluster`` context manager defined
    elsewhere in the project — not visible in this file; assumed to expose
    ``allreduce(list[Tensor]) -> list[Tensor]`` aligned with its input.
    """

    def __init__(self, model, lr=0.001):
        self.model = model
        self.optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        # GradScaler applies loss scaling so FP16 gradients don't underflow.
        self.scaler = GradScaler()

    def step(self, data, target):
        """Run one training step: AMP forward, scaled backward, quantized
        FPGA all-reduce, parameter update.

        Returns the loss tensor (useful for logging; original returned None).
        """
        # Bug fix: clear gradients from the previous step; without this they
        # accumulate across successive calls to step().
        self.optimizer.zero_grad()

        with autocast():
            output = self.model(data)
            loss = torch.nn.functional.cross_entropy(output, target)

        # Backward pass on the scaled loss (gradients land in FP32, scaled).
        self.scaler.scale(loss).backward()

        # Quantize gradients and aggregate them across the FPGA cluster.
        quant_grads = self.quantize_gradients()
        fpga_update = self.fpga_backend(quant_grads)

        # Bug fix: the aggregated gradients were previously computed and then
        # discarded. Write them back so the optimizer actually uses them.
        # NOTE(review): gradients are still loss-scaled at this point;
        # scaler.step() unscales before updating. This is correct only if the
        # FPGA aggregation is linear (scaling commutes with it) — confirm.
        params_with_grad = [p for p in self.model.parameters()
                            if p.grad is not None]
        for p, g in zip(params_with_grad, fpga_update):
            p.grad = g.to(dtype=p.grad.dtype)

        # scaler.step() skips the update when inf/NaN gradients are detected.
        self.scaler.step(self.optimizer)
        self.scaler.update()
        return loss

    def quantize_gradients(self, bits=8):
        """Symmetric per-tensor quantization of parameter gradients to
        signed ``bits``-bit integer levels.

        Returns ``{'data': [quantized tensors], 'scales': [scale tensors]}``,
        one entry per parameter that has a gradient, in parameter order.

        Bug fixes vs. the original:
        - The scale divided by ``2**bits - 1`` (unsigned range) while the
          clamp used the signed range ``[-2**(bits-1), 2**(bits-1) - 1]``,
          so max-magnitude gradients were clipped to roughly half their
          value. The signed positive max is now used for both.
        - An all-zero gradient produced scale 0 and a divide-by-zero (NaN);
          the scale is floored at a tiny positive epsilon.
        - Parameters with ``grad is None`` (frozen/unused) crashed
          ``torch.abs``; they are now skipped.
        """
        qmin = -(2 ** (bits - 1))          # e.g. -128 for bits=8
        qmax = 2 ** (bits - 1) - 1         # e.g.  127 for bits=8
        grads = [p.grad for p in self.model.parameters()
                 if p.grad is not None]
        # Floor the scale so zero gradients quantize to 0 instead of NaN.
        scales = [torch.clamp(torch.max(torch.abs(g)) / qmax, min=1e-12)
                  for g in grads]
        quantized = [torch.clamp(torch.round(g / s), qmin, qmax)
                     for g, s in zip(grads, scales)]
        return {'data': quantized, 'scales': scales}

    def fpga_backend(self, grads):
        """All-reduce quantized gradients on the FPGA cluster and
        dequantize the aggregated result.

        ``grads`` is the dict produced by :meth:`quantize_gradients`.
        """
        with FPGACluster() as cluster:
            # Distributed gradient aggregation across the cluster.
            aggregated = cluster.allreduce(grads['data'])
            # Dequantize: multiply each integer tensor by its scale.
            return [g * s for g, s in zip(aggregated, grads['scales'])]