import torch
import torch.nn as nn

class CustomAddFunction(torch.autograd.Function):
    """Custom autograd op computing ``(x + y) * scale``, exportable to ONNX.

    ``scale`` is expected to be a tensor (see ``custom_add``); it is treated
    as a non-differentiable constant in the backward pass.
    """

    @staticmethod
    def forward(ctx, x, y, scale):
        # Forward: (x + y) * scale.
        # Only `scale` is needed by backward; saving x and y as well would
        # keep them alive (and their memory pinned) for no reason.
        ctx.save_for_backward(scale)
        return (x + y) * scale

    @staticmethod
    def backward(ctx, grad_output):
        # d/dx[(x + y) * s] = s and d/dy[(x + y) * s] = s, so both input
        # gradients are grad_output * scale. `scale` gets None: it is a
        # constant supplied by the caller, not a trainable input.
        (scale,) = ctx.saved_tensors
        return grad_output * scale, grad_output * scale, None

    @staticmethod
    def symbolic(g, x, y, scale):
        # The custom domain is already encoded in the "mydomain::" prefix of
        # the op name. Passing domain_s="mydomain" would not set the node's
        # domain — it would only attach a spurious string *attribute* named
        # "domain" to the node — so it is deliberately omitted.
        return g.op("mydomain::CustomAdd", x, y, scale)

# User-friendly wrapper around CustomAddFunction
def custom_add(x, y, scale=1.0):
    """Compute ``(x + y) * scale`` through the custom autograd function.

    A non-tensor ``scale`` is promoted to a tensor matching ``x``'s dtype
    and device before being handed to the autograd Function.
    """
    if isinstance(scale, torch.Tensor):
        scale_tensor = scale
    else:
        scale_tensor = torch.tensor(scale, dtype=x.dtype, device=x.device)
    return CustomAddFunction.apply(x, y, scale_tensor)

# Test model
class CustomAddModel(nn.Module):
    """Minimal module that applies ``custom_add`` with a fixed scale."""

    def __init__(self, scale=3.0):
        super().__init__()
        # Stored as a plain float; promoted to a tensor on each forward call.
        self.scale = scale

    def forward(self, a, b):
        # Build the scale tensor on the same device/dtype as the inputs so
        # the custom op never has to move data across devices.
        factor = torch.tensor(self.scale, dtype=a.dtype, device=a.device)
        return custom_add(a, b, factor)

# Create the model instance
model = CustomAddModel(scale=3.0)
model.eval()

# Test data
x = torch.tensor([1.0, 2.0, 3.0])
y = torch.tensor([4.0, 5.0, 6.0])

# Sanity-check the forward pass before exporting
with torch.no_grad():
    output = model(x, y)
    print("PyTorch Output:", output)  # expected: [15., 21., 27.]  ((x+y)*3)

# Export the model to ONNX
onnx_path = "custom_add_model.onnx"

# Prepare the inputs — only two now, since scale is built inside forward()
dummy_input = (x, y)

torch.onnx.export(
    model,
    dummy_input,
    onnx_path,
    opset_version=15,
    input_names=["input_x", "input_y"],
    output_names=["output"],
    # Declare version 1 of the custom "mydomain" opset used by symbolic()
    custom_opsets={"mydomain": 1},
    verbose=True
)

print(f"ONNX model exported to {onnx_path}")
